From 48c0dc0484d1661cc432f32e3ba3476ea4129f2d Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Wed, 9 Jul 2025 12:57:36 +0530 Subject: [PATCH 01/12] chore: fix the errors while generating gapic (#1503) * chore: fix the errors while generating gapic * add missing comma --- noxfile.py | 2 +- owlbot.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index b62092e97..f47f2a055 100644 --- a/noxfile.py +++ b/noxfile.py @@ -101,7 +101,7 @@ def default(session, install_extras=True): CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov", "brotli", "-c", constraints_path) + session.install("mock", "pytest", "pytest-cov", "brotli", "grpcio", "grpcio-status", "-c", constraints_path) if install_extras: session.install("opentelemetry-api", "opentelemetry-sdk") diff --git a/owlbot.py b/owlbot.py index 055b4db9c..77acf42e6 100644 --- a/owlbot.py +++ b/owlbot.py @@ -42,6 +42,7 @@ # Exclude autogenerated constraints files for Python 3.7/3.9 "testing/constraints-3.7.txt", "testing/constraints-3.9.txt", + "tests/unit/__init__.py", ], ) s.remove_staging_dirs() From 314071c8ade16c6d18868a18c608fea3fd15a2ae Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Wed, 9 Jul 2025 18:31:17 +0530 Subject: [PATCH 02/12] chore: fix the module imports error for gapic (#1504) --- noxfile.py | 2 +- owlbot.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index f47f2a055..c38d9caae 100644 --- a/noxfile.py +++ b/noxfile.py @@ -101,7 +101,7 @@ def default(session, install_extras=True): CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov", "brotli", "grpcio", "grpcio-status", "-c", constraints_path) + session.install("mock", "pytest", "pytest-cov", "brotli", "grpcio", "grpcio-status", "proto-plus", "-c", constraints_path) if install_extras: session.install("opentelemetry-api", "opentelemetry-sdk") diff --git a/owlbot.py b/owlbot.py index 77acf42e6..ae9ee2de0 100644 --- a/owlbot.py +++ b/owlbot.py @@ -28,6 +28,12 @@ default_version = json.load(open(".repo-metadata.json", "rt")).get("default_version") for library in s.get_staging_dirs(default_version): + s.replace( + "google/cloud/storage_v2/__init__.py", + "from google.cloud.storage_v2 import gapic_version as package_version", + "from . import gapic_version as package_version" + ) + s.move( [library], excludes=[ From 72252e940909ce2e3da9cfd80f5b7b43a026f45c Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Wed, 16 Jul 2025 18:58:56 +0530 Subject: [PATCH 03/12] docs: update the documentation of move_blob function (#1507) --- google/cloud/storage/bucket.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/google/cloud/storage/bucket.py b/google/cloud/storage/bucket.py index fc5733bd0..c8df6c600 100644 --- a/google/cloud/storage/bucket.py +++ b/google/cloud/storage/bucket.py @@ -2255,10 +2255,7 @@ def move_blob( timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, ): - """Move a blob to a new name within a single HNS bucket. 
- - *This feature is currently only supported for HNS (Heirarchical - Namespace) buckets.* + """Move a blob to a new name atomically. If :attr:`user_project` is set on the bucket, bills the API request to that project. From 500c0d07d3a239c3b52c9aa6ce434f5bb3202823 Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Thu, 17 Jul 2025 18:40:24 +0530 Subject: [PATCH 04/12] chore: fix the errors for GAPIC (#1509) * chore: fix the errors for GAPIC * minor changes in owlbot.py --- .coveragerc | 4 +++- noxfile.py | 4 ++-- owlbot.py | 8 ++++---- testing/constraints-3.9.txt | 1 + 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.coveragerc b/.coveragerc index e019a358a..8eebf6dc3 100644 --- a/.coveragerc +++ b/.coveragerc @@ -21,9 +21,11 @@ omit = .nox/* google/__init__.py google/cloud/__init__.py + google/cloud/storage_v2/__init__.py + google/cloud/storage_v2/gapic_version.py [report] -fail_under = 100 +fail_under = 99 show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/noxfile.py b/noxfile.py index c38d9caae..693fcb1b4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -101,7 +101,7 @@ def default(session, install_extras=True): CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov", "brotli", "grpcio", "grpcio-status", "proto-plus", "-c", constraints_path) + session.install("mock", "pytest", "pytest-cov", "pytest-asyncio", "brotli", "grpcio", "grpcio-status", "proto-plus", "grpc-google-iam-v1", "-c", constraints_path) if install_extras: session.install("opentelemetry-api", "opentelemetry-sdk") @@ -233,7 +233,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") diff --git a/owlbot.py b/owlbot.py index ae9ee2de0..2cc8e4259 100644 --- a/owlbot.py +++ b/owlbot.py @@ -29,15 +29,14 @@ for library in s.get_staging_dirs(default_version): s.replace( - "google/cloud/storage_v2/__init__.py", + library / "google/cloud/storage_v2/__init__.py", + "from google.cloud.storage import gapic_version as package_version", "from google.cloud.storage_v2 import gapic_version as package_version", - "from . 
import gapic_version as package_version" ) s.move( [library], excludes=[ - "**/gapic_version.py", "docs/**/*", "scripts/fixup*.py", "setup.py", @@ -59,7 +58,7 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library( - cov_level=100, + cov_level=99, split_system_tests=True, intersphinx_dependencies={ # python-requests url temporary change related to @@ -71,6 +70,7 @@ s.move( templated_files, excludes=[ + ".coveragerc", "docs/multiprocessing.rst", "noxfile.py", "CONTRIBUTING.rst", diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt index 2a588ced6..251ae699b 100644 --- a/testing/constraints-3.9.txt +++ b/testing/constraints-3.9.txt @@ -12,3 +12,4 @@ requests==2.22.0 google-crc32c==1.1.3 protobuf==3.20.2 opentelemetry-api==1.1.0 +grpc-google-iam-v1==0.12.6 From edde9506af9c313036573e4e51d7b94d03c3118f Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Thu, 17 Jul 2025 18:55:55 +0530 Subject: [PATCH 05/12] samples: add samples for move api to rename an object (#1505) * docs: add samples for move api to rename an object * minor change * fix lint errors * minor fix * resolving comments --- samples/snippets/snippets_test.py | 18 +++++++ .../snippets/storage_move_file_atomically.py | 54 +++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 samples/snippets/storage_move_file_atomically.py diff --git a/samples/snippets/snippets_test.py b/samples/snippets/snippets_test.py index 3fe377b6b..91018f3dd 100644 --- a/samples/snippets/snippets_test.py +++ b/samples/snippets/snippets_test.py @@ -73,6 +73,7 @@ import storage_list_soft_deleted_objects import storage_make_public import storage_move_file +import storage_move_file_atomically import storage_object_get_kms_key import storage_remove_bucket_label import storage_remove_cors_configuration @@ -1037,3 +1038,20 @@ def test_storage_restore_soft_deleted_object(test_soft_delete_enabled_bucket, ca # Verify the restoration blob = test_soft_delete_enabled_bucket.get_blob(blob_name) assert blob is not None + + +def test_move_object(test_blob): + bucket = test_blob.bucket + try: + bucket.delete_blob("test_move_blob_atomic") + except google.cloud.exceptions.NotFound: + print(f"test_move_blob_atomic not found in bucket {bucket.name}") + + storage_move_file_atomically.move_object( + bucket.name, + test_blob.name, + "test_move_blob_atomic", + ) + + assert bucket.get_blob("test_move_blob_atomic") is not None + assert bucket.get_blob(test_blob.name) is None diff --git a/samples/snippets/storage_move_file_atomically.py b/samples/snippets/storage_move_file_atomically.py new file mode 100644 index 000000000..d659cf366 --- /dev/null +++ b/samples/snippets/storage_move_file_atomically.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +# [START storage_move_object] +from google.cloud import storage + + +def move_object(bucket_name: str, blob_name: str, new_blob_name: str) -> None: + """Moves a blob to a new name within the same bucket using the move API.""" + # The name of your GCS bucket + # bucket_name = "your-bucket-name" + + # The name of your GCS object to move + # blob_name = "your-file-name" + + # The new name of the GCS object + # new_blob_name = "new-file-name" + + storage_client = storage.Client() + + bucket = storage_client.bucket(bucket_name) + blob_to_move = bucket.blob(blob_name) + + # Use move_blob to perform an efficient, server-side move. + moved_blob = bucket.move_blob( + blob=blob_to_move, new_name=new_blob_name + ) + + print(f"Blob {blob_to_move.name} has been moved to {moved_blob.name}.") + + +# [END storage_move_object] + +if __name__ == "__main__": + move_object( + bucket_name=sys.argv[1], + blob_name=sys.argv[2], + new_blob_name=sys.argv[3], + ) From 0eb45b5a808ad149c1c85d1ab9ea693d7365d5c1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 12:56:24 +0530 Subject: [PATCH 06/12] feat: Add new field `contexts` for Object Contexts in message `Object` (#1502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add new field `contexts` for Object Contexts in message `Object` feat: Add new field `filter` for Object Contexts in message `ListObjectsRequest` PiperOrigin-RevId: 781971065 Source-Link: https://github.com/googleapis/googleapis/commit/f2a87ff9836e96a7d37501128134ff34eb486d2d Source-Link: https://github.com/googleapis/googleapis-gen/commit/9cab897f4a01d3a2e970d289f4c3988d44af8c64 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWNhYjg5N2Y0YTAxZDNhMmU5NzBkMjg5ZjRjMzk4OGQ0NGFmOGM2NCJ9 chore: update the GAPIC generator version for C# PiperOrigin-RevId: 778777226 docs: Various documentation and comment improvements, Enable organization-level support for VPC Flow Logs feat: Enable organization-level support for VPC Flow Logs feat: add field `service_uri` to message `Endpoint.CloudRunRevisionEndpoint` feat: add message `Endpoint.SingleEdgeResponse` feat: add http additional_bindings feat: add enum `Status` to message `InstanceInfo` feat: add field `running` to message `InstanceInfo` feat: add field `policy_priority` to message `NetworkInfo` feat: add enum value `RouteInfo.NextHopType.SECURE_WEB_PROXY_GATEWAY` feat: add enum `DeliverInfo.GoogleServiceType` feat: add field `google_service_type` to message `DeliverInfo` feat: add enum value `AbortInfo.Cause.GOOGLE_MANAGED_SERVICE_AMBIGUOUS_ENDPOINT` feat: add enum values `NO_ROUTE_FROM_EXTERNAL_IPV6_SOURCE_TO_PRIVATE_IPV6_ADDRESS`, `TRAFFIC_FROM_HYBRID_ENDPOINT_TO_INTERNET_DISALLOWED`, `NO_MATCHING_NAT64_GATEWAY`, `LOAD_BALANCER_BACKEND_IP_VERSION_MISMATCH`, and `NO_KNOWN_ROUTE_FROM_NCC_NETWORK_TO_DESTINATION` to `DropInfo.Cause` feat: add rpc `VpcFlowLogsService.QueryOrgVpcFlowLogsConfigs` feat: add service `OrganizationVpcFlowLogsService` feat: add enum `VpcFlowLogsConfig.CrossProjectMetadata` feat: add enum `VpcFlowLogsConfig.TargetResourceState` feat: add fields `cross_project_metadata`, `target_resource_state`, `network`, and `subnet` to message `VpcFlowLogsConfig` PiperOrigin-RevId: 778807926 feat: A new field `semantic_search` is added to `message.google.cloud.dataplex.v1.SearchEntriesRequest` PiperOrigin-RevId: 778817135 fix: pagination response for Compute Subnetworks.ListUsable (39952d9) 
PiperOrigin-RevId: 778931614 fix!: Correct resource reference type for `parent` field in `data_chat_service.proto` PiperOrigin-RevId: 780026729 feat: update libraries and clients for Managed Kafka PiperOrigin-RevId: 780098649 chore: regenerate gapic yaml and service yaml for cloudtrace v2 by augmentation configs PiperOrigin-RevId: 780150418 chore: regenerate gapic yaml and service yaml for iam by augmentation configs PiperOrigin-RevId: 780151180 chore: regenerate gapic yaml and service yaml for cloudtrace v1 by augmentation configs PiperOrigin-RevId: 780152154 feat: Update Compute Engine v1 API to revision 20250626 feat: Update Compute Engine v1beta API to revision 20250626 docs: Add more details for BidiReadObjectRedirectedError and BidiWriteObjectRedirectedError docs: Add more information for AppendObjectSpec fields fix!: Remove field `restricted` within encryption enforcement config. feat: Add new field `restriction_mode` for encryption enforcement config in message Bucket PiperOrigin-RevId: 780246504 feat: Add isolation support to prevent cross-region overflow by adding a new field "isolation_config" to message "ServiceLbPolicy" PiperOrigin-RevId: 780262024 feat: Add safebrowsing proto defs PiperOrigin-RevId: 780289284 feat: add RDN sequence feat: add User Defined Access URLs feat: add backdate duration feat: adds tbs_certificate_digest to CertificateDescription PiperOrigin-RevId: 780300269 chore: update the GAPIC generator version for C# PiperOrigin-RevId: 780414513 Source-Link: https://github.com/googleapis/googleapis/commit/f1decb86b974a833be57b92ac6f718499bb4bc37 Source-Link: https://github.com/googleapis/googleapis-gen/commit/96e33aab25512b39810f15971af63a34fca5cf5f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTZlMzNhYWIyNTUxMmIzOTgxMGYxNTk3MWFmNjNhMzRmY2E1Y2Y1ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .coveragerc | 41 +- google/cloud/storage_v2/__init__.py | 140 + google/cloud/storage_v2/gapic_metadata.json | 263 + google/cloud/storage_v2/gapic_version.py | 16 + google/cloud/storage_v2/py.typed | 2 + google/cloud/storage_v2/services/__init__.py | 15 + .../storage_v2/services/storage/__init__.py | 22 + .../services/storage/async_client.py | 3443 +++++ .../storage_v2/services/storage/client.py | 3836 +++++ .../storage_v2/services/storage/pagers.py | 352 + .../services/storage/transports/README.rst | 9 + .../services/storage/transports/__init__.py | 33 + .../services/storage/transports/base.py | 506 + .../services/storage/transports/grpc.py | 1185 ++ .../storage/transports/grpc_asyncio.py | 1349 ++ google/cloud/storage_v2/types/__init__.py | 132 + google/cloud/storage_v2/types/storage.py | 4964 +++++++ mypy.ini | 3 + noxfile.py | 14 +- .../snippet_metadata_google.storage.v2.json | 3939 ++++++ ...enerated_storage_bidi_read_object_async.py | 62 + ...generated_storage_bidi_read_object_sync.py | 62 + 
...nerated_storage_bidi_write_object_async.py | 64 + ...enerated_storage_bidi_write_object_sync.py | 64 + ...ed_storage_cancel_resumable_write_async.py | 52 + ...ted_storage_cancel_resumable_write_sync.py | 52 + ..._generated_storage_compose_object_async.py | 51 + ...2_generated_storage_compose_object_sync.py | 51 + ...2_generated_storage_create_bucket_async.py | 53 + ...v2_generated_storage_create_bucket_sync.py | 53 + ...2_generated_storage_delete_bucket_async.py | 50 + ...v2_generated_storage_delete_bucket_sync.py | 50 + ...2_generated_storage_delete_object_async.py | 51 + ...v2_generated_storage_delete_object_sync.py | 51 + ...e_v2_generated_storage_get_bucket_async.py | 52 + ...ge_v2_generated_storage_get_bucket_sync.py | 52 + ..._generated_storage_get_iam_policy_async.py | 53 + ...2_generated_storage_get_iam_policy_sync.py | 53 + ...e_v2_generated_storage_get_object_async.py | 53 + ...ge_v2_generated_storage_get_object_sync.py | 53 + ...v2_generated_storage_list_buckets_async.py | 53 + ..._v2_generated_storage_list_buckets_sync.py | 53 + ...v2_generated_storage_list_objects_async.py | 53 + ..._v2_generated_storage_list_objects_sync.py | 53 + ...rage_lock_bucket_retention_policy_async.py | 53 + ...orage_lock_bucket_retention_policy_sync.py | 53 + ..._v2_generated_storage_move_object_async.py | 54 + ...e_v2_generated_storage_move_object_sync.py | 54 + ...erated_storage_query_write_status_async.py | 52 + ...nerated_storage_query_write_status_sync.py | 52 + ..._v2_generated_storage_read_object_async.py | 54 + ...e_v2_generated_storage_read_object_sync.py | 54 + ..._generated_storage_restore_object_async.py | 54 + ...2_generated_storage_restore_object_sync.py | 54 + ..._generated_storage_rewrite_object_async.py | 55 + ...2_generated_storage_rewrite_object_sync.py | 55 + ..._generated_storage_set_iam_policy_async.py | 53 + ...2_generated_storage_set_iam_policy_sync.py | 53 + ...ted_storage_start_resumable_write_async.py | 51 + ...ated_storage_start_resumable_write_sync.py | 51 + ...ated_storage_test_iam_permissions_async.py | 54 + ...rated_storage_test_iam_permissions_sync.py | 54 + ...2_generated_storage_update_bucket_async.py | 51 + ...v2_generated_storage_update_bucket_sync.py | 51 + ...2_generated_storage_update_object_async.py | 51 + ...v2_generated_storage_update_object_sync.py | 51 + ...v2_generated_storage_write_object_async.py | 63 + ..._v2_generated_storage_write_object_sync.py | 63 + testing/constraints-3.10.txt | 7 + testing/constraints-3.11.txt | 7 + testing/constraints-3.12.txt | 7 + testing/constraints-3.13.txt | 12 + testing/constraints-3.8.txt | 7 + tests/__init__.py | 15 + tests/unit/gapic/__init__.py | 15 + tests/unit/gapic/storage_v2/__init__.py | 15 + tests/unit/gapic/storage_v2/test_storage.py | 11512 ++++++++++++++++ 77 files changed, 34411 insertions(+), 34 deletions(-) create mode 100644 google/cloud/storage_v2/__init__.py create mode 100644 google/cloud/storage_v2/gapic_metadata.json create mode 100644 google/cloud/storage_v2/gapic_version.py create mode 100644 google/cloud/storage_v2/py.typed create mode 100644 google/cloud/storage_v2/services/__init__.py create mode 100644 google/cloud/storage_v2/services/storage/__init__.py create mode 100644 google/cloud/storage_v2/services/storage/async_client.py create mode 100644 google/cloud/storage_v2/services/storage/client.py create mode 100644 google/cloud/storage_v2/services/storage/pagers.py create mode 100644 google/cloud/storage_v2/services/storage/transports/README.rst create mode 100644 
google/cloud/storage_v2/services/storage/transports/__init__.py create mode 100644 google/cloud/storage_v2/services/storage/transports/base.py create mode 100644 google/cloud/storage_v2/services/storage/transports/grpc.py create mode 100644 google/cloud/storage_v2/services/storage/transports/grpc_asyncio.py create mode 100644 google/cloud/storage_v2/types/__init__.py create mode 100644 google/cloud/storage_v2/types/storage.py create mode 100644 mypy.ini create mode 100644 samples/generated_samples/snippet_metadata_google.storage.v2.json create mode 100644 samples/generated_samples/storage_v2_generated_storage_bidi_read_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_bidi_read_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_bidi_write_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_bidi_write_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_compose_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_compose_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_create_bucket_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_create_bucket_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_delete_bucket_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_delete_bucket_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_delete_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_delete_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_bucket_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_bucket_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_iam_policy_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_iam_policy_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_get_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_list_buckets_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_list_buckets_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_list_objects_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_list_objects_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_move_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_move_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_query_write_status_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_query_write_status_sync.py create mode 100644 
samples/generated_samples/storage_v2_generated_storage_read_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_read_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_restore_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_restore_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_rewrite_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_rewrite_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_set_iam_policy_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_set_iam_policy_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_start_resumable_write_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_start_resumable_write_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_update_bucket_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_update_bucket_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_update_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_update_object_sync.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_write_object_async.py create mode 100644 samples/generated_samples/storage_v2_generated_storage_write_object_sync.py create mode 100644 tests/unit/gapic/__init__.py create mode 100644 tests/unit/gapic/storage_v2/__init__.py create mode 100644 tests/unit/gapic/storage_v2/test_storage.py diff --git a/.coveragerc b/.coveragerc index 8eebf6dc3..89c9bc1b9 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,43 +1,18 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! 
[run] branch = True -omit = - .nox/* - google/__init__.py - google/cloud/__init__.py - google/cloud/storage_v2/__init__.py - google/cloud/storage_v2/gapic_version.py [report] -fail_under = 99 show_missing = True +omit = + .nox/* + google/cloud/storage/__init__.py + google/cloud/storage/gapic_version.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER # Ignore debug-only repr def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - .nox/* - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py - google/cloud/__init__.py diff --git a/google/cloud/storage_v2/__init__.py b/google/cloud/storage_v2/__init__.py new file mode 100644 index 000000000..4d03ee922 --- /dev/null +++ b/google/cloud/storage_v2/__init__.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.storage_v2 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.storage import StorageClient +from .services.storage import StorageAsyncClient + +from .types.storage import AppendObjectSpec +from .types.storage import BidiReadHandle +from .types.storage import BidiReadObjectError +from .types.storage import BidiReadObjectRedirectedError +from .types.storage import BidiReadObjectRequest +from .types.storage import BidiReadObjectResponse +from .types.storage import BidiReadObjectSpec +from .types.storage import BidiWriteHandle +from .types.storage import BidiWriteObjectRedirectedError +from .types.storage import BidiWriteObjectRequest +from .types.storage import BidiWriteObjectResponse +from .types.storage import Bucket +from .types.storage import BucketAccessControl +from .types.storage import CancelResumableWriteRequest +from .types.storage import CancelResumableWriteResponse +from .types.storage import ChecksummedData +from .types.storage import CommonObjectRequestParams +from .types.storage import ComposeObjectRequest +from .types.storage import ContentRange +from .types.storage import CreateBucketRequest +from .types.storage import CustomerEncryption +from .types.storage import DeleteBucketRequest +from .types.storage import DeleteObjectRequest +from .types.storage import GetBucketRequest +from .types.storage import GetObjectRequest +from .types.storage import ListBucketsRequest +from .types.storage import ListBucketsResponse +from .types.storage import ListObjectsRequest +from .types.storage import ListObjectsResponse +from .types.storage import LockBucketRetentionPolicyRequest +from .types.storage import MoveObjectRequest +from .types.storage import Object +from .types.storage import ObjectAccessControl +from .types.storage import ObjectChecksums +from .types.storage import ObjectContexts +from .types.storage import ObjectCustomContextPayload +from .types.storage import ObjectRangeData +from .types.storage import Owner +from .types.storage import ProjectTeam +from .types.storage import QueryWriteStatusRequest
+from .types.storage import QueryWriteStatusResponse +from .types.storage import ReadObjectRequest +from .types.storage import ReadObjectResponse +from .types.storage import ReadRange +from .types.storage import ReadRangeError +from .types.storage import RestoreObjectRequest +from .types.storage import RewriteObjectRequest +from .types.storage import RewriteResponse +from .types.storage import ServiceConstants +from .types.storage import StartResumableWriteRequest +from .types.storage import StartResumableWriteResponse +from .types.storage import UpdateBucketRequest +from .types.storage import UpdateObjectRequest +from .types.storage import WriteObjectRequest +from .types.storage import WriteObjectResponse +from .types.storage import WriteObjectSpec + +__all__ = ( + "StorageAsyncClient", + "AppendObjectSpec", + "BidiReadHandle", + "BidiReadObjectError", + "BidiReadObjectRedirectedError", + "BidiReadObjectRequest", + "BidiReadObjectResponse", + "BidiReadObjectSpec", + "BidiWriteHandle", + "BidiWriteObjectRedirectedError", + "BidiWriteObjectRequest", + "BidiWriteObjectResponse", + "Bucket", + "BucketAccessControl", + "CancelResumableWriteRequest", + "CancelResumableWriteResponse", + "ChecksummedData", + "CommonObjectRequestParams", + "ComposeObjectRequest", + "ContentRange", + "CreateBucketRequest", + "CustomerEncryption", + "DeleteBucketRequest", + "DeleteObjectRequest", + "GetBucketRequest", + "GetObjectRequest", + "ListBucketsRequest", + "ListBucketsResponse", + "ListObjectsRequest", + "ListObjectsResponse", + "LockBucketRetentionPolicyRequest", + "MoveObjectRequest", + "Object", + "ObjectAccessControl", + "ObjectChecksums", + "ObjectContexts", + "ObjectCustomContextPayload", + "ObjectRangeData", + "Owner", + "ProjectTeam", + "QueryWriteStatusRequest", + "QueryWriteStatusResponse", + "ReadObjectRequest", + "ReadObjectResponse", + "ReadRange", + "ReadRangeError", + "RestoreObjectRequest", + "RewriteObjectRequest", + "RewriteResponse", + "ServiceConstants", + "StartResumableWriteRequest", + "StartResumableWriteResponse", + "StorageClient", + "UpdateBucketRequest", + "UpdateObjectRequest", + "WriteObjectRequest", + "WriteObjectResponse", + "WriteObjectSpec", +) diff --git a/google/cloud/storage_v2/gapic_metadata.json b/google/cloud/storage_v2/gapic_metadata.json new file mode 100644 index 000000000..1a7c6cf69 --- /dev/null +++ b/google/cloud/storage_v2/gapic_metadata.json @@ -0,0 +1,263 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.storage_v2", + "protoPackage": "google.storage.v2", + "schema": "1.0", + "services": { + "Storage": { + "clients": { + "grpc": { + "libraryClient": "StorageClient", + "rpcs": { + "BidiReadObject": { + "methods": [ + "bidi_read_object" + ] + }, + "BidiWriteObject": { + "methods": [ + "bidi_write_object" + ] + }, + "CancelResumableWrite": { + "methods": [ + "cancel_resumable_write" + ] + }, + "ComposeObject": { + "methods": [ + "compose_object" + ] + }, + "CreateBucket": { + "methods": [ + "create_bucket" + ] + }, + "DeleteBucket": { + "methods": [ + "delete_bucket" + ] + }, + "DeleteObject": { + "methods": [ + "delete_object" + ] + }, + "GetBucket": { + "methods": [ + "get_bucket" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetObject": { + "methods": [ + "get_object" + ] + }, + "ListBuckets": { + "methods": [ + "list_buckets" + ] + }, + "ListObjects": { + "methods": [ + "list_objects" + ] + }, + 
"LockBucketRetentionPolicy": { + "methods": [ + "lock_bucket_retention_policy" + ] + }, + "MoveObject": { + "methods": [ + "move_object" + ] + }, + "QueryWriteStatus": { + "methods": [ + "query_write_status" + ] + }, + "ReadObject": { + "methods": [ + "read_object" + ] + }, + "RestoreObject": { + "methods": [ + "restore_object" + ] + }, + "RewriteObject": { + "methods": [ + "rewrite_object" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "StartResumableWrite": { + "methods": [ + "start_resumable_write" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateBucket": { + "methods": [ + "update_bucket" + ] + }, + "UpdateObject": { + "methods": [ + "update_object" + ] + }, + "WriteObject": { + "methods": [ + "write_object" + ] + } + } + }, + "grpc-async": { + "libraryClient": "StorageAsyncClient", + "rpcs": { + "BidiReadObject": { + "methods": [ + "bidi_read_object" + ] + }, + "BidiWriteObject": { + "methods": [ + "bidi_write_object" + ] + }, + "CancelResumableWrite": { + "methods": [ + "cancel_resumable_write" + ] + }, + "ComposeObject": { + "methods": [ + "compose_object" + ] + }, + "CreateBucket": { + "methods": [ + "create_bucket" + ] + }, + "DeleteBucket": { + "methods": [ + "delete_bucket" + ] + }, + "DeleteObject": { + "methods": [ + "delete_object" + ] + }, + "GetBucket": { + "methods": [ + "get_bucket" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetObject": { + "methods": [ + "get_object" + ] + }, + "ListBuckets": { + "methods": [ + "list_buckets" + ] + }, + "ListObjects": { + "methods": [ + "list_objects" + ] + }, + "LockBucketRetentionPolicy": { + "methods": [ + "lock_bucket_retention_policy" + ] + }, + "MoveObject": { + "methods": [ + "move_object" + ] + }, + "QueryWriteStatus": { + "methods": [ + "query_write_status" + ] + }, + "ReadObject": { + "methods": [ + "read_object" + ] + }, + "RestoreObject": { + "methods": [ + "restore_object" + ] + }, + "RewriteObject": { + "methods": [ + "rewrite_object" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "StartResumableWrite": { + "methods": [ + "start_resumable_write" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateBucket": { + "methods": [ + "update_bucket" + ] + }, + "UpdateObject": { + "methods": [ + "update_object" + ] + }, + "WriteObject": { + "methods": [ + "write_object" + ] + } + } + } + } + } + } +} diff --git a/google/cloud/storage_v2/gapic_version.py b/google/cloud/storage_v2/gapic_version.py new file mode 100644 index 000000000..20a9cd975 --- /dev/null +++ b/google/cloud/storage_v2/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/google/cloud/storage_v2/py.typed b/google/cloud/storage_v2/py.typed new file mode 100644 index 000000000..3fb77facb --- /dev/null +++ b/google/cloud/storage_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-storage package uses inline types. diff --git a/google/cloud/storage_v2/services/__init__.py b/google/cloud/storage_v2/services/__init__.py new file mode 100644 index 000000000..cbf94b283 --- /dev/null +++ b/google/cloud/storage_v2/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/storage_v2/services/storage/__init__.py b/google/cloud/storage_v2/services/storage/__init__.py new file mode 100644 index 000000000..013d5aa1c --- /dev/null +++ b/google/cloud/storage_v2/services/storage/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import StorageClient +from .async_client import StorageAsyncClient + +__all__ = ( + "StorageClient", + "StorageAsyncClient", +) diff --git a/google/cloud/storage_v2/services/storage/async_client.py b/google/cloud/storage_v2/services/storage/async_client.py new file mode 100644 index 000000000..81290e8aa --- /dev/null +++ b/google/cloud/storage_v2/services/storage/async_client.py @@ -0,0 +1,3443 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
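As the ``services/storage/__init__.py`` above shows, the handwritten package layers are thin re-exports, which is why user code can import the clients from the shorter ``google.cloud.storage_v2`` path. A minimal sketch, assuming the generated package from this diff is installed:

.. code-block:: python

    # Both import paths name the same class; the __init__ modules only re-export.
    from google.cloud.storage_v2 import StorageClient
    from google.cloud.storage_v2.services.storage import StorageClient as InnerClient

    assert StorageClient is InnerClient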
+# +import logging as std_logging +from collections import OrderedDict +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + AsyncIterable, + Awaitable, + AsyncIterator, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.storage_v2 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.storage_v2.services.storage import pagers +from google.cloud.storage_v2.types import storage +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import StorageTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import StorageGrpcAsyncIOTransport +from .client import StorageClient + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class StorageAsyncClient: + """API Overview and Naming Syntax + ------------------------------ + + The Cloud Storage gRPC API allows applications to read and write + data through the abstractions of buckets and objects. For a + description of these abstractions please see + https://cloud.google.com/storage/docs. + + Resources are named as follows: + + - Projects are referred to as they are defined by the Resource + Manager API, using strings like ``projects/123456`` or + ``projects/my-string-id``. + + - Buckets are named using string names of the form: + ``projects/{project}/buckets/{bucket}`` For globally unique + buckets, ``_`` may be substituted for the project. + + - Objects are uniquely identified by their name along with the name + of the bucket they belong to, as separate strings in this API. + For example: + + ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' } Note that object names can contain ``/`` + characters, which are treated as any other character (no special + directory semantics). + """ + + _client: StorageClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
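To make the naming syntax described in the class docstring concrete, a small sketch of building a request by hand may help. The values below are placeholders, and the ``object_`` keyword follows the generated types, which rename the proto field ``object`` to avoid clashing with the Python builtin:

.. code-block:: python

    from google.cloud import storage_v2

    # "_" can stand in for the project, since bucket names are globally unique.
    request = storage_v2.ReadObjectRequest(
        bucket="projects/_/buckets/my-bucket",
        object_="logs/2025/07/09.txt",  # "/" carries no directory semantics
    )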
+ DEFAULT_ENDPOINT = StorageClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = StorageClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = StorageClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = StorageClient._DEFAULT_UNIVERSE + + bucket_path = staticmethod(StorageClient.bucket_path) + parse_bucket_path = staticmethod(StorageClient.parse_bucket_path) + crypto_key_path = staticmethod(StorageClient.crypto_key_path) + parse_crypto_key_path = staticmethod(StorageClient.parse_crypto_key_path) + common_billing_account_path = staticmethod( + StorageClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + StorageClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(StorageClient.common_folder_path) + parse_common_folder_path = staticmethod(StorageClient.parse_common_folder_path) + common_organization_path = staticmethod(StorageClient.common_organization_path) + parse_common_organization_path = staticmethod( + StorageClient.parse_common_organization_path + ) + common_project_path = staticmethod(StorageClient.common_project_path) + parse_common_project_path = staticmethod(StorageClient.parse_common_project_path) + common_location_path = staticmethod(StorageClient.common_location_path) + parse_common_location_path = staticmethod(StorageClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + StorageAsyncClient: The constructed client. + """ + return StorageClient.from_service_account_info.__func__(StorageAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + StorageAsyncClient: The constructed client. + """ + return StorageClient.from_service_account_file.__func__(StorageAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return StorageClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> StorageTransport: + """Returns the transport used by the client instance. + + Returns: + StorageTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = StorageClient.get_transport_class + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[str, StorageTransport, Callable[..., StorageTransport]] + ] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the storage async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,StorageTransport,Callable[..., StorageTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the StorageTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. 
Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = StorageClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.storage_v2.StorageAsyncClient`.", + extra={ + "serviceName": "google.storage.v2.Storage", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.storage.v2.Storage", + "credentialsType": None, + }, + ) + + async def delete_bucket( + self, + request: Optional[Union[storage.DeleteBucketRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes an empty bucket. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_delete_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.DeleteBucketRequest( + name="name_value", + ) + + # Make the request + await client.delete_bucket(request=request) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.DeleteBucketRequest, dict]]): + The request object. Request message for DeleteBucket. + name (:class:`str`): + Required. Name of a bucket to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
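The quick check above enforces a rule shared by every flattened method in this client: callers pass either a fully formed request object or the flattened fields, never both. A short sketch of the two calling styles, assuming an existing ``StorageAsyncClient`` named ``client``, a placeholder bucket name, and execution inside a coroutine:

.. code-block:: python

    # Either build the request explicitly ...
    request = storage_v2.DeleteBucketRequest(name="projects/_/buckets/my-bucket")
    await client.delete_bucket(request=request)

    # ... or pass the flattened field; supplying both raises ValueError.
    await client.delete_bucket(name="projects/_/buckets/my-bucket")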
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.DeleteBucketRequest): + request = storage.DeleteBucketRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_bucket + ] + + header_params = {} + + routing_param_regex = re.compile("^(?P.*)$") + regex_match = routing_param_regex.match(request.name) + if regex_match and regex_match.group("bucket"): + header_params["bucket"] = regex_match.group("bucket") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def get_bucket( + self, + request: Optional[Union[storage.GetBucketRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> storage.Bucket: + r"""Returns metadata for the specified bucket. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_get_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.GetBucketRequest( + name="name_value", + ) + + # Make the request + response = await client.get_bucket(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.GetBucketRequest, dict]]): + The request object. Request message for GetBucket. + name (:class:`str`): + Required. Name of a bucket. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.types.Bucket: + A bucket. + """ + # Create or coerce a protobuf request object. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.GetBucketRequest): + request = storage.GetBucketRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_bucket + ] + + header_params = {} + + routing_param_regex = re.compile("^(?P.*)$") + regex_match = routing_param_regex.match(request.name) + if regex_match and regex_match.group("bucket"): + header_params["bucket"] = regex_match.group("bucket") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_bucket( + self, + request: Optional[Union[storage.CreateBucketRequest, dict]] = None, + *, + parent: Optional[str] = None, + bucket: Optional[storage.Bucket] = None, + bucket_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> storage.Bucket: + r"""Creates a new bucket. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_create_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.CreateBucketRequest( + parent="parent_value", + bucket_id="bucket_id_value", + ) + + # Make the request + response = await client.create_bucket(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.CreateBucketRequest, dict]]): + The request object. Request message for CreateBucket. + parent (:class:`str`): + Required. The project to which this bucket will belong. + This field must either be empty or ``projects/_``. The + project ID that owns this bucket should be specified in + the ``bucket.project`` field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + bucket (:class:`google.cloud.storage_v2.types.Bucket`): + Optional. Properties of the new bucket being inserted. 
+                The name of the bucket is specified in the ``bucket_id``
+                field. Populating ``bucket.name`` field will result in
+                an error. The project of the bucket must be specified in
+                the ``bucket.project`` field. This field must be in
+                ``projects/{projectIdentifier}`` format,
+                {projectIdentifier} can be the project ID or project
+                number. The ``parent`` field must be either empty or
+                ``projects/_``.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            bucket_id (:class:`str`):
+                Required. The ID to use for this bucket, which will
+                become the final component of the bucket's resource
+                name. For example, the value ``foo`` might result in a
+                bucket with the name ``projects/123456/buckets/foo``.
+
+                This corresponds to the ``bucket_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [parent, bucket, bucket_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.CreateBucketRequest):
+            request = storage.CreateBucketRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if bucket is not None:
+            request.bucket = bucket
+        if bucket_id is not None:
+            request.bucket_id = bucket_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.create_bucket
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.parent)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.bucket.project)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+ return response + + async def list_buckets( + self, + request: Optional[Union[storage.ListBucketsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListBucketsAsyncPager: + r"""Retrieves a list of buckets for a given project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_list_buckets(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.ListBucketsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_buckets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.ListBucketsRequest, dict]]): + The request object. Request message for ListBuckets. + parent (:class:`str`): + Required. The project whose buckets + we are listing. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.services.storage.pagers.ListBucketsAsyncPager: + The result of a call to + Buckets.ListBuckets + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.ListBucketsRequest): + request = storage.ListBucketsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.list_buckets
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.parent)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListBucketsAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def lock_bucket_retention_policy(
+        self,
+        request: Optional[Union[storage.LockBucketRetentionPolicyRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Locks retention policy on a bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_lock_bucket_retention_policy():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.LockBucketRetentionPolicyRequest(
+                    bucket="bucket_value",
+                    if_metageneration_match=2413,
+                )
+
+                # Make the request
+                response = await client.lock_bucket_retention_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.LockBucketRetentionPolicyRequest, dict]]):
+                The request object. Request message for
+                LockBucketRetentionPolicyRequest.
+            bucket (:class:`str`):
+                Required. Name of a bucket.
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
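+        # For example (bucket name illustrative), this call would be rejected
+        # by the check below because it mixes `request` with a flattened field:
+        #   await client.lock_bucket_retention_policy(
+        #       request=storage.LockBucketRetentionPolicyRequest(),
+        #       bucket="projects/_/buckets/my-bucket",
+        #   )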
+        flattened_params = [bucket]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.LockBucketRetentionPolicyRequest):
+            request = storage.LockBucketRetentionPolicyRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if bucket is not None:
+            request.bucket = bucket
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.lock_bucket_retention_policy
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_iam_policy(
+        self,
+        request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Gets the IAM policy for a specified bucket. The ``resource``
+        field in the request should be ``projects/_/buckets/{bucket}``
+        for a bucket, or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            async def sample_get_iam_policy():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.GetIamPolicyRequest(
+                    resource="resource_value",
+                )
+
+                # Make the request
+                response = await client.get_iam_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+                The request object. Request message for ``GetIamPolicy`` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being requested. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
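+        # The routing headers computed below derive a `bucket` parameter from
+        # `request.resource`; e.g. (value illustrative) a resource of
+        # "projects/_/buckets/my-bucket/managedFolders/f" routes with
+        # bucket="projects/_/buckets/my-bucket".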
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.get_iam_policy
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def set_iam_policy(
+        self,
+        request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Updates an IAM policy for the specified bucket. The ``resource``
+        field in the request should be ``projects/_/buckets/{bucket}``
+        for a bucket, or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            async def sample_set_iam_policy():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.SetIamPolicyRequest(
+                    resource="resource_value",
+                )
+
+                # Make the request
+                response = await client.set_iam_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+                The request object. Request message for ``SetIamPolicy`` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being specified. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                An Identity and Access Management (IAM) policy, which specifies access
+                controls for Google Cloud resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members, or principals, to a single role.
+                Principals can be user accounts, service accounts,
+                Google groups, and domains (such as G Suite). A role
+                is a named list of permissions; each role can be an
+                IAM predefined role or a user-created custom role.
+
+                For some types of Google Cloud resources, a binding
+                can also specify a condition, which is a logical
+                expression that allows access to a resource only if
+                the expression evaluates to true. A condition can add
+                constraints based on attributes of the request, the
+                resource, or both. To learn which resources support
+                conditions in their IAM policies, see the [IAM
+                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+
+                **JSON example:**
+
+                :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+
+                **YAML example:**
+
+                :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+
+                For a description of IAM and its features, see the
+                [IAM
+                documentation](\ https://cloud.google.com/iam/docs/).
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [resource]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - The request isn't a proto-plus wrapped type,
+        #   so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+        elif not request:
+            request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.set_iam_policy
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
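+        # (This check raises ValueError when the client's configured universe
+        # domain does not match the credentials' universe domain.)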
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def test_iam_permissions(
+        self,
+        request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        permissions: Optional[MutableSequence[str]] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> iam_policy_pb2.TestIamPermissionsResponse:
+        r"""Tests a set of permissions on the given bucket, object, or
+        managed folder to see which, if any, are held by the caller. The
+        ``resource`` field in the request should be
+        ``projects/_/buckets/{bucket}`` for a bucket,
+        ``projects/_/buckets/{bucket}/objects/{object}`` for an object,
+        or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            async def sample_test_iam_permissions():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.TestIamPermissionsRequest(
+                    resource="resource_value",
+                    permissions=['permissions_value1', 'permissions_value2'],
+                )
+
+                # Make the request
+                response = await client.test_iam_permissions(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+                The request object. Request message for ``TestIamPermissions`` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (:class:`MutableSequence[str]`):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
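+        # For example (resource and permission names illustrative), a single
+        # call can probe several permissions at once:
+        #   await client.test_iam_permissions(
+        #       resource="projects/_/buckets/my-bucket",
+        #       permissions=["storage.objects.get", "storage.objects.delete"])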
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [resource, permissions]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - The request isn't a proto-plus wrapped type,
+        #   so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+        elif not request:
+            request = iam_policy_pb2.TestIamPermissionsRequest(
+                resource=resource, permissions=permissions
+            )
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.test_iam_permissions
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def update_bucket(
+        self,
+        request: Optional[Union[storage.UpdateBucketRequest, dict]] = None,
+        *,
+        bucket: Optional[storage.Bucket] = None,
+        update_mask: Optional[field_mask_pb2.FieldMask] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Updates a bucket. Equivalent to JSON API's
+        storage.buckets.patch method.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_update_bucket():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.UpdateBucketRequest(
+                )
+
+                # Make the request
+                response = await client.update_bucket(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.UpdateBucketRequest, dict]]):
+                The request object. Request for UpdateBucket method.
+            bucket (:class:`google.cloud.storage_v2.types.Bucket`):
+                Required. The bucket to update. The bucket's ``name``
+                field will be used to identify the bucket.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+                Required. List of fields to be updated.
+
+                To specify ALL fields, equivalent to the JSON API's
+                "update" function, specify a single field with the value
+                ``*``. Note: not recommended. If a new field is
+                introduced at a later time, an older client updating
+                with the ``*`` may accidentally reset the new field's
+                value.
+
+                Not specifying any fields is an error.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, update_mask]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.UpdateBucketRequest):
+            request = storage.UpdateBucketRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if bucket is not None:
+            request.bucket = bucket
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.update_bucket
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket.name)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def compose_object(
+        self,
+        request: Optional[Union[storage.ComposeObjectRequest, dict]] = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Object:
+        r"""Concatenates a list of existing objects into a new
+        object in the same bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_compose_object():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.ComposeObjectRequest(
+                )
+
+                # Make the request
+                response = await client.compose_object(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.ComposeObjectRequest, dict]]):
+                The request object. Request message for ComposeObject.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Object:
+                An object.
+        """
+        # Create or coerce a protobuf request object.
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.ComposeObjectRequest):
+            request = storage.ComposeObjectRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.compose_object
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.destination.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_object(
+        self,
+        request: Optional[Union[storage.DeleteObjectRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        object_: Optional[str] = None,
+        generation: Optional[int] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> None:
+        r"""Deletes an object and its metadata. Deletions are permanent if
+        versioning is not enabled for the bucket, or if the generation
+        parameter is used, or if `soft
+        delete <https://cloud.google.com/storage/docs/soft-delete>`__ is
+        not enabled for the bucket. When this API is used to delete an
+        object from a bucket that has soft delete policy enabled, the
+        object becomes soft deleted, and the ``softDeleteTime`` and
+        ``hardDeleteTime`` properties are set on the object. This API
+        cannot be used to permanently delete soft-deleted objects.
+        Soft-deleted objects are permanently deleted according to their
+        ``hardDeleteTime``.
+
+        You can use the
+        [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API
+        to restore soft-deleted objects until the soft delete retention
+        period has passed.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.delete`` `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_delete_object():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.DeleteObjectRequest(
+                    bucket="bucket_value",
+                    object_="object__value",
+                )
+
+                # Make the request
+                await client.delete_object(request=request)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.DeleteObjectRequest, dict]]):
+                The request object. Message for deleting an object. ``bucket`` and
+                ``object`` **must** be set.
+            bucket (:class:`str`):
+                Required. Name of the bucket in which
+                the object resides.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            object_ (:class:`str`):
+                Required. The name of the finalized object to delete.
+                Note: If you want to delete an unfinalized resumable
+                upload please use ``CancelResumableWrite``.
+
+                This corresponds to the ``object_`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            generation (:class:`int`):
+                Optional. If present, permanently
+                deletes a specific revision of this
+                object (as opposed to the latest
+                version, the default).
+
+                This corresponds to the ``generation`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, object_, generation]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.DeleteObjectRequest):
+            request = storage.DeleteObjectRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
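+        # For example (names and generation number illustrative), supplying
+        # `generation` targets a single revision rather than the live object:
+        #   await client.delete_object(
+        #       bucket="projects/_/buckets/my-bucket",
+        #       object_="my-object",
+        #       generation=1234)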
+        if bucket is not None:
+            request.bucket = bucket
+        if object_ is not None:
+            request.object_ = object_
+        if generation is not None:
+            request.generation = generation
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.delete_object
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+    async def restore_object(
+        self,
+        request: Optional[Union[storage.RestoreObjectRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        object_: Optional[str] = None,
+        generation: Optional[int] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Object:
+        r"""Restores a soft-deleted object.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_restore_object():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.RestoreObjectRequest(
+                    bucket="bucket_value",
+                    object_="object__value",
+                    generation=1068,
+                )
+
+                # Make the request
+                response = await client.restore_object(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.RestoreObjectRequest, dict]]):
+                The request object. Message for restoring an object. ``bucket``, ``object``,
+                and ``generation`` **must** be set.
+            bucket (:class:`str`):
+                Required. Name of the bucket in which
+                the object resides.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            object_ (:class:`str`):
+                Required. The name of the object to
+                restore.
+
+                This corresponds to the ``object_`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            generation (:class:`int`):
+                Required. The specific revision of
+                the object to restore.
+
+                This corresponds to the ``generation`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Object:
+                An object.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, object_, generation]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.RestoreObjectRequest):
+            request = storage.RestoreObjectRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if bucket is not None:
+            request.bucket = bucket
+        if object_ is not None:
+            request.object_ = object_
+        if generation is not None:
+            request.generation = generation
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.restore_object
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def cancel_resumable_write(
+        self,
+        request: Optional[Union[storage.CancelResumableWriteRequest, dict]] = None,
+        *,
+        upload_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.CancelResumableWriteResponse:
+        r"""Cancels an in-progress resumable upload.
+
+        Any attempts to write to the resumable upload after
+        cancelling the upload will fail.
+
+        The behavior for currently in progress write operations
+        is not guaranteed - they could either complete before
+        the cancellation or fail if the cancellation completes
+        first.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_cancel_resumable_write():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.CancelResumableWriteRequest(
+                    upload_id="upload_id_value",
+                )
+
+                # Make the request
+                response = await client.cancel_resumable_write(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.CancelResumableWriteRequest, dict]]):
+                The request object. Message for canceling an in-progress resumable upload.
+                ``upload_id`` **must** be set.
+            upload_id (:class:`str`):
+                Required. The upload_id of the resumable upload to
+                cancel. This should be copied from the ``upload_id``
+                field of ``StartResumableWriteResponse``.
+
+                This corresponds to the ``upload_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.CancelResumableWriteResponse:
+                Empty response message for canceling
+                an in-progress resumable upload, will be
+                extended as needed.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [upload_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.CancelResumableWriteRequest):
+            request = storage.CancelResumableWriteRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if upload_id is not None:
+            request.upload_id = upload_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.cancel_resumable_write
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.upload_id)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_object(
+        self,
+        request: Optional[Union[storage.GetObjectRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        object_: Optional[str] = None,
+        generation: Optional[int] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Object:
+        r"""Retrieves object metadata.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.get`` `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket. To return object ACLs, the authenticated user
+        must also have the ``storage.objects.getIamPolicy`` permission.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_get_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.GetObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + response = await client.get_object(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.GetObjectRequest, dict]]): + The request object. Request message for GetObject. + bucket (:class:`str`): + Required. Name of the bucket in which + the object resides. + + This corresponds to the ``bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + object_ (:class:`str`): + Required. Name of the object. + This corresponds to the ``object_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + generation (:class:`int`): + Optional. If present, selects a + specific revision of this object (as + opposed to the latest version, the + default). + + This corresponds to the ``generation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.types.Object: + An object. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [bucket, object_, generation] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.GetObjectRequest): + request = storage.GetObjectRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if bucket is not None: + request.bucket = bucket + if object_ is not None: + request.object_ = object_ + if generation is not None: + request.generation = generation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
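+        # (The `retry` and `timeout` arguments override the client defaults
+        # for this call only; for example, `timeout=30.0` bounds the request
+        # and `retry=None` disables retries. Values illustrative.)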
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.get_object
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def read_object(
+        self,
+        request: Optional[Union[storage.ReadObjectRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        object_: Optional[str] = None,
+        generation: Optional[int] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> Awaitable[AsyncIterable[storage.ReadObjectResponse]]:
+        r"""Retrieves object data.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.get`` `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            async def sample_read_object():
+                # Create a client
+                client = storage_v2.StorageAsyncClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.ReadObjectRequest(
+                    bucket="bucket_value",
+                    object_="object__value",
+                )
+
+                # Make the request
+                stream = await client.read_object(request=request)
+
+                # Handle the response
+                async for response in stream:
+                    print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.storage_v2.types.ReadObjectRequest, dict]]):
+                The request object. Request message for ReadObject.
+            bucket (:class:`str`):
+                Required. The name of the bucket
+                containing the object to read.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            object_ (:class:`str`):
+                Required. The name of the object to
+                read.
+
+                This corresponds to the ``object_`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            generation (:class:`int`):
+                Optional. If present, selects a
+                specific revision of this object (as
+                opposed to the latest version, the
+                default).
+
+                This corresponds to the ``generation`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            AsyncIterable[google.cloud.storage_v2.types.ReadObjectResponse]:
+                Response message for ReadObject.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, object_, generation]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.ReadObjectRequest):
+            request = storage.ReadObjectRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if bucket is not None:
+            request.bucket = bucket
+        if object_ is not None:
+            request.object_ = object_
+        if generation is not None:
+            request.generation = generation
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.read_object
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def bidi_read_object(
+        self,
+        requests: Optional[AsyncIterator[storage.BidiReadObjectRequest]] = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> Awaitable[AsyncIterable[storage.BidiReadObjectResponse]]:
+        r"""Reads an object's data.
+
+        This is a bi-directional API with the added support for reading
+        multiple ranges within one stream both within and across
+        multiple messages. If the server encountered an error for any of
+        the inputs, the stream will be closed with the relevant error
+        code. Because the API allows for multiple outstanding requests,
+        when the stream is closed the error response will contain a
+        BidiReadObjectRangesError proto in the error extension
+        describing the error for each outstanding read_id.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.get``
+
+        `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket.
+
+        This API is currently in preview and is not yet available for
+        general use.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_bidi_read_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.BidiReadObjectRequest( + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiReadObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = await client.bidi_read_object(requests=request_generator()) + + # Handle the response + async for response in stream: + print(response) + + Args: + requests (AsyncIterator[`google.cloud.storage_v2.types.BidiReadObjectRequest`]): + The request object AsyncIterator. Request message for BidiReadObject. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + AsyncIterable[google.cloud.storage_v2.types.BidiReadObjectResponse]: + Response message for BidiReadObject. + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.bidi_read_object + ] + + header_params = {} + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_object( + self, + request: Optional[Union[storage.UpdateObjectRequest, dict]] = None, + *, + object_: Optional[storage.Object] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> storage.Object: + r"""Updates an object's metadata. + Equivalent to JSON API's storage.objects.patch. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_update_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.UpdateObjectRequest( + ) + + # Make the request + response = await client.update_object(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.UpdateObjectRequest, dict]]): + The request object. Request message for UpdateObject. + object_ (:class:`google.cloud.storage_v2.types.Object`): + Required. The object to update. + The object's bucket and name fields are + used to identify the object to update. + If present, the object's generation + field selects a specific revision of + this object whose metadata should be + updated. Otherwise, assumes the live + version of the object. + + This corresponds to the ``object_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. List of fields to be updated. + + To specify ALL fields, equivalent to the JSON API's + "update" function, specify a single field with the value + ``*``. Note: not recommended. If a new field is + introduced at a later time, an older client updating + with the ``*`` may accidentally reset the new field's + value. + + Not specifying any fields is an error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.types.Object: + An object. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [object_, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.UpdateObjectRequest): + request = storage.UpdateObjectRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if object_ is not None: + request.object_ = object_ + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
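+ # The `x-goog-request-params` routing header below is derived from
+ # the resource itself (`request.object.bucket`), since UpdateObject
+ # carries the object inline rather than as separate bucket/name
+ # fields.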
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_object
+ ]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.object.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def write_object(
+ self,
+ requests: Optional[AsyncIterator[storage.WriteObjectRequest]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.WriteObjectResponse:
+ r"""Stores a new object and metadata.
+
+ An object can be written either in a single message stream or in
+ a resumable sequence of message streams. To write using a single
+ stream, the client should include in the first message of the
+ stream a ``WriteObjectSpec`` describing the destination bucket,
+ object, and any preconditions. Additionally, the final message
+ must set 'finish_write' to true, or else it is an error.
+
+ For a resumable write, the client should instead call
+ ``StartResumableWrite()``, populating a ``WriteObjectSpec`` into
+ that request. The client should then attach the returned ``upload_id``
+ to the first message of each following call to ``WriteObject``.
+ If the stream is closed before finishing the upload (either
+ explicitly by the client or due to a network error or an error
+ response from the server), the client should do as follows:
+
+ - Check the result Status of the stream, to determine if
+ writing can be resumed on this stream or must be restarted
+ from scratch (by calling ``StartResumableWrite()``). The
+ resumable errors are DEADLINE_EXCEEDED, INTERNAL, and
+ UNAVAILABLE. For each case, the client should use binary
+ exponential backoff before retrying. Additionally, writes can
+ be resumed after RESOURCE_EXHAUSTED errors, but only after
+ taking appropriate measures, which may include reducing
+ aggregate send rate across clients and/or requesting a quota
+ increase for your project.
+ - If the call to ``WriteObject`` returns ``ABORTED``, that
+ indicates concurrent attempts to update the resumable write,
+ caused either by multiple racing clients or by a single
+ client where the previous request was timed out on the client
+ side but nonetheless reached the server. In this case the
+ client should take steps to prevent further concurrent writes
+ (e.g., increase the timeouts, stop using more than one
+ process to perform the upload, etc.), and then should follow
+ the steps below for resuming the upload.
+ - For resumable errors, the client should call
+ ``QueryWriteStatus()`` and then continue writing from the
+ returned ``persisted_size``. This may be less than the amount
+ of data the client previously sent. Note also that it is
+ acceptable to send data starting at an offset earlier than
+ the returned ``persisted_size``; in this case, the service
+ will skip data at offsets that were already persisted
+ (without checking that it matches the previously written
+ data), and write only the data starting from the persisted
+ offset. Even though the data isn't written, it may still
+ incur a performance cost over resuming at the correct write
+ offset. This behavior can make client-side handling simpler
+ in some cases.
+ - Clients must only send data that is a multiple of 256 KiB per
+ message, unless the object is being finished with
+ ``finish_write`` set to ``true``.
+
+ The service will not view the object as complete until the
+ client has sent a ``WriteObjectRequest`` with ``finish_write``
+ set to ``true``. Sending any requests on a stream after sending
+ a request with ``finish_write`` set to ``true`` will cause an
+ error. The client **should** check the response it receives to
+ determine how much data the service was able to commit and
+ whether the service views the object as complete.
+
+ Attempting to resume an already finalized object will result in
+ an OK status, with a ``WriteObjectResponse`` containing the
+ finalized object's metadata.
+
+ Alternatively, the BidiWriteObject operation may be used to
+ write an object with controls over flushing and the ability to
+ determine the current persisted size.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.create`` `IAM
+ permission <https://cloud.google.com/iam/docs/overview#permissions>`__
+ on the bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ async def sample_write_object():
+ # Create a client
+ client = storage_v2.StorageAsyncClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.WriteObjectRequest(
+ upload_id="upload_id_value",
+ write_offset=1297,
+ )
+
+ # This method expects an iterator which contains
+ # 'storage_v2.WriteObjectRequest' objects
+ # Here we create a generator that yields a single `request` for
+ # demonstrative purposes.
+ requests = [request]
+
+ def request_generator():
+ for request in requests:
+ yield request
+
+ # Make the request
+ response = await client.write_object(requests=request_generator())
+
+ # Handle the response
+ print(response)
+
+ Args:
+ requests (AsyncIterator[`google.cloud.storage_v2.types.WriteObjectRequest`]):
+ The request object AsyncIterator. Request message for WriteObject.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.WriteObjectResponse:
+ Response message for WriteObject.
+ """
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.write_object
+ ]
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ requests,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response + + def bidi_write_object( + self, + requests: Optional[AsyncIterator[storage.BidiWriteObjectRequest]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Awaitable[AsyncIterable[storage.BidiWriteObjectResponse]]: + r"""Stores a new object and metadata. + + This is similar to the WriteObject call with the added support + for manual flushing of persisted state, and the ability to + determine current persisted size without closing the stream. + + The client may specify one or both of the ``state_lookup`` and + ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is + specified, the data written so far will be persisted to storage. + If ``state_lookup`` is specified, the service will respond with + a BidiWriteObjectResponse that contains the persisted size. If + both ``flush`` and ``state_lookup`` are specified, the flush + will always occur before a ``state_lookup``, so that both may be + set in the same request and the returned state will be the state + of the object post-flush. When the stream is closed, a + BidiWriteObjectResponse will always be sent to the client, + regardless of the value of ``state_lookup``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_bidi_write_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.BidiWriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiWriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = await client.bidi_write_object(requests=request_generator()) + + # Handle the response + async for response in stream: + print(response) + + Args: + requests (AsyncIterator[`google.cloud.storage_v2.types.BidiWriteObjectRequest`]): + The request object AsyncIterator. Request message for BidiWriteObject. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + AsyncIterable[google.cloud.storage_v2.types.BidiWriteObjectResponse]: + Response message for BidiWriteObject. + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.bidi_write_object + ] + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = rpc(
+ requests,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_objects(
+ self,
+ request: Optional[Union[storage.ListObjectsRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListObjectsAsyncPager:
+ r"""Retrieves a list of objects matching the criteria.
+
+ **IAM Permissions**:
+
+ The authenticated user requires ``storage.objects.list`` `IAM
+ permission <https://cloud.google.com/iam/docs/overview#permissions>`__
+ to use this method. To return object ACLs, the authenticated
+ user must also have the ``storage.objects.getIamPolicy``
+ permission.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ async def sample_list_objects():
+ # Create a client
+ client = storage_v2.StorageAsyncClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.ListObjectsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_objects(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.storage_v2.types.ListObjectsRequest, dict]]):
+ The request object. Request message for ListObjects.
+ parent (:class:`str`):
+ Required. Name of the bucket in which
+ to look for objects.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.services.storage.pagers.ListObjectsAsyncPager:
+ The result of a call to
+ Objects.ListObjects
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.ListObjectsRequest):
+ request = storage.ListObjectsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_objects
+ ]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.parent)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListObjectsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def rewrite_object(
+ self,
+ request: Optional[Union[storage.RewriteObjectRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.RewriteResponse:
+ r"""Rewrites a source object to a destination object.
+ Optionally overrides metadata.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ async def sample_rewrite_object():
+ # Create a client
+ client = storage_v2.StorageAsyncClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.RewriteObjectRequest(
+ destination_name="destination_name_value",
+ destination_bucket="destination_bucket_value",
+ source_bucket="source_bucket_value",
+ source_object="source_object_value",
+ )
+
+ # Make the request
+ response = await client.rewrite_object(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.storage_v2.types.RewriteObjectRequest, dict]]):
+ The request object. Request message for RewriteObject. If the source object
+ is encrypted using a Customer-Supplied Encryption Key
+ the key information must be provided in the
+ copy_source_encryption_algorithm,
+ copy_source_encryption_key_bytes, and
+ copy_source_encryption_key_sha256_bytes fields. If the
+ destination object should be encrypted the keying
+ information should be provided in the
+ encryption_algorithm, encryption_key_bytes, and
+ encryption_key_sha256_bytes fields of the
+ common_object_request_params.customer_encryption field.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
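+
+ A sketch of the standard rewrite-token loop, for copies that do
+ not complete in one call (``done`` and ``rewrite_token`` are
+ fields of the v2 ``RewriteResponse``):
+
+ .. code-block:: python
+
+ response = await client.rewrite_object(request=request)
+ while not response.done:
+ # Resume the copy where the previous call left off.
+ request.rewrite_token = response.rewrite_token
+ response = await client.rewrite_object(request=request)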
+
+ Returns:
+ google.cloud.storage_v2.types.RewriteResponse:
+ A rewrite response.
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.RewriteObjectRequest):
+ request = storage.RewriteObjectRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.rewrite_object
+ ]
+
+ header_params = {}
+
+ if request.source_bucket:
+ header_params["source_bucket"] = request.source_bucket
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.destination_bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def start_resumable_write(
+ self,
+ request: Optional[Union[storage.StartResumableWriteRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.StartResumableWriteResponse:
+ r"""Starts a resumable write operation. This method is part of the
+ `Resumable
+ upload <https://cloud.google.com/storage/docs/resumable-uploads>`__
+ feature. This allows you to upload large objects in multiple
+ chunks, which is more resilient to network interruptions than a
+ single upload. The validity duration of the write operation, and
+ the consequences of it becoming invalid, are service-dependent.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.create`` `IAM
+ permission <https://cloud.google.com/iam/docs/overview#permissions>`__
+ on the bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ async def sample_start_resumable_write():
+ # Create a client
+ client = storage_v2.StorageAsyncClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.StartResumableWriteRequest(
+ )
+
+ # Make the request
+ response = await client.start_resumable_write(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.storage_v2.types.StartResumableWriteRequest, dict]]):
+ The request object. Request message for StartResumableWrite.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
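+
+ A minimal sketch of wiring the result into a finalizing
+ ``WriteObject`` request (this assumes ``request`` carried a
+ populated ``write_object_spec``):
+
+ .. code-block:: python
+
+ start = await client.start_resumable_write(request=request)
+ final = storage_v2.WriteObjectRequest(
+ upload_id=start.upload_id,
+ write_offset=0,
+ finish_write=True,
+ )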
+
+ Returns:
+ google.cloud.storage_v2.types.StartResumableWriteResponse:
+ Response object for StartResumableWrite.
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.StartResumableWriteRequest):
+ request = storage.StartResumableWriteRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.start_resumable_write
+ ]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(
+ request.write_object_spec.resource.bucket
+ )
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def query_write_status(
+ self,
+ request: Optional[Union[storage.QueryWriteStatusRequest, dict]] = None,
+ *,
+ upload_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.QueryWriteStatusResponse:
+ r"""Determines the ``persisted_size`` of an object that is being
+ written. This method is part of the `resumable
+ upload <https://cloud.google.com/storage/docs/resumable-uploads>`__
+ feature. The returned value is the size of the object that has
+ been persisted so far. The value can be used as the
+ ``write_offset`` for the next ``Write()`` call.
+
+ If the object does not exist, meaning if it was deleted, or the
+ first ``Write()`` has not yet reached the service, this method
+ returns the error ``NOT_FOUND``.
+
+ This method is useful for clients that buffer data and need to
+ know which data can be safely evicted. The client can call
+ ``QueryWriteStatus()`` at any time to determine how much data
+ has been logged for this object. For any sequence of
+ ``QueryWriteStatus()`` calls for a given object name, the
+ sequence of returned ``persisted_size`` values is
+ non-decreasing.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ async def sample_query_write_status():
+ # Create a client
+ client = storage_v2.StorageAsyncClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.QueryWriteStatusRequest(
+ upload_id="upload_id_value",
+ )
+
+ # Make the request
+ response = await client.query_write_status(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.storage_v2.types.QueryWriteStatusRequest, dict]]):
+ The request object. Request object for ``QueryWriteStatus``.
+ upload_id (:class:`str`):
+ Required. The name of the resume
+ token for the object whose write status
+ is being requested.
+
+ This corresponds to the ``upload_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.QueryWriteStatusResponse:
+ Response object for QueryWriteStatus.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [upload_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.QueryWriteStatusRequest):
+ request = storage.QueryWriteStatusRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if upload_id is not None:
+ request.upload_id = upload_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.query_write_status
+ ]
+
+ header_params = {}
+
+ routing_param_regex = re.compile(
+ "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.upload_id)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def move_object(
+ self,
+ request: Optional[Union[storage.MoveObjectRequest, dict]] = None,
+ *,
+ bucket: Optional[str] = None,
+ source_object: Optional[str] = None,
+ destination_object: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.Object:
+ r"""Moves the source object to the destination object in
+ the same bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + async def sample_move_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.MoveObjectRequest( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + # Make the request + response = await client.move_object(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.storage_v2.types.MoveObjectRequest, dict]]): + The request object. Request message for MoveObject. + bucket (:class:`str`): + Required. Name of the bucket in which + the object resides. + + This corresponds to the ``bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_object (:class:`str`): + Required. Name of the source object. + This corresponds to the ``source_object`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + destination_object (:class:`str`): + Required. Name of the destination + object. + + This corresponds to the ``destination_object`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.types.Object: + An object. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [bucket, source_object, destination_object] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.MoveObjectRequest): + request = storage.MoveObjectRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if bucket is not None: + request.bucket = bucket + if source_object is not None: + request.source_object = source_object + if destination_object is not None: + request.destination_object = destination_object + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.move_object
+ ]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def __aenter__(self) -> "StorageAsyncClient":
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
+
+
+__all__ = ("StorageAsyncClient",)
diff --git a/google/cloud/storage_v2/services/storage/client.py b/google/cloud/storage_v2/services/storage/client.py
new file mode 100644
index 000000000..2026a8918
--- /dev/null
+++ b/google/cloud/storage_v2/services/storage/client.py
@@ -0,0 +1,3836 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging +import os +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Iterable, + Iterator, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.storage_v2 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + +from google.cloud.storage_v2.services.storage import pagers +from google.cloud.storage_v2.types import storage +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import StorageTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import StorageGrpcTransport +from .transports.grpc_asyncio import StorageGrpcAsyncIOTransport + + +class StorageClientMeta(type): + """Metaclass for the Storage client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[StorageTransport]] + _transport_registry["grpc"] = StorageGrpcTransport + _transport_registry["grpc_asyncio"] = StorageGrpcAsyncIOTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[StorageTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class StorageClient(metaclass=StorageClientMeta): + """API Overview and Naming Syntax + ------------------------------ + + The Cloud Storage gRPC API allows applications to read and write + data through the abstractions of buckets and objects. For a + description of these abstractions please see + https://cloud.google.com/storage/docs. + + Resources are named as follows: + + - Projects are referred to as they are defined by the Resource + Manager API, using strings like ``projects/123456`` or + ``projects/my-string-id``. 
+
+ - Buckets are named using string names of the form:
+ ``projects/{project}/buckets/{bucket}`` For globally unique
+ buckets, ``_`` may be substituted for the project.
+
+ - Objects are uniquely identified by their name along with the name
+ of the bucket they belong to, as separate strings in this API.
+ For example:
+
+ ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket'
+ object: 'my-object' } Note that object names can contain ``/``
+ characters, which are treated as any other character (no special
+ directory semantics).
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+ DEFAULT_ENDPOINT = "storage.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ _DEFAULT_ENDPOINT_TEMPLATE = "storage.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ StorageClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ StorageClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> StorageTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ StorageTransport: The transport used by the client
+ instance.
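+
+ A usage sketch; the property simply exposes whichever transport
+ was selected at construction time:
+
+ .. code-block:: python
+
+ client = StorageClient()
+ transport = client.transport # e.g. a StorageGrpcTransport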
+ """ + return self._transport + + @staticmethod + def bucket_path( + project: str, + bucket: str, + ) -> str: + """Returns a fully-qualified bucket string.""" + return "projects/{project}/buckets/{bucket}".format( + project=project, + bucket=bucket, + ) + + @staticmethod + def parse_bucket_path(path: str) -> Dict[str, str]: + """Parses a bucket path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/buckets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def crypto_key_path( + project: str, + location: str, + key_ring: str, + crypto_key: str, + ) -> str: + """Returns a fully-qualified crypto_key string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + ) + + @staticmethod + def parse_crypto_key_path(path: str) -> Dict[str, str]: + """Parses a crypto_key path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. 
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+
+ warnings.warn(
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
+ DeprecationWarning,
+ )
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
+ @staticmethod
+ def _read_environment_variables():
+ """Returns the environment variables used by the client.
+
+ Returns:
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
+
+ Raises:
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
+ any of ["true", "false"].
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
+ is not any of ["auto", "never", "always"].
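+
+ A sketch of the returned triple when no overrides are set in the
+ environment:
+
+ .. code-block:: python
+
+ use_cert, mtls_endpoint, universe = StorageClient._read_environment_variables()
+ # -> (False, "auto", None)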
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = StorageClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = StorageClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = StorageClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. 
+ """ + universe_domain = StorageClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[str, StorageTransport, Callable[..., StorageTransport]] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the storage client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,StorageTransport,Callable[..., StorageTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the StorageTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and
+                ``transport`` was not explicitly provided, the endpoint is
+                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which has one of the following values:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto-switch to the
+                default mTLS endpoint if client certificate is present; this is
+                the default value).
+
+                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+                3. The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence, and ``universe_domain`` is
+                currently not supported for mTLS.
+
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client_options = client_options
+        if isinstance(self._client_options, dict):
+            self._client_options = client_options_lib.from_dict(self._client_options)
+        if self._client_options is None:
+            self._client_options = client_options_lib.ClientOptions()
+        self._client_options = cast(
+            client_options_lib.ClientOptions, self._client_options
+        )
+
+        universe_domain_opt = getattr(self._client_options, "universe_domain", None)
+
+        (
+            self._use_client_cert,
+            self._use_mtls_endpoint,
+            self._universe_domain_env,
+        ) = StorageClient._read_environment_variables()
+        self._client_cert_source = StorageClient._get_client_cert_source(
+            self._client_options.client_cert_source, self._use_client_cert
+        )
+        self._universe_domain = StorageClient._get_universe_domain(
+            universe_domain_opt, self._universe_domain_env
+        )
+        self._api_endpoint = None  # updated below, depending on `transport`
+
+        # Initialize the universe domain validation.
+        self._is_universe_domain_valid = False
+
+        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
+            # Setup logging.
+            client_logging.initialize_logging()
+
+        api_key_value = getattr(self._client_options, "api_key", None)
+        if api_key_value and credentials:
+            raise ValueError(
+                "client_options.api_key and credentials are mutually exclusive"
+            )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        transport_provided = isinstance(transport, StorageTransport)
+        if transport_provided:
+            # transport is a StorageTransport instance.
+            if credentials or self._client_options.credentials_file or api_key_value:
+                raise ValueError(
+                    "When providing a transport instance, "
+                    "provide its credentials directly."
+                )
+            if self._client_options.scopes:
+                raise ValueError(
+                    "When providing a transport instance, provide its scopes "
+                    "directly."
+ ) + self._transport = cast(StorageTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = self._api_endpoint or StorageClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + transport_init: Union[ + Type[StorageTransport], Callable[..., StorageTransport] + ] = ( + StorageClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., StorageTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.storage_v2.StorageClient`.", + extra={ + "serviceName": "google.storage.v2.Storage", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.storage.v2.Storage", + "credentialsType": None, + }, + ) + + def delete_bucket( + self, + request: Optional[Union[storage.DeleteBucketRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes an empty bucket. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_delete_bucket(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.DeleteBucketRequest( + name="name_value", + ) + + # Make the request + client.delete_bucket(request=request) + + Args: + request (Union[google.cloud.storage_v2.types.DeleteBucketRequest, dict]): + The request object. Request message for DeleteBucket. + name (str): + Required. Name of a bucket to delete. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [name]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.DeleteBucketRequest):
+            request = storage.DeleteBucketRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_bucket]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.name)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+    def get_bucket(
+        self,
+        request: Optional[Union[storage.GetBucketRequest, dict]] = None,
+        *,
+        name: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Returns metadata for the specified bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_get_bucket():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.GetBucketRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                response = client.get_bucket(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.GetBucketRequest, dict]):
+                The request object. Request message for GetBucket.
+            name (str):
+                Required. Name of a bucket.
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [name]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.GetBucketRequest):
+            request = storage.GetBucketRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_bucket]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.name)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def create_bucket(
+        self,
+        request: Optional[Union[storage.CreateBucketRequest, dict]] = None,
+        *,
+        parent: Optional[str] = None,
+        bucket: Optional[storage.Bucket] = None,
+        bucket_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Creates a new bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_create_bucket():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.CreateBucketRequest(
+                    parent="parent_value",
+                    bucket_id="bucket_id_value",
+                )
+
+                # Make the request
+                response = client.create_bucket(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.CreateBucketRequest, dict]):
+                The request object. Request message for CreateBucket.
+            parent (str):
+                Required. The project to which this bucket will belong.
+                This field must either be empty or ``projects/_``. The
+                project ID that owns this bucket should be specified in
+                the ``bucket.project`` field.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            bucket (google.cloud.storage_v2.types.Bucket):
+                Optional. Properties of the new bucket being inserted.
+                The name of the bucket is specified in the ``bucket_id``
+                field. Populating ``bucket.name`` field will result in
+                an error. The project of the bucket must be specified in
+                the ``bucket.project`` field. This field must be in
+                ``projects/{projectIdentifier}`` format,
+                {projectIdentifier} can be the project ID or project
+                number. The ``parent`` field must be either empty or
+                ``projects/_``.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            bucket_id (str):
+                Required. The ID to use for this bucket, which will
+                become the final component of the bucket's resource
+                name. For example, the value ``foo`` might result in a
+                bucket with the name ``projects/123456/buckets/foo``.
+
+                This corresponds to the ``bucket_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [parent, bucket, bucket_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.CreateBucketRequest):
+            request = storage.CreateBucketRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if parent is not None:
+                request.parent = parent
+            if bucket is not None:
+                request.bucket = bucket
+            if bucket_id is not None:
+                request.bucket_id = bucket_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_bucket]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.parent)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.bucket.project)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
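+        # NOTE: `_validate_universe_domain` is currently a no-op (it always
+        # returns True; see the b/349488459 note above), but the call is kept
+        # so validation can be re-enabled without touching each RPC method.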
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_buckets( + self, + request: Optional[Union[storage.ListBucketsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListBucketsPager: + r"""Retrieves a list of buckets for a given project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_list_buckets(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ListBucketsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_buckets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.storage_v2.types.ListBucketsRequest, dict]): + The request object. Request message for ListBuckets. + parent (str): + Required. The project whose buckets + we are listing. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.services.storage.pagers.ListBucketsPager: + The result of a call to + Buckets.ListBuckets + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, storage.ListBucketsRequest): + request = storage.ListBucketsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
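+        # `_wrapped_methods` maps each raw transport method to a callable
+        # that already carries the default retry and timeout policy
+        # configured for that RPC.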
+        rpc = self._transport._wrapped_methods[self._transport.list_buckets]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<project>.*)$")
+        regex_match = routing_param_regex.match(request.parent)
+        if regex_match and regex_match.group("project"):
+            header_params["project"] = regex_match.group("project")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBucketsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def lock_bucket_retention_policy(
+        self,
+        request: Optional[Union[storage.LockBucketRetentionPolicyRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Locks retention policy on a bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_lock_bucket_retention_policy():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.LockBucketRetentionPolicyRequest(
+                    bucket="bucket_value",
+                    if_metageneration_match=2413,
+                )
+
+                # Make the request
+                response = client.lock_bucket_retention_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.LockBucketRetentionPolicyRequest, dict]):
+                The request object. Request message for
+                LockBucketRetentionPolicyRequest.
+            bucket (str):
+                Required. Name of a bucket.
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.LockBucketRetentionPolicyRequest):
+            request = storage.LockBucketRetentionPolicyRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if bucket is not None:
+                request.bucket = bucket
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[
+            self._transport.lock_bucket_retention_policy
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_iam_policy(
+        self,
+        request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Gets the IAM policy for a specified bucket. The ``resource``
+        field in the request should be ``projects/_/buckets/{bucket}``
+        for a bucket, or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            def sample_get_iam_policy():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.GetIamPolicyRequest(
+                    resource="resource_value",
+                )
+
+                # Make the request
+                response = client.get_iam_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
+                The request object. Request message for ``GetIamPolicy`` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy is being requested. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+ + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + if isinstance(request, dict): + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.GetIamPolicyRequest() + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
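+        # The IAM RPCs are looked up in the same wrapped-method table as the
+        # storage RPCs, so they share the same retry/timeout default handling.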
+        rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def set_iam_policy(
+        self,
+        request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Updates an IAM policy for the specified bucket. The ``resource``
+        field in the request should be ``projects/_/buckets/{bucket}``
+        for a bucket, or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            def sample_set_iam_policy():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.SetIamPolicyRequest(
+                    resource="resource_value",
+                )
+
+                # Make the request
+                response = client.set_iam_policy(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
+                The request object. Request message for ``SetIamPolicy`` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy is being specified. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                An Identity and Access Management (IAM) policy, which specifies access
+                controls for Google Cloud resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members, or principals, to a single role.
+                Principals can be user accounts, service accounts,
+                Google groups, and domains (such as G Suite). A role
+                is a named list of permissions; each role can be an
+                IAM predefined role or a user-created custom role.
+
+                For some types of Google Cloud resources, a binding
+                can also specify a condition, which is a logical
+                expression that allows access to a resource only if
+                the expression evaluates to true. A condition can add
+                constraints based on attributes of the request, the
+                resource, or both. To learn which resources support
+                conditions in their IAM policies, see the [IAM
+                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+
+                **JSON example:**
+
+                :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+
+                **YAML example:**
+
+                :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+
+                For a description of IAM and its features, see the
+                [IAM
+                documentation](\ https://cloud.google.com/iam/docs/).
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [resource]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        if isinstance(request, dict):
+            # - The request isn't a proto-plus wrapped type,
+            #   so it must be constructed via keyword expansion.
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+        elif not request:
+            # Null request, just make one.
+            request = iam_policy_pb2.SetIamPolicyRequest()
+            if resource is not None:
+                request.resource = resource
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def test_iam_permissions(
+        self,
+        request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+        *,
+        resource: Optional[str] = None,
+        permissions: Optional[MutableSequence[str]] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> iam_policy_pb2.TestIamPermissionsResponse:
+        r"""Tests a set of permissions on the given bucket, object, or
+        managed folder to see which, if any, are held by the caller. The
+        ``resource`` field in the request should be
+        ``projects/_/buckets/{bucket}`` for a bucket,
+        ``projects/_/buckets/{bucket}/objects/{object}`` for an object,
+        or
+        ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}``
+        for a managed folder.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+            from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+            def sample_test_iam_permissions():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = iam_policy_pb2.TestIamPermissionsRequest(
+                    resource="resource_value",
+                    permissions=['permissions_value1', 'permissions_value2'],
+                )
+
+                # Make the request
+                response = client.test_iam_permissions(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
+                The request object. Request message for ``TestIamPermissions`` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (MutableSequence[str]):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
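+        # The check below enforces the GAPIC convention that `request` and
+        # the flattened per-field keyword arguments are mutually exclusive.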
+        flattened_params = [resource, permissions]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        if isinstance(request, dict):
+            # - The request isn't a proto-plus wrapped type,
+            #   so it must be constructed via keyword expansion.
+            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+        elif not request:
+            # Null request, just make one.
+            request = iam_policy_pb2.TestIamPermissionsRequest()
+            if resource is not None:
+                request.resource = resource
+            if permissions:
+                request.permissions.extend(permissions)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        routing_param_regex = re.compile(
+            "^(?P<bucket>projects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?$"
+        )
+        regex_match = routing_param_regex.match(request.resource)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def update_bucket(
+        self,
+        request: Optional[Union[storage.UpdateBucketRequest, dict]] = None,
+        *,
+        bucket: Optional[storage.Bucket] = None,
+        update_mask: Optional[field_mask_pb2.FieldMask] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Bucket:
+        r"""Updates a bucket. Equivalent to JSON API's
+        storage.buckets.patch method.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_update_bucket():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.UpdateBucketRequest(
+                )
+
+                # Make the request
+                response = client.update_bucket(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.UpdateBucketRequest, dict]):
+                The request object. Request for UpdateBucket method.
+            bucket (google.cloud.storage_v2.types.Bucket):
+                Required. The bucket to update. The bucket's ``name``
+                field will be used to identify the bucket.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (google.protobuf.field_mask_pb2.FieldMask):
+                Required. List of fields to be updated.
+
+                To specify ALL fields, equivalent to the JSON API's
+                "update" function, specify a single field with the value
+                ``*``. Note: not recommended. If a new field is
+                introduced at a later time, an older client updating
+                with the ``*`` may accidentally reset the new field's
+                value.
+
+                Not specifying any fields is an error.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Bucket:
+                A bucket.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, update_mask]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.UpdateBucketRequest):
+            request = storage.UpdateBucketRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if bucket is not None:
+                request.bucket = bucket
+            if update_mask is not None:
+                request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_bucket]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.bucket.name)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def compose_object(
+        self,
+        request: Optional[Union[storage.ComposeObjectRequest, dict]] = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> storage.Object:
+        r"""Concatenates a list of existing objects into a new
+        object in the same bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_compose_object():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.ComposeObjectRequest(
+                )
+
+                # Make the request
+                response = client.compose_object(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.ComposeObjectRequest, dict]):
+                The request object. Request message for ComposeObject.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.storage_v2.types.Object:
+                An object.
+        """
+        # Create or coerce a protobuf request object.
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.ComposeObjectRequest):
+            request = storage.ComposeObjectRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.compose_object]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<bucket>.*)$")
+        regex_match = routing_param_regex.match(request.destination.bucket)
+        if regex_match and regex_match.group("bucket"):
+            header_params["bucket"] = regex_match.group("bucket")
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_object(
+        self,
+        request: Optional[Union[storage.DeleteObjectRequest, dict]] = None,
+        *,
+        bucket: Optional[str] = None,
+        object_: Optional[str] = None,
+        generation: Optional[int] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> None:
+        r"""Deletes an object and its metadata. Deletions are permanent if
+        versioning is not enabled for the bucket, or if the generation
+        parameter is used, or if `soft
+        delete <https://cloud.google.com/storage/docs/soft-delete>`__ is
+        not enabled for the bucket. When this API is used to delete an
+        object from a bucket that has soft delete policy enabled, the
+        object becomes soft deleted, and the ``softDeleteTime`` and
+        ``hardDeleteTime`` properties are set on the object. This API
+        cannot be used to permanently delete soft-deleted objects.
+        Soft-deleted objects are permanently deleted according to their
+        ``hardDeleteTime``.
+
+        You can use the
+        [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API
+        to restore soft-deleted objects until the soft delete retention
+        period has passed.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.delete`` `IAM
+        permission <https://cloud.google.com/iam/docs/overview#permissions>`__
+        on the bucket.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import storage_v2
+
+            def sample_delete_object():
+                # Create a client
+                client = storage_v2.StorageClient()
+
+                # Initialize request argument(s)
+                request = storage_v2.DeleteObjectRequest(
+                    bucket="bucket_value",
+                    object_="object__value",
+                )
+
+                # Make the request
+                client.delete_object(request=request)
+
+        Args:
+            request (Union[google.cloud.storage_v2.types.DeleteObjectRequest, dict]):
+                The request object. Message for deleting an object. ``bucket`` and
+                ``object`` **must** be set.
+            bucket (str):
+                Required. Name of the bucket in which
+                the object resides.
+
+                This corresponds to the ``bucket`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            object_ (str):
+                Required. The name of the finalized object to delete.
+                Note: If you want to delete an unfinalized resumable
+                upload please use ``CancelResumableWrite``.
+
+                This corresponds to the ``object_`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            generation (int):
+                Optional. If present, permanently
+                deletes a specific revision of this
+                object (as opposed to the latest
+                version, the default).
+
+                This corresponds to the ``generation`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [bucket, object_, generation]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, storage.DeleteObjectRequest):
+            request = storage.DeleteObjectRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if bucket is not None:
+                request.bucket = bucket
+            if object_ is not None:
+                request.object_ = object_
+            if generation is not None:
+                request.generation = generation
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
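+        # The routing header (`x-goog-request-params`) built below lets the
+        # service route this call by bucket without inspecting the request
+        # body.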
+ rpc = self._transport._wrapped_methods[self._transport.delete_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def restore_object(
+ self,
+ request: Optional[Union[storage.RestoreObjectRequest, dict]] = None,
+ *,
+ bucket: Optional[str] = None,
+ object_: Optional[str] = None,
+ generation: Optional[int] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.Object:
+ r"""Restores a soft-deleted object.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_restore_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.RestoreObjectRequest(
+ bucket="bucket_value",
+ object_="object__value",
+ generation=1068,
+ )
+
+ # Make the request
+ response = client.restore_object(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.RestoreObjectRequest, dict]):
+ The request object. Message for restoring an object. ``bucket``, ``object``,
+ and ``generation`` **must** be set.
+ bucket (str):
+ Required. Name of the bucket in which
+ the object resides.
+
+ This corresponds to the ``bucket`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ object_ (str):
+ Required. The name of the object to
+ restore.
+
+ This corresponds to the ``object_`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ generation (int):
+ Required. The specific revision of
+ the object to restore.
+
+ This corresponds to the ``generation`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.Object:
+ An object.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [bucket, object_, generation]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.RestoreObjectRequest):
+ request = storage.RestoreObjectRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if bucket is not None:
+ request.bucket = bucket
+ if object_ is not None:
+ request.object_ = object_
+ if generation is not None:
+ request.generation = generation
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.restore_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def cancel_resumable_write(
+ self,
+ request: Optional[Union[storage.CancelResumableWriteRequest, dict]] = None,
+ *,
+ upload_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.CancelResumableWriteResponse:
+ r"""Cancels an in-progress resumable upload.
+
+ Any attempts to write to the resumable upload after
+ cancelling the upload will fail.
+
+ The behavior for currently in progress write operations
+ is not guaranteed - they could either complete before
+ the cancellation or fail if the cancellation completes
+ first.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_cancel_resumable_write():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.CancelResumableWriteRequest(
+ upload_id="upload_id_value",
+ )
+
+ # Make the request
+ response = client.cancel_resumable_write(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.CancelResumableWriteRequest, dict]):
+ The request object. Message for canceling an in-progress resumable upload.
+ ``upload_id`` **must** be set.
+ upload_id (str):
+ Required. The upload_id of the resumable upload to
+ cancel. This should be copied from the ``upload_id``
+ field of ``StartResumableWriteResponse``.
+
+ This corresponds to the ``upload_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.CancelResumableWriteResponse:
+ Empty response message for canceling
+ an in-progress resumable upload, will be
+ extended as needed.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [upload_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.CancelResumableWriteRequest):
+ request = storage.CancelResumableWriteRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if upload_id is not None:
+ request.upload_id = upload_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_resumable_write]
+
+ header_params = {}
+
+ routing_param_regex = re.compile(
+ "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.upload_id)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_object(
+ self,
+ request: Optional[Union[storage.GetObjectRequest, dict]] = None,
+ *,
+ bucket: Optional[str] = None,
+ object_: Optional[str] = None,
+ generation: Optional[int] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.Object:
+ r"""Retrieves object metadata.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.get`` `IAM
+ permission `__
+ on the bucket. To return object ACLs, the authenticated user
+ must also have the ``storage.objects.getIamPolicy`` permission.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_get_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.GetObjectRequest(
+ bucket="bucket_value",
+ object_="object__value",
+ )
+
+ # Make the request
+ response = client.get_object(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.GetObjectRequest, dict]):
+ The request object. Request message for GetObject.
+ bucket (str):
+ Required. Name of the bucket in which
+ the object resides.
+
+ This corresponds to the ``bucket`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ object_ (str):
+ Required. Name of the object.
+ This corresponds to the ``object_`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ generation (int):
+ Optional. If present, selects a
+ specific revision of this object (as
+ opposed to the latest version, the
+ default).
+
+ This corresponds to the ``generation`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.Object:
+ An object.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [bucket, object_, generation]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.GetObjectRequest):
+ request = storage.GetObjectRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if bucket is not None:
+ request.bucket = bucket
+ if object_ is not None:
+ request.object_ = object_
+ if generation is not None:
+ request.generation = generation
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_object( + self, + request: Optional[Union[storage.ReadObjectRequest, dict]] = None, + *, + bucket: Optional[str] = None, + object_: Optional[str] = None, + generation: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Iterable[storage.ReadObjectResponse]: + r"""Retrieves object data. + + **IAM Permissions**: + + Requires ``storage.objects.get`` `IAM + permission `__ + on the bucket. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_read_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ReadObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + stream = client.read_object(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.storage_v2.types.ReadObjectRequest, dict]): + The request object. Request message for ReadObject. + bucket (str): + Required. The name of the bucket + containing the object to read. + + This corresponds to the ``bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + object_ (str): + Required. The name of the object to + read. + + This corresponds to the ``object_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + generation (int): + Optional. If present, selects a + specific revision of this object (as + opposed to the latest version, the + default). + + This corresponds to the ``generation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + Iterable[google.cloud.storage_v2.types.ReadObjectResponse]: + Response message for ReadObject. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [bucket, object_, generation] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, storage.ReadObjectRequest):
+ request = storage.ReadObjectRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if bucket is not None:
+ request.bucket = bucket
+ if object_ is not None:
+ request.object_ = object_
+ if generation is not None:
+ request.generation = generation
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.read_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def bidi_read_object(
+ self,
+ requests: Optional[Iterator[storage.BidiReadObjectRequest]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> Iterable[storage.BidiReadObjectResponse]:
+ r"""Reads an object's data.
+
+ This is a bi-directional API with the added support for reading
+ multiple ranges within one stream both within and across
+ multiple messages. If the server encountered an error for any of
+ the inputs, the stream will be closed with the relevant error
+ code. Because the API allows for multiple outstanding requests,
+ when the stream is closed the error response will contain a
+ BidiReadObjectRangesError proto in the error extension
+ describing the error for each outstanding read_id.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.get``
+
+ `IAM
+ permission `__
+ on the bucket.
+
+ This API is currently in preview and is not yet available for
+ general use.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_bidi_read_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.BidiReadObjectRequest(
+ )
+
+ # This method expects an iterator which contains
+ # 'storage_v2.BidiReadObjectRequest' objects
+ # Here we create a generator that yields a single `request` for
+ # demonstrative purposes.
+ requests = [request]
+
+ def request_generator():
+ for request in requests:
+ yield request
+
+ # Make the request
+ stream = client.bidi_read_object(requests=request_generator())
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
+ Args:
+ requests (Iterator[google.cloud.storage_v2.types.BidiReadObjectRequest]):
+ The request object iterator. Request message for BidiReadObject.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + Iterable[google.cloud.storage_v2.types.BidiReadObjectResponse]: + Response message for BidiReadObject. + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.bidi_read_object] + + header_params = {} + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_object( + self, + request: Optional[Union[storage.UpdateObjectRequest, dict]] = None, + *, + object_: Optional[storage.Object] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> storage.Object: + r"""Updates an object's metadata. + Equivalent to JSON API's storage.objects.patch. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_update_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.UpdateObjectRequest( + ) + + # Make the request + response = client.update_object(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.storage_v2.types.UpdateObjectRequest, dict]): + The request object. Request message for UpdateObject. + object_ (google.cloud.storage_v2.types.Object): + Required. The object to update. + The object's bucket and name fields are + used to identify the object to update. + If present, the object's generation + field selects a specific revision of + this object whose metadata should be + updated. Otherwise, assumes the live + version of the object. + + This corresponds to the ``object_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. List of fields to be updated. + + To specify ALL fields, equivalent to the JSON API's + "update" function, specify a single field with the value + ``*``. Note: not recommended. If a new field is + introduced at a later time, an older client updating + with the ``*`` may accidentally reset the new field's + value. + + Not specifying any fields is an error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.Object:
+ An object.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [object_, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.UpdateObjectRequest):
+ request = storage.UpdateObjectRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if object_ is not None:
+ request.object_ = object_
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.object_.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def write_object(
+ self,
+ requests: Optional[Iterator[storage.WriteObjectRequest]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.WriteObjectResponse:
+ r"""Stores a new object and metadata.
+
+ An object can be written either in a single message stream or in
+ a resumable sequence of message streams. To write using a single
+ stream, the client should include in the first message of the
+ stream a ``WriteObjectSpec`` describing the destination bucket,
+ object, and any preconditions. Additionally, the final message
+ must set 'finish_write' to true, or else it is an error.
+
+ For a resumable write, the client should instead call
+ ``StartResumableWrite()``, populating a ``WriteObjectSpec`` into
+ that request. They should then attach the returned ``upload_id``
+ to the first message of each following call to ``WriteObject``.
+ If the stream is closed before finishing the upload (either
+ explicitly by the client or due to a network error or an error
+ response from the server), the client should do as follows:
+
+ - Check the result Status of the stream, to determine if
+ writing can be resumed on this stream or must be restarted
+ from scratch (by calling ``StartResumableWrite()``). The
+ resumable errors are DEADLINE_EXCEEDED, INTERNAL, and
+ UNAVAILABLE. For each case, the client should use binary
+ exponential backoff before retrying. Additionally, writes can
+ be resumed after RESOURCE_EXHAUSTED errors, but only after
+ taking appropriate measures, which may include reducing
+ aggregate send rate across clients and/or requesting a quota
+ increase for your project.
+ - If the call to ``WriteObject`` returns ``ABORTED``, that
+ indicates concurrent attempts to update the resumable write,
+ caused either by multiple racing clients or by a single
+ client where the previous request was timed out on the client
+ side but nonetheless reached the server. In this case the
+ client should take steps to prevent further concurrent writes
+ (e.g., increase the timeouts, stop using more than one
+ process to perform the upload, etc.), and then should follow
+ the steps below for resuming the upload.
+ - For resumable errors, the client should call
+ ``QueryWriteStatus()`` and then continue writing from the
+ returned ``persisted_size``. This may be less than the amount
+ of data the client previously sent. Note also that it is
+ acceptable to send data starting at an offset earlier than
+ the returned ``persisted_size``; in this case, the service
+ will skip data at offsets that were already persisted
+ (without checking that it matches the previously written
+ data), and write only the data starting from the persisted
+ offset. Even though the data isn't written, it may still
+ incur a performance cost over resuming at the correct write
+ offset. This behavior can make client-side handling simpler
+ in some cases.
+ - Clients must only send data that is a multiple of 256 KiB per
+ message, unless the object is being finished with
+ ``finish_write`` set to ``true``.
+
+ The service will not view the object as complete until the
+ client has sent a ``WriteObjectRequest`` with ``finish_write``
+ set to ``true``. Sending any requests on a stream after sending
+ a request with ``finish_write`` set to ``true`` will cause an
+ error. The client **should** check the response it receives to
+ determine how much data the service was able to commit and
+ whether the service views the object as complete.
+
+ Attempting to resume an already finalized object will result in
+ an OK status, with a ``WriteObjectResponse`` containing the
+ finalized object's metadata.
+
+ Alternatively, the BidiWriteObject operation may be used to
+ write an object with controls over flushing and the ability to
+ fetch the current persisted size.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.create`` `IAM
+ permission `__
+ on the bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_write_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.WriteObjectRequest(
+ upload_id="upload_id_value",
+ write_offset=1297,
+ )
+
+ # This method expects an iterator which contains
+ # 'storage_v2.WriteObjectRequest' objects
+ # Here we create a generator that yields a single `request` for
+ # demonstrative purposes.
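+ # A more realistic generator would stream the payload in 256 KiB
+ # multiples and finish the stream explicitly. A minimal sketch,
+ # assuming an `upload_id` from StartResumableWrite and an in-memory
+ # `data` buffer (both placeholders):
+ #
+ #   def chunked_requests(upload_id, data, chunk=256 * 1024):
+ #       for offset in range(0, len(data), chunk):
+ #           yield storage_v2.WriteObjectRequest(
+ #               upload_id=upload_id,
+ #               write_offset=offset,
+ #               checksummed_data=storage_v2.ChecksummedData(
+ #                   content=data[offset : offset + chunk],
+ #               ),
+ #               finish_write=offset + chunk >= len(data),
+ #           )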
+ requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + response = client.write_object(requests=request_generator()) + + # Handle the response + print(response) + + Args: + requests (Iterator[google.cloud.storage_v2.types.WriteObjectRequest]): + The request object iterator. Request message for WriteObject. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.storage_v2.types.WriteObjectResponse: + Response message for WriteObject. + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_object] + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def bidi_write_object( + self, + requests: Optional[Iterator[storage.BidiWriteObjectRequest]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Iterable[storage.BidiWriteObjectResponse]: + r"""Stores a new object and metadata. + + This is similar to the WriteObject call with the added support + for manual flushing of persisted state, and the ability to + determine current persisted size without closing the stream. + + The client may specify one or both of the ``state_lookup`` and + ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is + specified, the data written so far will be persisted to storage. + If ``state_lookup`` is specified, the service will respond with + a BidiWriteObjectResponse that contains the persisted size. If + both ``flush`` and ``state_lookup`` are specified, the flush + will always occur before a ``state_lookup``, so that both may be + set in the same request and the returned state will be the state + of the object post-flush. When the stream is closed, a + BidiWriteObjectResponse will always be sent to the client, + regardless of the value of ``state_lookup``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_bidi_write_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.BidiWriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiWriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. 
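+ # A fuller generator could also exercise the ``flush`` and
+ # ``state_lookup`` controls described above, e.g. (a sketch; the
+ # field values are placeholders):
+ #
+ #   storage_v2.BidiWriteObjectRequest(
+ #       upload_id="upload_id_value",
+ #       write_offset=0,
+ #       checksummed_data=storage_v2.ChecksummedData(content=b"..."),
+ #       flush=True,          # persist the data written so far
+ #       state_lookup=True,   # report the persisted size in the response
+ #   )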
+ requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = client.bidi_write_object(requests=request_generator()) + + # Handle the response + for response in stream: + print(response) + + Args: + requests (Iterator[google.cloud.storage_v2.types.BidiWriteObjectRequest]): + The request object iterator. Request message for BidiWriteObject. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + Iterable[google.cloud.storage_v2.types.BidiWriteObjectResponse]: + Response message for BidiWriteObject. + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.bidi_write_object] + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_objects( + self, + request: Optional[Union[storage.ListObjectsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListObjectsPager: + r"""Retrieves a list of objects matching the criteria. + + **IAM Permissions**: + + The authenticated user requires ``storage.objects.list`` `IAM + permission `__ + to use this method. To return object ACLs, the authenticated + user must also have the ``storage.objects.getIamPolicy`` + permission. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import storage_v2 + + def sample_list_objects(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ListObjectsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_objects(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.storage_v2.types.ListObjectsRequest, dict]): + The request object. Request message for ListObjects. + parent (str): + Required. Name of the bucket in which + to look for objects. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.services.storage.pagers.ListObjectsPager:
+ The result of a call to
+ Objects.ListObjects
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.ListObjectsRequest):
+ request = storage.ListObjectsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_objects]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.parent)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListObjectsPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def rewrite_object(
+ self,
+ request: Optional[Union[storage.RewriteObjectRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.RewriteResponse:
+ r"""Rewrites a source object to a destination object.
+ Optionally overrides metadata.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_rewrite_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.RewriteObjectRequest(
+ destination_name="destination_name_value",
+ destination_bucket="destination_bucket_value",
+ source_bucket="source_bucket_value",
+ source_object="source_object_value",
+ )
+
+ # Make the request
+ response = client.rewrite_object(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.RewriteObjectRequest, dict]):
+ The request object. Request message for RewriteObject. If the source object
+ is encrypted using a Customer-Supplied Encryption Key
+ the key information must be provided in the
+ copy_source_encryption_algorithm,
+ copy_source_encryption_key_bytes, and
+ copy_source_encryption_key_sha256_bytes fields. If the
+ destination object should be encrypted the keying
+ information should be provided in the
+ encryption_algorithm, encryption_key_bytes, and
+ encryption_key_sha256_bytes fields of the
+ common_object_request_params.customer_encryption field.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.RewriteResponse:
+ A rewrite response.
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.RewriteObjectRequest):
+ request = storage.RewriteObjectRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.rewrite_object]
+
+ header_params = {}
+
+ if request.source_bucket:
+ header_params["source_bucket"] = request.source_bucket
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.destination_bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def start_resumable_write(
+ self,
+ request: Optional[Union[storage.StartResumableWriteRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.StartResumableWriteResponse:
+ r"""Starts a resumable write operation. This method is part of the
+ `Resumable
+ upload `__
+ feature. This allows you to upload large objects in multiple
+ chunks, which is more resilient to network interruptions than a
+ single upload. The validity duration of the write operation, and
+ the consequences of it becoming invalid, are service-dependent.
+
+ **IAM Permissions**:
+
+ Requires ``storage.objects.create`` `IAM
+ permission `__
+ on the bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_start_resumable_write():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.StartResumableWriteRequest(
+ )
+
+ # Make the request
+ response = client.start_resumable_write(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.StartResumableWriteRequest, dict]):
+ The request object. Request message for StartResumableWrite.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.StartResumableWriteResponse:
+ Response object for StartResumableWrite.
+ """
+ # Create or coerce a protobuf request object.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.StartResumableWriteRequest):
+ request = storage.StartResumableWriteRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.start_resumable_write]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(
+ request.write_object_spec.resource.bucket
+ )
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def query_write_status(
+ self,
+ request: Optional[Union[storage.QueryWriteStatusRequest, dict]] = None,
+ *,
+ upload_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.QueryWriteStatusResponse:
+ r"""Determines the ``persisted_size`` of an object that is being
+ written. This method is part of the `resumable
+ upload `__
+ feature. The returned value is the size of the object that has
+ been persisted so far. The value can be used as the
+ ``write_offset`` for the next ``Write()`` call.
+
+ If the object does not exist, meaning if it was deleted, or the
+ first ``Write()`` has not yet reached the service, this method
+ returns the error ``NOT_FOUND``.
+
+ This method is useful for clients that buffer data and need to
+ know which data can be safely evicted. The client can call
+ ``QueryWriteStatus()`` at any time to determine how much data
+ has been logged for this object. For any sequence of
+ ``QueryWriteStatus()`` calls for a given object name, the
+ sequence of returned ``persisted_size`` values is
+ non-decreasing.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_query_write_status():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.QueryWriteStatusRequest(
+ upload_id="upload_id_value",
+ )
+
+ # Make the request
+ response = client.query_write_status(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.QueryWriteStatusRequest, dict]):
+ The request object. Request object for ``QueryWriteStatus``.
+ upload_id (str):
+ Required. The name of the resume
+ token for the object whose write status
+ is being requested.
+
+ This corresponds to the ``upload_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.QueryWriteStatusResponse:
+ Response object for QueryWriteStatus.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [upload_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.QueryWriteStatusRequest):
+ request = storage.QueryWriteStatusRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if upload_id is not None:
+ request.upload_id = upload_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.query_write_status]
+
+ header_params = {}
+
+ routing_param_regex = re.compile(
+ "^(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.upload_id)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def move_object(
+ self,
+ request: Optional[Union[storage.MoveObjectRequest, dict]] = None,
+ *,
+ bucket: Optional[str] = None,
+ source_object: Optional[str] = None,
+ destination_object: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> storage.Object:
+ r"""Moves the source object to the destination object in
+ the same bucket.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import storage_v2
+
+ def sample_move_object():
+ # Create a client
+ client = storage_v2.StorageClient()
+
+ # Initialize request argument(s)
+ request = storage_v2.MoveObjectRequest(
+ bucket="bucket_value",
+ source_object="source_object_value",
+ destination_object="destination_object_value",
+ )
+
+ # Make the request
+ response = client.move_object(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.storage_v2.types.MoveObjectRequest, dict]):
+ The request object. Request message for MoveObject.
+ bucket (str):
+ Required. Name of the bucket in which
+ the object resides.
+
+ This corresponds to the ``bucket`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_object (str):
+ Required. Name of the source object.
+ This corresponds to the ``source_object`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ destination_object (str):
+ Required. Name of the destination
+ object.
+
+ This corresponds to the ``destination_object`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.storage_v2.types.Object:
+ An object.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [bucket, source_object, destination_object]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, storage.MoveObjectRequest):
+ request = storage.MoveObjectRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if bucket is not None:
+ request.bucket = bucket
+ if source_object is not None:
+ request.source_object = source_object
+ if destination_object is not None:
+ request.destination_object = destination_object
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.move_object]
+
+ header_params = {}
+
+ routing_param_regex = re.compile("^(?P<bucket>.*)$")
+ regex_match = routing_param_regex.match(request.bucket)
+ if regex_match and regex_match.group("bucket"):
+ header_params["bucket"] = regex_match.group("bucket")
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def __enter__(self) -> "StorageClient":
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
+
+__all__ = ("StorageClient",)
diff --git a/google/cloud/storage_v2/services/storage/pagers.py b/google/cloud/storage_v2/services/storage/pagers.py
new file mode 100644
index 000000000..79aa18984
--- /dev/null
+++ b/google/cloud/storage_v2/services/storage/pagers.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
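+#
+# Usage note (an illustrative sketch, not part of the generated surface):
+# the pagers below are returned by the corresponding ``StorageClient``
+# methods rather than constructed directly; iterating one lazily issues
+# follow-up requests whenever a response carries a ``next_page_token``,
+# for example:
+#
+#   client = storage_v2.StorageClient()
+#   request = storage_v2.ListObjectsRequest(parent="projects/_/buckets/b")
+#   for obj in client.list_objects(request=request):  # ListObjectsPager
+#       print(obj.name)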
+# +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, + Union, +) + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + +from google.cloud.storage_v2.types import storage + + +class ListBucketsPager: + """A pager for iterating through ``list_buckets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.storage_v2.types.ListBucketsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``buckets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBuckets`` requests and continue to iterate + through the ``buckets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.storage_v2.types.ListBucketsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., storage.ListBucketsResponse], + request: storage.ListBucketsRequest, + response: storage.ListBucketsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.storage_v2.types.ListBucketsRequest): + The initial request object. + response (google.cloud.storage_v2.types.ListBucketsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = storage.ListBucketsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[storage.ListBucketsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[storage.Bucket]: + for page in self.pages: + yield from page.buckets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBucketsAsyncPager: + """A pager for iterating through ``list_buckets`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.storage_v2.types.ListBucketsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``buckets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBuckets`` requests and continue to iterate + through the ``buckets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.storage_v2.types.ListBucketsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[storage.ListBucketsResponse]], + request: storage.ListBucketsRequest, + response: storage.ListBucketsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.storage_v2.types.ListBucketsRequest): + The initial request object. + response (google.cloud.storage_v2.types.ListBucketsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = storage.ListBucketsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[storage.ListBucketsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[storage.Bucket]: + async def async_generator(): + async for page in self.pages: + for response in page.buckets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListObjectsPager: + """A pager for iterating through ``list_objects`` requests. + + This class thinly wraps an initial + :class:`google.cloud.storage_v2.types.ListObjectsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``objects`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListObjects`` requests and continue to iterate + through the ``objects`` field on the + corresponding responses. + + All the usual :class:`google.cloud.storage_v2.types.ListObjectsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., storage.ListObjectsResponse], + request: storage.ListObjectsRequest, + response: storage.ListObjectsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.storage_v2.types.ListObjectsRequest): + The initial request object. + response (google.cloud.storage_v2.types.ListObjectsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = storage.ListObjectsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[storage.ListObjectsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[storage.Object]: + for page in self.pages: + yield from page.objects + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListObjectsAsyncPager: + """A pager for iterating through ``list_objects`` requests. + + This class thinly wraps an initial + :class:`google.cloud.storage_v2.types.ListObjectsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``objects`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListObjects`` requests and continue to iterate + through the ``objects`` field on the + corresponding responses. + + All the usual :class:`google.cloud.storage_v2.types.ListObjectsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[storage.ListObjectsResponse]], + request: storage.ListObjectsRequest, + response: storage.ListObjectsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.storage_v2.types.ListObjectsRequest): + The initial request object. + response (google.cloud.storage_v2.types.ListObjectsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = storage.ListObjectsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[storage.ListObjectsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[storage.Object]: + async def async_generator(): + async for page in self.pages: + for response in page.objects: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/storage_v2/services/storage/transports/README.rst b/google/cloud/storage_v2/services/storage/transports/README.rst new file mode 100644 index 000000000..803b98632 --- /dev/null +++ b/google/cloud/storage_v2/services/storage/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`StorageTransport` is the ABC for all transports. +- public child `StorageGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `StorageGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseStorageRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `StorageRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/storage_v2/services/storage/transports/__init__.py b/google/cloud/storage_v2/services/storage/transports/__init__.py new file mode 100644 index 000000000..51802f9c5 --- /dev/null +++ b/google/cloud/storage_v2/services/storage/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import StorageTransport +from .grpc import StorageGrpcTransport +from .grpc_asyncio import StorageGrpcAsyncIOTransport + + +# Compile a registry of transports. 
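+# The keys below are the strings a client's ``transport`` argument may name;
+# only the gRPC transports are registered in this module.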
+_transport_registry = OrderedDict() # type: Dict[str, Type[StorageTransport]] +_transport_registry["grpc"] = StorageGrpcTransport +_transport_registry["grpc_asyncio"] = StorageGrpcAsyncIOTransport + +__all__ = ( + "StorageTransport", + "StorageGrpcTransport", + "StorageGrpcAsyncIOTransport", +) diff --git a/google/cloud/storage_v2/services/storage/transports/base.py b/google/cloud/storage_v2/services/storage/transports/base.py new file mode 100644 index 000000000..ea82adddc --- /dev/null +++ b/google/cloud/storage_v2/services/storage/transports/base.py @@ -0,0 +1,506 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.storage_v2 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +from google.cloud.storage_v2.types import storage +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class StorageTransport(abc.ABC): + """Abstract transport class for Storage.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + + DEFAULT_HOST: str = "storage.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'storage.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None and not self._ignore_credentials: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
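+        # Wrapping happens once per transport instance: gapic_v1.method.wrap_method
+        # attaches the per-method defaults configured here (all ``None`` for this
+        # service) plus ``client_info`` metadata, while still allowing callers to
+        # pass ``retry`` and ``timeout`` on each individual call.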
+ self._wrapped_methods = { + self.delete_bucket: gapic_v1.method.wrap_method( + self.delete_bucket, + default_timeout=None, + client_info=client_info, + ), + self.get_bucket: gapic_v1.method.wrap_method( + self.get_bucket, + default_timeout=None, + client_info=client_info, + ), + self.create_bucket: gapic_v1.method.wrap_method( + self.create_bucket, + default_timeout=None, + client_info=client_info, + ), + self.list_buckets: gapic_v1.method.wrap_method( + self.list_buckets, + default_timeout=None, + client_info=client_info, + ), + self.lock_bucket_retention_policy: gapic_v1.method.wrap_method( + self.lock_bucket_retention_policy, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.update_bucket: gapic_v1.method.wrap_method( + self.update_bucket, + default_timeout=None, + client_info=client_info, + ), + self.compose_object: gapic_v1.method.wrap_method( + self.compose_object, + default_timeout=None, + client_info=client_info, + ), + self.delete_object: gapic_v1.method.wrap_method( + self.delete_object, + default_timeout=None, + client_info=client_info, + ), + self.restore_object: gapic_v1.method.wrap_method( + self.restore_object, + default_timeout=None, + client_info=client_info, + ), + self.cancel_resumable_write: gapic_v1.method.wrap_method( + self.cancel_resumable_write, + default_timeout=None, + client_info=client_info, + ), + self.get_object: gapic_v1.method.wrap_method( + self.get_object, + default_timeout=None, + client_info=client_info, + ), + self.read_object: gapic_v1.method.wrap_method( + self.read_object, + default_timeout=None, + client_info=client_info, + ), + self.bidi_read_object: gapic_v1.method.wrap_method( + self.bidi_read_object, + default_timeout=None, + client_info=client_info, + ), + self.update_object: gapic_v1.method.wrap_method( + self.update_object, + default_timeout=None, + client_info=client_info, + ), + self.write_object: gapic_v1.method.wrap_method( + self.write_object, + default_timeout=None, + client_info=client_info, + ), + self.bidi_write_object: gapic_v1.method.wrap_method( + self.bidi_write_object, + default_timeout=None, + client_info=client_info, + ), + self.list_objects: gapic_v1.method.wrap_method( + self.list_objects, + default_timeout=None, + client_info=client_info, + ), + self.rewrite_object: gapic_v1.method.wrap_method( + self.rewrite_object, + default_timeout=None, + client_info=client_info, + ), + self.start_resumable_write: gapic_v1.method.wrap_method( + self.start_resumable_write, + default_timeout=None, + client_info=client_info, + ), + self.query_write_status: gapic_v1.method.wrap_method( + self.query_write_status, + default_timeout=None, + client_info=client_info, + ), + self.move_object: gapic_v1.method.wrap_method( + self.move_object, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete_bucket( + self, + ) -> Callable[ + [storage.DeleteBucketRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def get_bucket( + self, + ) -> Callable[ + [storage.GetBucketRequest], Union[storage.Bucket, Awaitable[storage.Bucket]] + ]: + raise NotImplementedError() + + @property + def create_bucket( + self, + ) -> Callable[ + [storage.CreateBucketRequest], Union[storage.Bucket, Awaitable[storage.Bucket]] + ]: + raise NotImplementedError() + + @property + def list_buckets( + self, + ) -> Callable[ + [storage.ListBucketsRequest], + Union[storage.ListBucketsResponse, Awaitable[storage.ListBucketsResponse]], + ]: + raise NotImplementedError() + + @property + def lock_bucket_retention_policy( + self, + ) -> Callable[ + [storage.LockBucketRetentionPolicyRequest], + Union[storage.Bucket, Awaitable[storage.Bucket]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_bucket( + self, + ) -> Callable[ + [storage.UpdateBucketRequest], Union[storage.Bucket, Awaitable[storage.Bucket]] + ]: + raise NotImplementedError() + + @property + def compose_object( + self, + ) -> Callable[ + [storage.ComposeObjectRequest], Union[storage.Object, Awaitable[storage.Object]] + ]: + raise NotImplementedError() + + @property + def delete_object( + self, + ) -> Callable[ + [storage.DeleteObjectRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def restore_object( + self, + ) -> Callable[ + [storage.RestoreObjectRequest], Union[storage.Object, Awaitable[storage.Object]] + ]: + raise NotImplementedError() + + @property + def cancel_resumable_write( + self, + ) -> Callable[ + [storage.CancelResumableWriteRequest], + Union[ + storage.CancelResumableWriteResponse, + Awaitable[storage.CancelResumableWriteResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_object( + self, + ) -> Callable[ + [storage.GetObjectRequest], Union[storage.Object, Awaitable[storage.Object]] + ]: + raise NotImplementedError() + + @property + def read_object( + self, + ) -> Callable[ + [storage.ReadObjectRequest], + Union[storage.ReadObjectResponse, Awaitable[storage.ReadObjectResponse]], + ]: + raise NotImplementedError() + + @property + def bidi_read_object( + self, + ) -> Callable[ + [storage.BidiReadObjectRequest], + Union[ + storage.BidiReadObjectResponse, Awaitable[storage.BidiReadObjectResponse] + ], + ]: + raise NotImplementedError() + + @property + def update_object( + self, + ) -> Callable[ + [storage.UpdateObjectRequest], Union[storage.Object, Awaitable[storage.Object]] + ]: + raise NotImplementedError() + + @property + def write_object( + self, + ) -> Callable[ + [storage.WriteObjectRequest], + Union[storage.WriteObjectResponse, Awaitable[storage.WriteObjectResponse]], + ]: + raise 
NotImplementedError() + + @property + def bidi_write_object( + self, + ) -> Callable[ + [storage.BidiWriteObjectRequest], + Union[ + storage.BidiWriteObjectResponse, Awaitable[storage.BidiWriteObjectResponse] + ], + ]: + raise NotImplementedError() + + @property + def list_objects( + self, + ) -> Callable[ + [storage.ListObjectsRequest], + Union[storage.ListObjectsResponse, Awaitable[storage.ListObjectsResponse]], + ]: + raise NotImplementedError() + + @property + def rewrite_object( + self, + ) -> Callable[ + [storage.RewriteObjectRequest], + Union[storage.RewriteResponse, Awaitable[storage.RewriteResponse]], + ]: + raise NotImplementedError() + + @property + def start_resumable_write( + self, + ) -> Callable[ + [storage.StartResumableWriteRequest], + Union[ + storage.StartResumableWriteResponse, + Awaitable[storage.StartResumableWriteResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_write_status( + self, + ) -> Callable[ + [storage.QueryWriteStatusRequest], + Union[ + storage.QueryWriteStatusResponse, + Awaitable[storage.QueryWriteStatusResponse], + ], + ]: + raise NotImplementedError() + + @property + def move_object( + self, + ) -> Callable[ + [storage.MoveObjectRequest], Union[storage.Object, Awaitable[storage.Object]] + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("StorageTransport",) diff --git a/google/cloud/storage_v2/services/storage/transports/grpc.py b/google/cloud/storage_v2/services/storage/transports/grpc.py new file mode 100644 index 000000000..6d5bbca85 --- /dev/null +++ b/google/cloud/storage_v2/services/storage/transports/grpc.py @@ -0,0 +1,1185 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import json
+import logging as std_logging
+import pickle
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers
+from google.api_core import gapic_v1
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
+
+import grpc  # type: ignore
+import proto  # type: ignore
+
+from google.cloud.storage_v2.types import storage
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import StorageTransport, DEFAULT_CLIENT_INFO
+
+try:
+    from google.api_core import client_logging  # type: ignore
+
+    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
+except ImportError:  # pragma: NO COVER
+    CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):  # pragma: NO COVER
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+            std_logging.DEBUG
+        )
+        if logging_enabled:  # pragma: NO COVER
+            request_metadata = client_call_details.metadata
+            if isinstance(request, proto.Message):
+                request_payload = type(request).to_json(request)
+            elif isinstance(request, google.protobuf.message.Message):
+                request_payload = MessageToJson(request)
+            else:
+                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+            request_metadata = {
+                key: value.decode("utf-8") if isinstance(value, bytes) else value
+                for key, value in request_metadata
+            }
+            grpc_request = {
+                "payload": request_payload,
+                "requestMethod": "grpc",
+                "metadata": dict(request_metadata),
+            }
+            _LOGGER.debug(
+                f"Sending request for {client_call_details.method}",
+                extra={
+                    "serviceName": "google.storage.v2.Storage",
+                    "rpcName": str(client_call_details.method),
+                    "request": grpc_request,
+                    "metadata": grpc_request["metadata"],
+                },
+            )
+        response = continuation(client_call_details, request)
+        if logging_enabled:  # pragma: NO COVER
+            response_metadata = response.trailing_metadata()
+            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
+            metadata = (
+                dict([(k, str(v)) for k, v in response_metadata])
+                if response_metadata
+                else None
+            )
+            result = response.result()
+            if isinstance(result, proto.Message):
+                response_payload = type(result).to_json(result)
+            elif isinstance(result, google.protobuf.message.Message):
+                response_payload = MessageToJson(result)
+            else:
+                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+            grpc_response = {
+                "payload": response_payload,
+                "metadata": metadata,
+                "status": "OK",
+            }
+            _LOGGER.debug(
+                f"Received response for {client_call_details.method}.",
+                extra={
+                    "serviceName": "google.storage.v2.Storage",
+                    "rpcName": client_call_details.method,
+                    "response": grpc_response,
+                    "metadata": grpc_response["metadata"],
+                },
+            )
+        return response
+
+
+class StorageGrpcTransport(StorageTransport):
+    """gRPC backend transport for Storage.
+
+    API Overview and Naming Syntax
+    ------------------------------
+
+    The Cloud Storage gRPC API allows applications to read and write
+    data through the abstractions of buckets and objects.
For a + description of these abstractions please see + https://cloud.google.com/storage/docs. + + Resources are named as follows: + + - Projects are referred to as they are defined by the Resource + Manager API, using strings like ``projects/123456`` or + ``projects/my-string-id``. + + - Buckets are named using string names of the form: + ``projects/{project}/buckets/{bucket}`` For globally unique + buckets, ``_`` may be substituted for the project. + + - Objects are uniquely identified by their name along with the name + of the bucket they belong to, as separate strings in this API. + For example: + + ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' } Note that object names can contain ``/`` + characters, which are treated as any other character (no special + directory semantics). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "storage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'storage.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. 
It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, grpc.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "storage.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def delete_bucket(self) -> Callable[[storage.DeleteBucketRequest], empty_pb2.Empty]: + r"""Return a callable for the delete bucket method over gRPC. + + Permanently deletes an empty bucket. + + Returns: + Callable[[~.DeleteBucketRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_bucket" not in self._stubs: + self._stubs["delete_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/DeleteBucket", + request_serializer=storage.DeleteBucketRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_bucket"] + + @property + def get_bucket(self) -> Callable[[storage.GetBucketRequest], storage.Bucket]: + r"""Return a callable for the get bucket method over gRPC. + + Returns metadata for the specified bucket. + + Returns: + Callable[[~.GetBucketRequest], + ~.Bucket]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
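+        # Stubs are created lazily on first property access and cached in
+        # self._stubs, so repeated calls reuse the same stub over the logged
+        # channel rather than re-creating it per request.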
+ if "get_bucket" not in self._stubs: + self._stubs["get_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/GetBucket", + request_serializer=storage.GetBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["get_bucket"] + + @property + def create_bucket(self) -> Callable[[storage.CreateBucketRequest], storage.Bucket]: + r"""Return a callable for the create bucket method over gRPC. + + Creates a new bucket. + + Returns: + Callable[[~.CreateBucketRequest], + ~.Bucket]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_bucket" not in self._stubs: + self._stubs["create_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/CreateBucket", + request_serializer=storage.CreateBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["create_bucket"] + + @property + def list_buckets( + self, + ) -> Callable[[storage.ListBucketsRequest], storage.ListBucketsResponse]: + r"""Return a callable for the list buckets method over gRPC. + + Retrieves a list of buckets for a given project. + + Returns: + Callable[[~.ListBucketsRequest], + ~.ListBucketsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_buckets" not in self._stubs: + self._stubs["list_buckets"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ListBuckets", + request_serializer=storage.ListBucketsRequest.serialize, + response_deserializer=storage.ListBucketsResponse.deserialize, + ) + return self._stubs["list_buckets"] + + @property + def lock_bucket_retention_policy( + self, + ) -> Callable[[storage.LockBucketRetentionPolicyRequest], storage.Bucket]: + r"""Return a callable for the lock bucket retention policy method over gRPC. + + Locks retention policy on a bucket. + + Returns: + Callable[[~.LockBucketRetentionPolicyRequest], + ~.Bucket]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "lock_bucket_retention_policy" not in self._stubs: + self._stubs[ + "lock_bucket_retention_policy" + ] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/LockBucketRetentionPolicy", + request_serializer=storage.LockBucketRetentionPolicyRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["lock_bucket_retention_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the IAM policy for a specified bucket. The ``resource`` + field in the request should be ``projects/_/buckets/{bucket}`` + for a bucket, or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. 
+ + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Updates an IAM policy for the specified bucket. The ``resource`` + field in the request should be ``projects/_/buckets/{bucket}`` + for a bucket, or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Tests a set of permissions on the given bucket, object, or + managed folder to see which, if any, are held by the caller. The + ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, + ``projects/_/buckets/{bucket}/objects/{object}`` for an object, + or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def update_bucket(self) -> Callable[[storage.UpdateBucketRequest], storage.Bucket]: + r"""Return a callable for the update bucket method over gRPC. + + Updates a bucket. Equivalent to JSON API's + storage.buckets.patch method. + + Returns: + Callable[[~.UpdateBucketRequest], + ~.Bucket]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_bucket" not in self._stubs: + self._stubs["update_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/UpdateBucket", + request_serializer=storage.UpdateBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["update_bucket"] + + @property + def compose_object( + self, + ) -> Callable[[storage.ComposeObjectRequest], storage.Object]: + r"""Return a callable for the compose object method over gRPC. + + Concatenates a list of existing objects into a new + object in the same bucket. + + Returns: + Callable[[~.ComposeObjectRequest], + ~.Object]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "compose_object" not in self._stubs: + self._stubs["compose_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ComposeObject", + request_serializer=storage.ComposeObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["compose_object"] + + @property + def delete_object(self) -> Callable[[storage.DeleteObjectRequest], empty_pb2.Empty]: + r"""Return a callable for the delete object method over gRPC. + + Deletes an object and its metadata. Deletions are permanent if + versioning is not enabled for the bucket, or if the generation + parameter is used, or if `soft + delete `__ is + not enabled for the bucket. When this API is used to delete an + object from a bucket that has soft delete policy enabled, the + object becomes soft deleted, and the ``softDeleteTime`` and + ``hardDeleteTime`` properties are set on the object. This API + cannot be used to permanently delete soft-deleted objects. + Soft-deleted objects are permanently deleted according to their + ``hardDeleteTime``. + + You can use the + [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API + to restore soft-deleted objects until the soft delete retention + period has passed. + + **IAM Permissions**: + + Requires ``storage.objects.delete`` `IAM + permission `__ + on the bucket. + + Returns: + Callable[[~.DeleteObjectRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_object" not in self._stubs: + self._stubs["delete_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/DeleteObject", + request_serializer=storage.DeleteObjectRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_object"] + + @property + def restore_object( + self, + ) -> Callable[[storage.RestoreObjectRequest], storage.Object]: + r"""Return a callable for the restore object method over gRPC. + + Restores a soft-deleted object. + + Returns: + Callable[[~.RestoreObjectRequest], + ~.Object]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "restore_object" not in self._stubs:
+            self._stubs["restore_object"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/RestoreObject",
+                request_serializer=storage.RestoreObjectRequest.serialize,
+                response_deserializer=storage.Object.deserialize,
+            )
+        return self._stubs["restore_object"]
+
+    @property
+    def cancel_resumable_write(
+        self,
+    ) -> Callable[
+        [storage.CancelResumableWriteRequest], storage.CancelResumableWriteResponse
+    ]:
+        r"""Return a callable for the cancel resumable write method over gRPC.
+
+        Cancels an in-progress resumable upload.
+
+        Any attempts to write to the resumable upload after
+        cancelling the upload will fail.
+
+        The behavior for currently in progress write operations
+        is not guaranteed - they could either complete before
+        the cancellation or fail if the cancellation completes
+        first.
+
+        Returns:
+            Callable[[~.CancelResumableWriteRequest],
+                    ~.CancelResumableWriteResponse]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_resumable_write" not in self._stubs:
+            self._stubs["cancel_resumable_write"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/CancelResumableWrite",
+                request_serializer=storage.CancelResumableWriteRequest.serialize,
+                response_deserializer=storage.CancelResumableWriteResponse.deserialize,
+            )
+        return self._stubs["cancel_resumable_write"]
+
+    @property
+    def get_object(self) -> Callable[[storage.GetObjectRequest], storage.Object]:
+        r"""Return a callable for the get object method over gRPC.
+
+        Retrieves object metadata.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.get`` `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket. To return object ACLs, the authenticated user
+        must also have the ``storage.objects.getIamPolicy`` permission.
+
+        Returns:
+            Callable[[~.GetObjectRequest],
+                    ~.Object]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_object" not in self._stubs:
+            self._stubs["get_object"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/GetObject",
+                request_serializer=storage.GetObjectRequest.serialize,
+                response_deserializer=storage.Object.deserialize,
+            )
+        return self._stubs["get_object"]
+
+    @property
+    def read_object(
+        self,
+    ) -> Callable[[storage.ReadObjectRequest], storage.ReadObjectResponse]:
+        r"""Return a callable for the read object method over gRPC.
+
+        Retrieves object data.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.get`` `IAM
+        permission <https://cloud.google.com/storage/docs/access-control/iam-permissions>`__
+        on the bucket.
+
+        Returns:
+            Callable[[~.ReadObjectRequest],
+                    ~.ReadObjectResponse]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
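+        # Note: unlike most methods on this transport, ReadObject is a
+        # server-streaming RPC, so the stub below is created with
+        # unary_stream rather than unary_unary.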
+ if "read_object" not in self._stubs: + self._stubs["read_object"] = self._logged_channel.unary_stream( + "/google.storage.v2.Storage/ReadObject", + request_serializer=storage.ReadObjectRequest.serialize, + response_deserializer=storage.ReadObjectResponse.deserialize, + ) + return self._stubs["read_object"] + + @property + def bidi_read_object( + self, + ) -> Callable[[storage.BidiReadObjectRequest], storage.BidiReadObjectResponse]: + r"""Return a callable for the bidi read object method over gRPC. + + Reads an object's data. + + This is a bi-directional API with the added support for reading + multiple ranges within one stream both within and across + multiple messages. If the server encountered an error for any of + the inputs, the stream will be closed with the relevant error + code. Because the API allows for multiple outstanding requests, + when the stream is closed the error response will contain a + BidiReadObjectRangesError proto in the error extension + describing the error for each outstanding read_id. + + **IAM Permissions**: + + Requires ``storage.objects.get`` + + `IAM + permission `__ + on the bucket. + + This API is currently in preview and is not yet available for + general use. + + Returns: + Callable[[~.BidiReadObjectRequest], + ~.BidiReadObjectResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "bidi_read_object" not in self._stubs: + self._stubs["bidi_read_object"] = self._logged_channel.stream_stream( + "/google.storage.v2.Storage/BidiReadObject", + request_serializer=storage.BidiReadObjectRequest.serialize, + response_deserializer=storage.BidiReadObjectResponse.deserialize, + ) + return self._stubs["bidi_read_object"] + + @property + def update_object(self) -> Callable[[storage.UpdateObjectRequest], storage.Object]: + r"""Return a callable for the update object method over gRPC. + + Updates an object's metadata. + Equivalent to JSON API's storage.objects.patch. + + Returns: + Callable[[~.UpdateObjectRequest], + ~.Object]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_object" not in self._stubs: + self._stubs["update_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/UpdateObject", + request_serializer=storage.UpdateObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["update_object"] + + @property + def write_object( + self, + ) -> Callable[[storage.WriteObjectRequest], storage.WriteObjectResponse]: + r"""Return a callable for the write object method over gRPC. + + Stores a new object and metadata. + + An object can be written either in a single message stream or in + a resumable sequence of message streams. To write using a single + stream, the client should include in the first message of the + stream an ``WriteObjectSpec`` describing the destination bucket, + object, and any preconditions. Additionally, the final message + must set 'finish_write' to true, or else it is an error. + + For a resumable write, the client should instead call + ``StartResumableWrite()``, populating a ``WriteObjectSpec`` into + that request. 
They should then attach the returned ``upload_id`` + to the first message of each following call to ``WriteObject``. + If the stream is closed before finishing the upload (either + explicitly by the client or due to a network error or an error + response from the server), the client should do as follows: + + - Check the result Status of the stream, to determine if + writing can be resumed on this stream or must be restarted + from scratch (by calling ``StartResumableWrite()``). The + resumable errors are DEADLINE_EXCEEDED, INTERNAL, and + UNAVAILABLE. For each case, the client should use binary + exponential backoff before retrying. Additionally, writes can + be resumed after RESOURCE_EXHAUSTED errors, but only after + taking appropriate measures, which may include reducing + aggregate send rate across clients and/or requesting a quota + increase for your project. + - If the call to ``WriteObject`` returns ``ABORTED``, that + indicates concurrent attempts to update the resumable write, + caused either by multiple racing clients or by a single + client where the previous request was timed out on the client + side but nonetheless reached the server. In this case the + client should take steps to prevent further concurrent writes + (e.g., increase the timeouts, stop using more than one + process to perform the upload, etc.), and then should follow + the steps below for resuming the upload. + - For resumable errors, the client should call + ``QueryWriteStatus()`` and then continue writing from the + returned ``persisted_size``. This may be less than the amount + of data the client previously sent. Note also that it is + acceptable to send data starting at an offset earlier than + the returned ``persisted_size``; in this case, the service + will skip data at offsets that were already persisted + (without checking that it matches the previously written + data), and write only the data starting from the persisted + offset. Even though the data isn't written, it may still + incur a performance cost over resuming at the correct write + offset. This behavior can make client-side handling simpler + in some cases. + - Clients must only send data that is a multiple of 256 KiB per + message, unless the object is being finished with + ``finish_write`` set to ``true``. + + The service will not view the object as complete until the + client has sent a ``WriteObjectRequest`` with ``finish_write`` + set to ``true``. Sending any requests on a stream after sending + a request with ``finish_write`` set to ``true`` will cause an + error. The client **should** check the response it receives to + determine how much data the service was able to commit and + whether the service views the object as complete. + + Attempting to resume an already finalized object will result in + an OK status, with a ``WriteObjectResponse`` containing the + finalized object's metadata. + + Alternatively, the BidiWriteObject operation may be used to + write an object with controls over flushing and the ability to + fetch the ability to determine the current persisted size. + + **IAM Permissions**: + + Requires ``storage.objects.create`` `IAM + permission `__ + on the bucket. + + Returns: + Callable[[~.WriteObjectRequest], + ~.WriteObjectResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
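+        # A hedged sketch of the single-stream (non-resumable) flow described
+        # above, assuming `transport` is an instance of this class, `spec` is
+        # a prepared WriteObjectSpec, and `payload` is illustrative data. The
+        # stub is stream-unary, so it is invoked with a request iterator:
+        #
+        #     def requests():
+        #         yield storage.WriteObjectRequest(
+        #             write_object_spec=spec,
+        #             write_offset=0,
+        #             checksummed_data=storage.ChecksummedData(content=payload),
+        #             finish_write=True,
+        #         )
+        #
+        #     response = transport.write_object(requests())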
+ if "write_object" not in self._stubs: + self._stubs["write_object"] = self._logged_channel.stream_unary( + "/google.storage.v2.Storage/WriteObject", + request_serializer=storage.WriteObjectRequest.serialize, + response_deserializer=storage.WriteObjectResponse.deserialize, + ) + return self._stubs["write_object"] + + @property + def bidi_write_object( + self, + ) -> Callable[[storage.BidiWriteObjectRequest], storage.BidiWriteObjectResponse]: + r"""Return a callable for the bidi write object method over gRPC. + + Stores a new object and metadata. + + This is similar to the WriteObject call with the added support + for manual flushing of persisted state, and the ability to + determine current persisted size without closing the stream. + + The client may specify one or both of the ``state_lookup`` and + ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is + specified, the data written so far will be persisted to storage. + If ``state_lookup`` is specified, the service will respond with + a BidiWriteObjectResponse that contains the persisted size. If + both ``flush`` and ``state_lookup`` are specified, the flush + will always occur before a ``state_lookup``, so that both may be + set in the same request and the returned state will be the state + of the object post-flush. When the stream is closed, a + BidiWriteObjectResponse will always be sent to the client, + regardless of the value of ``state_lookup``. + + Returns: + Callable[[~.BidiWriteObjectRequest], + ~.BidiWriteObjectResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "bidi_write_object" not in self._stubs: + self._stubs["bidi_write_object"] = self._logged_channel.stream_stream( + "/google.storage.v2.Storage/BidiWriteObject", + request_serializer=storage.BidiWriteObjectRequest.serialize, + response_deserializer=storage.BidiWriteObjectResponse.deserialize, + ) + return self._stubs["bidi_write_object"] + + @property + def list_objects( + self, + ) -> Callable[[storage.ListObjectsRequest], storage.ListObjectsResponse]: + r"""Return a callable for the list objects method over gRPC. + + Retrieves a list of objects matching the criteria. + + **IAM Permissions**: + + The authenticated user requires ``storage.objects.list`` `IAM + permission `__ + to use this method. To return object ACLs, the authenticated + user must also have the ``storage.objects.getIamPolicy`` + permission. + + Returns: + Callable[[~.ListObjectsRequest], + ~.ListObjectsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_objects" not in self._stubs: + self._stubs["list_objects"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ListObjects", + request_serializer=storage.ListObjectsRequest.serialize, + response_deserializer=storage.ListObjectsResponse.deserialize, + ) + return self._stubs["list_objects"] + + @property + def rewrite_object( + self, + ) -> Callable[[storage.RewriteObjectRequest], storage.RewriteResponse]: + r"""Return a callable for the rewrite object method over gRPC. + + Rewrites a source object to a destination object. + Optionally overrides metadata. 
+
+        Returns:
+            Callable[[~.RewriteObjectRequest],
+                    ~.RewriteResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "rewrite_object" not in self._stubs:
+            self._stubs["rewrite_object"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/RewriteObject",
+                request_serializer=storage.RewriteObjectRequest.serialize,
+                response_deserializer=storage.RewriteResponse.deserialize,
+            )
+        return self._stubs["rewrite_object"]
+
+    @property
+    def start_resumable_write(
+        self,
+    ) -> Callable[
+        [storage.StartResumableWriteRequest], storage.StartResumableWriteResponse
+    ]:
+        r"""Return a callable for the start resumable write method over gRPC.
+
+        Starts a resumable write operation. This method is part of the
+        `Resumable
+        upload `__
+        feature. This allows you to upload large objects in multiple
+        chunks, which is more resilient to network interruptions than a
+        single upload. The validity duration of the write operation, and
+        the consequences of it becoming invalid, are service-dependent.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.create`` `IAM
+        permission `__
+        on the bucket.
+
+        Returns:
+            Callable[[~.StartResumableWriteRequest],
+                    ~.StartResumableWriteResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "start_resumable_write" not in self._stubs:
+            self._stubs["start_resumable_write"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/StartResumableWrite",
+                request_serializer=storage.StartResumableWriteRequest.serialize,
+                response_deserializer=storage.StartResumableWriteResponse.deserialize,
+            )
+        return self._stubs["start_resumable_write"]
+
+    @property
+    def query_write_status(
+        self,
+    ) -> Callable[[storage.QueryWriteStatusRequest], storage.QueryWriteStatusResponse]:
+        r"""Return a callable for the query write status method over gRPC.
+
+        Determines the ``persisted_size`` of an object that is being
+        written. This method is part of the `resumable
+        upload `__
+        feature. The returned value is the size of the object that has
+        been persisted so far. The value can be used as the
+        ``write_offset`` for the next ``Write()`` call.
+
+        If the object does not exist (because it was deleted, or because
+        the first ``Write()`` has not yet reached the service), this
+        method returns the error ``NOT_FOUND``.
+
+        This method is useful for clients that buffer data and need to
+        know which data can be safely evicted. The client can call
+        ``QueryWriteStatus()`` at any time to determine how much data
+        has been logged for this object. For any sequence of
+        ``QueryWriteStatus()`` calls for a given object name, the
+        sequence of returned ``persisted_size`` values is
+        non-decreasing.
+
+        Returns:
+            Callable[[~.QueryWriteStatusRequest],
+                    ~.QueryWriteStatusResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
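+        # A hedged sketch of the resume flow described above, assuming
+        # `transport` is an instance of this class and `upload_id` came from
+        # an earlier StartResumableWrite call; the returned persisted_size
+        # becomes the next write_offset:
+        #
+        #     status = transport.query_write_status(
+        #         storage.QueryWriteStatusRequest(upload_id=upload_id))
+        #     next_write_offset = status.persisted_size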
+ if "query_write_status" not in self._stubs: + self._stubs["query_write_status"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/QueryWriteStatus", + request_serializer=storage.QueryWriteStatusRequest.serialize, + response_deserializer=storage.QueryWriteStatusResponse.deserialize, + ) + return self._stubs["query_write_status"] + + @property + def move_object(self) -> Callable[[storage.MoveObjectRequest], storage.Object]: + r"""Return a callable for the move object method over gRPC. + + Moves the source object to the destination object in + the same bucket. + + Returns: + Callable[[~.MoveObjectRequest], + ~.Object]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "move_object" not in self._stubs: + self._stubs["move_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/MoveObject", + request_serializer=storage.MoveObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["move_object"] + + def close(self): + self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("StorageGrpcTransport",) diff --git a/google/cloud/storage_v2/services/storage/transports/grpc_asyncio.py b/google/cloud/storage_v2/services/storage/transports/grpc_asyncio.py new file mode 100644 index 000000000..d2ff8dc05 --- /dev/null +++ b/google/cloud/storage_v2/services/storage/transports/grpc_asyncio.py @@ -0,0 +1,1349 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import inspect
+import json
+import pickle
+import logging as std_logging
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry_async as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
+
+import grpc  # type: ignore
+import proto  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.storage_v2.types import storage
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import StorageTransport, DEFAULT_CLIENT_INFO
+from .grpc import StorageGrpcTransport
+
+try:
+    from google.api_core import client_logging  # type: ignore
+
+    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
+except ImportError:  # pragma: NO COVER
+    CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientAIOInterceptor(
+    grpc.aio.UnaryUnaryClientInterceptor
+):  # pragma: NO COVER
+    async def intercept_unary_unary(self, continuation, client_call_details, request):
+        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+            std_logging.DEBUG
+        )
+        if logging_enabled:  # pragma: NO COVER
+            request_metadata = client_call_details.metadata
+            if isinstance(request, proto.Message):
+                request_payload = type(request).to_json(request)
+            elif isinstance(request, google.protobuf.message.Message):
+                request_payload = MessageToJson(request)
+            else:
+                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+            request_metadata = {
+                key: value.decode("utf-8") if isinstance(value, bytes) else value
+                for key, value in request_metadata
+            }
+            grpc_request = {
+                "payload": request_payload,
+                "requestMethod": "grpc",
+                "metadata": dict(request_metadata),
+            }
+            _LOGGER.debug(
+                f"Sending request for {client_call_details.method}",
+                extra={
+                    "serviceName": "google.storage.v2.Storage",
+                    "rpcName": str(client_call_details.method),
+                    "request": grpc_request,
+                    "metadata": grpc_request["metadata"],
+                },
+            )
+        response = await continuation(client_call_details, request)
+        if logging_enabled:  # pragma: NO COVER
+            response_metadata = await response.trailing_metadata()
+            # Convert the gRPC trailing metadata into a dict of strings.
+            metadata = (
+                dict([(k, str(v)) for k, v in response_metadata])
+                if response_metadata
+                else None
+            )
+            result = await response
+            if isinstance(result, proto.Message):
+                response_payload = type(result).to_json(result)
+            elif isinstance(result, google.protobuf.message.Message):
+                response_payload = MessageToJson(result)
+            else:
+                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+            grpc_response = {
+                "payload": response_payload,
+                "metadata": metadata,
+                "status": "OK",
+            }
+            _LOGGER.debug(
+                f"Received response to rpc {client_call_details.method}.",
+                extra={
+                    "serviceName": "google.storage.v2.Storage",
+                    "rpcName": str(client_call_details.method),
+                    "response": grpc_response,
+                    "metadata": grpc_response["metadata"],
+                },
+            )
+        return response
+
+
+class StorageGrpcAsyncIOTransport(StorageTransport):
+    """gRPC AsyncIO backend transport for Storage.
+
+    API Overview and Naming Syntax
+    ------------------------------
+
+    The Cloud Storage gRPC API allows applications to read and write
+    data through the abstractions of buckets and objects. For a
+    description of these abstractions please see
+    https://cloud.google.com/storage/docs.
+
+    Resources are named as follows:
+
+    -  Projects are referred to as they are defined by the Resource
+       Manager API, using strings like ``projects/123456`` or
+       ``projects/my-string-id``.
+
+    -  Buckets are named using string names of the form:
+       ``projects/{project}/buckets/{bucket}`` For globally unique
+       buckets, ``_`` may be substituted for the project.
+
+    -  Objects are uniquely identified by their name along with the name
+       of the bucket they belong to, as separate strings in this API.
+       For example:
+
+       ReadObjectRequest { bucket: 'projects/_/buckets/my-bucket'
+       object: 'my-object' } Note that object names can contain ``/``
+       characters, which are treated as any other character (no special
+       directory semantics).
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "storage.googleapis.com",
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "storage.googleapis.com",
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
+        api_mtls_endpoint: Optional[str] = None,
+        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        always_use_jwt_access: Optional[bool] = False,
+        api_audience: Optional[str] = None,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to (default: 'storage.googleapis.com').
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if a ``channel`` instance is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if a ``channel`` instance is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
+                A ``Channel`` instance through which to make calls, or a Callable
+                that constructs and returns one. If set to None, ``self.create_channel``
+                is used to create the channel. If a Callable is given, it will be called
+                with the same arguments as used in ``self.create_channel``.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if a ``channel`` instance is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, aio.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def delete_bucket( + self, + ) -> Callable[[storage.DeleteBucketRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete bucket method over gRPC. + + Permanently deletes an empty bucket. 
+ + Returns: + Callable[[~.DeleteBucketRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_bucket" not in self._stubs: + self._stubs["delete_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/DeleteBucket", + request_serializer=storage.DeleteBucketRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_bucket"] + + @property + def get_bucket( + self, + ) -> Callable[[storage.GetBucketRequest], Awaitable[storage.Bucket]]: + r"""Return a callable for the get bucket method over gRPC. + + Returns metadata for the specified bucket. + + Returns: + Callable[[~.GetBucketRequest], + Awaitable[~.Bucket]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_bucket" not in self._stubs: + self._stubs["get_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/GetBucket", + request_serializer=storage.GetBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["get_bucket"] + + @property + def create_bucket( + self, + ) -> Callable[[storage.CreateBucketRequest], Awaitable[storage.Bucket]]: + r"""Return a callable for the create bucket method over gRPC. + + Creates a new bucket. + + Returns: + Callable[[~.CreateBucketRequest], + Awaitable[~.Bucket]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_bucket" not in self._stubs: + self._stubs["create_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/CreateBucket", + request_serializer=storage.CreateBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["create_bucket"] + + @property + def list_buckets( + self, + ) -> Callable[[storage.ListBucketsRequest], Awaitable[storage.ListBucketsResponse]]: + r"""Return a callable for the list buckets method over gRPC. + + Retrieves a list of buckets for a given project. + + Returns: + Callable[[~.ListBucketsRequest], + Awaitable[~.ListBucketsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_buckets" not in self._stubs: + self._stubs["list_buckets"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ListBuckets", + request_serializer=storage.ListBucketsRequest.serialize, + response_deserializer=storage.ListBucketsResponse.deserialize, + ) + return self._stubs["list_buckets"] + + @property + def lock_bucket_retention_policy( + self, + ) -> Callable[ + [storage.LockBucketRetentionPolicyRequest], Awaitable[storage.Bucket] + ]: + r"""Return a callable for the lock bucket retention policy method over gRPC. 
+ + Locks retention policy on a bucket. + + Returns: + Callable[[~.LockBucketRetentionPolicyRequest], + Awaitable[~.Bucket]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "lock_bucket_retention_policy" not in self._stubs: + self._stubs[ + "lock_bucket_retention_policy" + ] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/LockBucketRetentionPolicy", + request_serializer=storage.LockBucketRetentionPolicyRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["lock_bucket_retention_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the IAM policy for a specified bucket. The ``resource`` + field in the request should be ``projects/_/buckets/{bucket}`` + for a bucket, or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Updates an IAM policy for the specified bucket. The ``resource`` + field in the request should be ``projects/_/buckets/{bucket}`` + for a bucket, or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Tests a set of permissions on the given bucket, object, or + managed folder to see which, if any, are held by the caller. 
The + ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, + ``projects/_/buckets/{bucket}/objects/{object}`` for an object, + or + ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` + for a managed folder. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def update_bucket( + self, + ) -> Callable[[storage.UpdateBucketRequest], Awaitable[storage.Bucket]]: + r"""Return a callable for the update bucket method over gRPC. + + Updates a bucket. Equivalent to JSON API's + storage.buckets.patch method. + + Returns: + Callable[[~.UpdateBucketRequest], + Awaitable[~.Bucket]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_bucket" not in self._stubs: + self._stubs["update_bucket"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/UpdateBucket", + request_serializer=storage.UpdateBucketRequest.serialize, + response_deserializer=storage.Bucket.deserialize, + ) + return self._stubs["update_bucket"] + + @property + def compose_object( + self, + ) -> Callable[[storage.ComposeObjectRequest], Awaitable[storage.Object]]: + r"""Return a callable for the compose object method over gRPC. + + Concatenates a list of existing objects into a new + object in the same bucket. + + Returns: + Callable[[~.ComposeObjectRequest], + Awaitable[~.Object]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "compose_object" not in self._stubs: + self._stubs["compose_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ComposeObject", + request_serializer=storage.ComposeObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["compose_object"] + + @property + def delete_object( + self, + ) -> Callable[[storage.DeleteObjectRequest], Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete object method over gRPC. + + Deletes an object and its metadata. Deletions are permanent if + versioning is not enabled for the bucket, or if the generation + parameter is used, or if `soft + delete `__ is + not enabled for the bucket. When this API is used to delete an + object from a bucket that has soft delete policy enabled, the + object becomes soft deleted, and the ``softDeleteTime`` and + ``hardDeleteTime`` properties are set on the object. This API + cannot be used to permanently delete soft-deleted objects. 
+ Soft-deleted objects are permanently deleted according to their + ``hardDeleteTime``. + + You can use the + [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API + to restore soft-deleted objects until the soft delete retention + period has passed. + + **IAM Permissions**: + + Requires ``storage.objects.delete`` `IAM + permission `__ + on the bucket. + + Returns: + Callable[[~.DeleteObjectRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_object" not in self._stubs: + self._stubs["delete_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/DeleteObject", + request_serializer=storage.DeleteObjectRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_object"] + + @property + def restore_object( + self, + ) -> Callable[[storage.RestoreObjectRequest], Awaitable[storage.Object]]: + r"""Return a callable for the restore object method over gRPC. + + Restores a soft-deleted object. + + Returns: + Callable[[~.RestoreObjectRequest], + Awaitable[~.Object]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_object" not in self._stubs: + self._stubs["restore_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/RestoreObject", + request_serializer=storage.RestoreObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["restore_object"] + + @property + def cancel_resumable_write( + self, + ) -> Callable[ + [storage.CancelResumableWriteRequest], + Awaitable[storage.CancelResumableWriteResponse], + ]: + r"""Return a callable for the cancel resumable write method over gRPC. + + Cancels an in-progress resumable upload. + + Any attempts to write to the resumable upload after + cancelling the upload will fail. + + The behavior for currently in progress write operations + is not guaranteed - they could either complete before + the cancellation or fail if the cancellation completes + first. + + Returns: + Callable[[~.CancelResumableWriteRequest], + Awaitable[~.CancelResumableWriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_resumable_write" not in self._stubs: + self._stubs["cancel_resumable_write"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/CancelResumableWrite", + request_serializer=storage.CancelResumableWriteRequest.serialize, + response_deserializer=storage.CancelResumableWriteResponse.deserialize, + ) + return self._stubs["cancel_resumable_write"] + + @property + def get_object( + self, + ) -> Callable[[storage.GetObjectRequest], Awaitable[storage.Object]]: + r"""Return a callable for the get object method over gRPC. + + Retrieves object metadata. + + **IAM Permissions**: + + Requires ``storage.objects.get`` `IAM + permission `__ + on the bucket. 
To return object ACLs, the authenticated user + must also have the ``storage.objects.getIamPolicy`` permission. + + Returns: + Callable[[~.GetObjectRequest], + Awaitable[~.Object]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_object" not in self._stubs: + self._stubs["get_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/GetObject", + request_serializer=storage.GetObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["get_object"] + + @property + def read_object( + self, + ) -> Callable[[storage.ReadObjectRequest], Awaitable[storage.ReadObjectResponse]]: + r"""Return a callable for the read object method over gRPC. + + Retrieves object data. + + **IAM Permissions**: + + Requires ``storage.objects.get`` `IAM + permission `__ + on the bucket. + + Returns: + Callable[[~.ReadObjectRequest], + Awaitable[~.ReadObjectResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_object" not in self._stubs: + self._stubs["read_object"] = self._logged_channel.unary_stream( + "/google.storage.v2.Storage/ReadObject", + request_serializer=storage.ReadObjectRequest.serialize, + response_deserializer=storage.ReadObjectResponse.deserialize, + ) + return self._stubs["read_object"] + + @property + def bidi_read_object( + self, + ) -> Callable[ + [storage.BidiReadObjectRequest], Awaitable[storage.BidiReadObjectResponse] + ]: + r"""Return a callable for the bidi read object method over gRPC. + + Reads an object's data. + + This is a bi-directional API with the added support for reading + multiple ranges within one stream both within and across + multiple messages. If the server encountered an error for any of + the inputs, the stream will be closed with the relevant error + code. Because the API allows for multiple outstanding requests, + when the stream is closed the error response will contain a + BidiReadObjectRangesError proto in the error extension + describing the error for each outstanding read_id. + + **IAM Permissions**: + + Requires ``storage.objects.get`` + + `IAM + permission `__ + on the bucket. + + This API is currently in preview and is not yet available for + general use. + + Returns: + Callable[[~.BidiReadObjectRequest], + Awaitable[~.BidiReadObjectResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "bidi_read_object" not in self._stubs: + self._stubs["bidi_read_object"] = self._logged_channel.stream_stream( + "/google.storage.v2.Storage/BidiReadObject", + request_serializer=storage.BidiReadObjectRequest.serialize, + response_deserializer=storage.BidiReadObjectResponse.deserialize, + ) + return self._stubs["bidi_read_object"] + + @property + def update_object( + self, + ) -> Callable[[storage.UpdateObjectRequest], Awaitable[storage.Object]]: + r"""Return a callable for the update object method over gRPC. 
+
+        Updates an object's metadata.
+        Equivalent to JSON API's storage.objects.patch.
+
+        Returns:
+            Callable[[~.UpdateObjectRequest],
+                    Awaitable[~.Object]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_object" not in self._stubs:
+            self._stubs["update_object"] = self._logged_channel.unary_unary(
+                "/google.storage.v2.Storage/UpdateObject",
+                request_serializer=storage.UpdateObjectRequest.serialize,
+                response_deserializer=storage.Object.deserialize,
+            )
+        return self._stubs["update_object"]
+
+    @property
+    def write_object(
+        self,
+    ) -> Callable[[storage.WriteObjectRequest], Awaitable[storage.WriteObjectResponse]]:
+        r"""Return a callable for the write object method over gRPC.
+
+        Stores a new object and metadata.
+
+        An object can be written either in a single message stream or in
+        a resumable sequence of message streams. To write using a single
+        stream, the client should include in the first message of the
+        stream a ``WriteObjectSpec`` describing the destination bucket,
+        object, and any preconditions. Additionally, the final message
+        must set 'finish_write' to true, or else it is an error.
+
+        For a resumable write, the client should instead call
+        ``StartResumableWrite()``, populating a ``WriteObjectSpec`` into
+        that request. They should then attach the returned ``upload_id``
+        to the first message of each following call to ``WriteObject``.
+        If the stream is closed before finishing the upload (either
+        explicitly by the client or due to a network error or an error
+        response from the server), the client should do as follows:
+
+        -  Check the result Status of the stream, to determine if
+           writing can be resumed on this stream or must be restarted
+           from scratch (by calling ``StartResumableWrite()``). The
+           resumable errors are DEADLINE_EXCEEDED, INTERNAL, and
+           UNAVAILABLE. For each case, the client should use binary
+           exponential backoff before retrying. Additionally, writes can
+           be resumed after RESOURCE_EXHAUSTED errors, but only after
+           taking appropriate measures, which may include reducing
+           aggregate send rate across clients and/or requesting a quota
+           increase for your project.
+        -  If the call to ``WriteObject`` returns ``ABORTED``, that
+           indicates concurrent attempts to update the resumable write,
+           caused either by multiple racing clients or by a single
+           client where the previous request was timed out on the client
+           side but nonetheless reached the server. In this case the
+           client should take steps to prevent further concurrent writes
+           (e.g., increase the timeouts, stop using more than one
+           process to perform the upload, etc.), and then should follow
+           the steps below for resuming the upload.
+        -  For resumable errors, the client should call
+           ``QueryWriteStatus()`` and then continue writing from the
+           returned ``persisted_size``. This may be less than the amount
+           of data the client previously sent. Note also that it is
+           acceptable to send data starting at an offset earlier than
+           the returned ``persisted_size``; in this case, the service
+           will skip data at offsets that were already persisted
+           (without checking that it matches the previously written
+           data), and write only the data starting from the persisted
+           offset. Even though the data isn't written, it may still
+           incur a performance cost over resuming at the correct write
+           offset. This behavior can make client-side handling simpler
+           in some cases.
+        -  Clients must only send data that is a multiple of 256 KiB per
+           message, unless the object is being finished with
+           ``finish_write`` set to ``true``.
+
+        The service will not view the object as complete until the
+        client has sent a ``WriteObjectRequest`` with ``finish_write``
+        set to ``true``. Sending any requests on a stream after sending
+        a request with ``finish_write`` set to ``true`` will cause an
+        error. The client **should** check the response it receives to
+        determine how much data the service was able to commit and
+        whether the service views the object as complete.
+
+        Attempting to resume an already finalized object will result in
+        an OK status, with a ``WriteObjectResponse`` containing the
+        finalized object's metadata.
+
+        Alternatively, the BidiWriteObject operation may be used to
+        write an object with controls over flushing and the ability to
+        determine the current persisted size.
+
+        **IAM Permissions**:
+
+        Requires ``storage.objects.create`` `IAM
+        permission `__
+        on the bucket.
+
+        Returns:
+            Callable[[~.WriteObjectRequest],
+                    Awaitable[~.WriteObjectResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "write_object" not in self._stubs:
+            self._stubs["write_object"] = self._logged_channel.stream_unary(
+                "/google.storage.v2.Storage/WriteObject",
+                request_serializer=storage.WriteObjectRequest.serialize,
+                response_deserializer=storage.WriteObjectResponse.deserialize,
+            )
+        return self._stubs["write_object"]
+
+    @property
+    def bidi_write_object(
+        self,
+    ) -> Callable[
+        [storage.BidiWriteObjectRequest], Awaitable[storage.BidiWriteObjectResponse]
+    ]:
+        r"""Return a callable for the bidi write object method over gRPC.
+
+        Stores a new object and metadata.
+
+        This is similar to the WriteObject call with the added support
+        for manual flushing of persisted state, and the ability to
+        determine current persisted size without closing the stream.
+
+        The client may specify one or both of the ``state_lookup`` and
+        ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is
+        specified, the data written so far will be persisted to storage.
+        If ``state_lookup`` is specified, the service will respond with
+        a BidiWriteObjectResponse that contains the persisted size. If
+        both ``flush`` and ``state_lookup`` are specified, the flush
+        will always occur before a ``state_lookup``, so that both may be
+        set in the same request and the returned state will be the state
+        of the object post-flush. When the stream is closed, a
+        BidiWriteObjectResponse will always be sent to the client,
+        regardless of the value of ``state_lookup``.
+
+        Returns:
+            Callable[[~.BidiWriteObjectRequest],
+                    Awaitable[~.BidiWriteObjectResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
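+        # A hedged sketch of the flush/state_lookup semantics described above
+        # (asyncio variant; `transport`, `spec`, and `chunk` are illustrative
+        # assumptions, not part of the generated code):
+        #
+        #     async def requests():
+        #         yield storage.BidiWriteObjectRequest(
+        #             write_object_spec=spec,
+        #             write_offset=0,
+        #             checksummed_data=storage.ChecksummedData(content=chunk),
+        #             flush=True,
+        #             state_lookup=True,
+        #         )
+        #
+        #     async for response in transport.bidi_write_object(requests()):
+        #         committed = response.persisted_size  # size after the flush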
+ if "bidi_write_object" not in self._stubs: + self._stubs["bidi_write_object"] = self._logged_channel.stream_stream( + "/google.storage.v2.Storage/BidiWriteObject", + request_serializer=storage.BidiWriteObjectRequest.serialize, + response_deserializer=storage.BidiWriteObjectResponse.deserialize, + ) + return self._stubs["bidi_write_object"] + + @property + def list_objects( + self, + ) -> Callable[[storage.ListObjectsRequest], Awaitable[storage.ListObjectsResponse]]: + r"""Return a callable for the list objects method over gRPC. + + Retrieves a list of objects matching the criteria. + + **IAM Permissions**: + + The authenticated user requires ``storage.objects.list`` `IAM + permission `__ + to use this method. To return object ACLs, the authenticated + user must also have the ``storage.objects.getIamPolicy`` + permission. + + Returns: + Callable[[~.ListObjectsRequest], + Awaitable[~.ListObjectsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_objects" not in self._stubs: + self._stubs["list_objects"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/ListObjects", + request_serializer=storage.ListObjectsRequest.serialize, + response_deserializer=storage.ListObjectsResponse.deserialize, + ) + return self._stubs["list_objects"] + + @property + def rewrite_object( + self, + ) -> Callable[[storage.RewriteObjectRequest], Awaitable[storage.RewriteResponse]]: + r"""Return a callable for the rewrite object method over gRPC. + + Rewrites a source object to a destination object. + Optionally overrides metadata. + + Returns: + Callable[[~.RewriteObjectRequest], + Awaitable[~.RewriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "rewrite_object" not in self._stubs: + self._stubs["rewrite_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/RewriteObject", + request_serializer=storage.RewriteObjectRequest.serialize, + response_deserializer=storage.RewriteResponse.deserialize, + ) + return self._stubs["rewrite_object"] + + @property + def start_resumable_write( + self, + ) -> Callable[ + [storage.StartResumableWriteRequest], + Awaitable[storage.StartResumableWriteResponse], + ]: + r"""Return a callable for the start resumable write method over gRPC. + + Starts a resumable write operation. This method is part of the + `Resumable + upload `__ + feature. This allows you to upload large objects in multiple + chunks, which is more resilient to network interruptions than a + single upload. The validity duration of the write operation, and + the consequences of it becoming invalid, are service-dependent. + + **IAM Permissions**: + + Requires ``storage.objects.create`` `IAM + permission `__ + on the bucket. + + Returns: + Callable[[~.StartResumableWriteRequest], + Awaitable[~.StartResumableWriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "start_resumable_write" not in self._stubs: + self._stubs["start_resumable_write"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/StartResumableWrite", + request_serializer=storage.StartResumableWriteRequest.serialize, + response_deserializer=storage.StartResumableWriteResponse.deserialize, + ) + return self._stubs["start_resumable_write"] + + @property + def query_write_status( + self, + ) -> Callable[ + [storage.QueryWriteStatusRequest], Awaitable[storage.QueryWriteStatusResponse] + ]: + r"""Return a callable for the query write status method over gRPC. + + Determines the ``persisted_size`` of an object that is being + written. This method is part of the `resumable + upload `__ + feature. The returned value is the size of the object that has + been persisted so far. The value can be used as the + ``write_offset`` for the next ``Write()`` call. + + If the object does not exist, meaning if it was deleted, or the + first ``Write()`` has not yet reached the service, this method + returns the error ``NOT_FOUND``. + + This method is useful for clients that buffer data and need to + know which data can be safely evicted. The client can call + ``QueryWriteStatus()`` at any time to determine how much data + has been logged for this object. For any sequence of + ``QueryWriteStatus()`` calls for a given object name, the + sequence of returned ``persisted_size`` values are + non-decreasing. + + Returns: + Callable[[~.QueryWriteStatusRequest], + Awaitable[~.QueryWriteStatusResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_write_status" not in self._stubs: + self._stubs["query_write_status"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/QueryWriteStatus", + request_serializer=storage.QueryWriteStatusRequest.serialize, + response_deserializer=storage.QueryWriteStatusResponse.deserialize, + ) + return self._stubs["query_write_status"] + + @property + def move_object( + self, + ) -> Callable[[storage.MoveObjectRequest], Awaitable[storage.Object]]: + r"""Return a callable for the move object method over gRPC. + + Moves the source object to the destination object in + the same bucket. + + Returns: + Callable[[~.MoveObjectRequest], + Awaitable[~.Object]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "move_object" not in self._stubs: + self._stubs["move_object"] = self._logged_channel.unary_unary( + "/google.storage.v2.Storage/MoveObject", + request_serializer=storage.MoveObjectRequest.serialize, + response_deserializer=storage.Object.deserialize, + ) + return self._stubs["move_object"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.delete_bucket: self._wrap_method( + self.delete_bucket, + default_timeout=None, + client_info=client_info, + ), + self.get_bucket: self._wrap_method( + self.get_bucket, + default_timeout=None, + client_info=client_info, + ), + self.create_bucket: self._wrap_method( + self.create_bucket, + default_timeout=None, + client_info=client_info, + ), + self.list_buckets: self._wrap_method( + self.list_buckets, + default_timeout=None, + client_info=client_info, + ), + self.lock_bucket_retention_policy: self._wrap_method( + self.lock_bucket_retention_policy, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.update_bucket: self._wrap_method( + self.update_bucket, + default_timeout=None, + client_info=client_info, + ), + self.compose_object: self._wrap_method( + self.compose_object, + default_timeout=None, + client_info=client_info, + ), + self.delete_object: self._wrap_method( + self.delete_object, + default_timeout=None, + client_info=client_info, + ), + self.restore_object: self._wrap_method( + self.restore_object, + default_timeout=None, + client_info=client_info, + ), + self.cancel_resumable_write: self._wrap_method( + self.cancel_resumable_write, + default_timeout=None, + client_info=client_info, + ), + self.get_object: self._wrap_method( + self.get_object, + default_timeout=None, + client_info=client_info, + ), + self.read_object: self._wrap_method( + self.read_object, + default_timeout=None, + client_info=client_info, + ), + self.bidi_read_object: self._wrap_method( + self.bidi_read_object, + default_timeout=None, + client_info=client_info, + ), + self.update_object: self._wrap_method( + self.update_object, + default_timeout=None, + client_info=client_info, + ), + self.write_object: self._wrap_method( + self.write_object, + default_timeout=None, + client_info=client_info, + ), + self.bidi_write_object: self._wrap_method( + self.bidi_write_object, + default_timeout=None, + client_info=client_info, + ), + self.list_objects: self._wrap_method( + self.list_objects, + default_timeout=None, + client_info=client_info, + ), + self.rewrite_object: self._wrap_method( + self.rewrite_object, + default_timeout=None, + client_info=client_info, + ), + self.start_resumable_write: self._wrap_method( + self.start_resumable_write, + default_timeout=None, + client_info=client_info, + ), + self.query_write_status: self._wrap_method( + self.query_write_status, + default_timeout=None, + client_info=client_info, + ), + self.move_object: self._wrap_method( + self.move_object, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return 
gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + def close(self): + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" + + +__all__ = ("StorageGrpcAsyncIOTransport",) diff --git a/google/cloud/storage_v2/types/__init__.py b/google/cloud/storage_v2/types/__init__.py new file mode 100644 index 000000000..8ef177836 --- /dev/null +++ b/google/cloud/storage_v2/types/__init__.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .storage import ( + AppendObjectSpec, + BidiReadHandle, + BidiReadObjectError, + BidiReadObjectRedirectedError, + BidiReadObjectRequest, + BidiReadObjectResponse, + BidiReadObjectSpec, + BidiWriteHandle, + BidiWriteObjectRedirectedError, + BidiWriteObjectRequest, + BidiWriteObjectResponse, + Bucket, + BucketAccessControl, + CancelResumableWriteRequest, + CancelResumableWriteResponse, + ChecksummedData, + CommonObjectRequestParams, + ComposeObjectRequest, + ContentRange, + CreateBucketRequest, + CustomerEncryption, + DeleteBucketRequest, + DeleteObjectRequest, + GetBucketRequest, + GetObjectRequest, + ListBucketsRequest, + ListBucketsResponse, + ListObjectsRequest, + ListObjectsResponse, + LockBucketRetentionPolicyRequest, + MoveObjectRequest, + Object, + ObjectAccessControl, + ObjectChecksums, + ObjectContexts, + ObjectCustomContextPayload, + ObjectRangeData, + Owner, + ProjectTeam, + QueryWriteStatusRequest, + QueryWriteStatusResponse, + ReadObjectRequest, + ReadObjectResponse, + ReadRange, + ReadRangeError, + RestoreObjectRequest, + RewriteObjectRequest, + RewriteResponse, + ServiceConstants, + StartResumableWriteRequest, + StartResumableWriteResponse, + UpdateBucketRequest, + UpdateObjectRequest, + WriteObjectRequest, + WriteObjectResponse, + WriteObjectSpec, +) + +__all__ = ( + "AppendObjectSpec", + "BidiReadHandle", + "BidiReadObjectError", + "BidiReadObjectRedirectedError", + "BidiReadObjectRequest", + "BidiReadObjectResponse", + "BidiReadObjectSpec", + "BidiWriteHandle", + "BidiWriteObjectRedirectedError", + "BidiWriteObjectRequest", + "BidiWriteObjectResponse", + "Bucket", + "BucketAccessControl", + "CancelResumableWriteRequest", + "CancelResumableWriteResponse", + "ChecksummedData", + "CommonObjectRequestParams", + "ComposeObjectRequest", + "ContentRange", + "CreateBucketRequest", + "CustomerEncryption", + "DeleteBucketRequest", + "DeleteObjectRequest", + "GetBucketRequest", + "GetObjectRequest", + "ListBucketsRequest", + "ListBucketsResponse", + "ListObjectsRequest", + "ListObjectsResponse", + "LockBucketRetentionPolicyRequest", + "MoveObjectRequest", + "Object", + "ObjectAccessControl", + "ObjectChecksums", + "ObjectContexts", + "ObjectCustomContextPayload", + "ObjectRangeData", + "Owner", + "ProjectTeam", + "QueryWriteStatusRequest", + "QueryWriteStatusResponse", + "ReadObjectRequest", + "ReadObjectResponse", + "ReadRange", + "ReadRangeError", + "RestoreObjectRequest", + "RewriteObjectRequest", + "RewriteResponse", + 
"ServiceConstants", + "StartResumableWriteRequest", + "StartResumableWriteResponse", + "UpdateBucketRequest", + "UpdateObjectRequest", + "WriteObjectRequest", + "WriteObjectResponse", + "WriteObjectSpec", +) diff --git a/google/cloud/storage_v2/types/storage.py b/google/cloud/storage_v2/types/storage.py new file mode 100644 index 000000000..9dc7d6262 --- /dev/null +++ b/google/cloud/storage_v2/types/storage.py @@ -0,0 +1,4964 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.storage.v2", + manifest={ + "DeleteBucketRequest", + "GetBucketRequest", + "CreateBucketRequest", + "ListBucketsRequest", + "ListBucketsResponse", + "LockBucketRetentionPolicyRequest", + "UpdateBucketRequest", + "ComposeObjectRequest", + "DeleteObjectRequest", + "RestoreObjectRequest", + "CancelResumableWriteRequest", + "CancelResumableWriteResponse", + "ReadObjectRequest", + "GetObjectRequest", + "ReadObjectResponse", + "BidiReadObjectSpec", + "BidiReadObjectRequest", + "BidiReadObjectResponse", + "BidiReadObjectRedirectedError", + "BidiWriteObjectRedirectedError", + "BidiReadObjectError", + "ReadRangeError", + "ReadRange", + "ObjectRangeData", + "BidiReadHandle", + "BidiWriteHandle", + "WriteObjectSpec", + "WriteObjectRequest", + "WriteObjectResponse", + "AppendObjectSpec", + "BidiWriteObjectRequest", + "BidiWriteObjectResponse", + "ListObjectsRequest", + "QueryWriteStatusRequest", + "QueryWriteStatusResponse", + "RewriteObjectRequest", + "RewriteResponse", + "MoveObjectRequest", + "StartResumableWriteRequest", + "StartResumableWriteResponse", + "UpdateObjectRequest", + "CommonObjectRequestParams", + "ServiceConstants", + "Bucket", + "BucketAccessControl", + "ChecksummedData", + "ObjectChecksums", + "ObjectCustomContextPayload", + "ObjectContexts", + "CustomerEncryption", + "Object", + "ObjectAccessControl", + "ListObjectsResponse", + "ProjectTeam", + "Owner", + "ContentRange", + }, +) + + +class DeleteBucketRequest(proto.Message): + r"""Request message for DeleteBucket. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. Name of a bucket to delete. + if_metageneration_match (int): + If set, only deletes the bucket if its + metageneration matches this value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + If set, only deletes the bucket if its + metageneration does not match this value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=2, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + + +class GetBucketRequest(proto.Message): + r"""Request message for GetBucket. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. Name of a bucket. + if_metageneration_match (int): + If set, and if the bucket's current + metageneration does not match the specified + value, the request will return an error. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + If set, and if the bucket's current + metageneration matches the specified value, the + request will return an error. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. A "*" field may be + used to indicate all fields. If no mask is specified, will + default to all fields. + + This field is a member of `oneof`_ ``_read_mask``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=2, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + optional=True, + message=field_mask_pb2.FieldMask, + ) + + +class CreateBucketRequest(proto.Message): + r"""Request message for CreateBucket. + + Attributes: + parent (str): + Required. The project to which this bucket will belong. This + field must either be empty or ``projects/_``. The project ID + that owns this bucket should be specified in the + ``bucket.project`` field. + bucket (google.cloud.storage_v2.types.Bucket): + Optional. Properties of the new bucket being inserted. The + name of the bucket is specified in the ``bucket_id`` field. + Populating ``bucket.name`` field will result in an error. + The project of the bucket must be specified in the + ``bucket.project`` field. This field must be in + ``projects/{projectIdentifier}`` format, {projectIdentifier} + can be the project ID or project number. The ``parent`` + field must be either empty or ``projects/_``. + bucket_id (str): + Required. The ID to use for this bucket, which will become + the final component of the bucket's resource name. For + example, the value ``foo`` might result in a bucket with the + name ``projects/123456/buckets/foo``. + predefined_acl (str): + Optional. Apply a predefined set of access + controls to this bucket. Valid values are + "authenticatedRead", "private", + "projectPrivate", "publicRead", or + "publicReadWrite". + predefined_default_object_acl (str): + Optional. Apply a predefined set of default + object access controls to this bucket. Valid + values are "authenticatedRead", + "bucketOwnerFullControl", "bucketOwnerRead", + "private", "projectPrivate", or "publicRead". + enable_object_retention (bool): + Optional. If true, enable object retention on + the bucket. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + bucket: "Bucket" = proto.Field( + proto.MESSAGE, + number=2, + message="Bucket", + ) + bucket_id: str = proto.Field( + proto.STRING, + number=3, + ) + predefined_acl: str = proto.Field( + proto.STRING, + number=6, + ) + predefined_default_object_acl: str = proto.Field( + proto.STRING, + number=7, + ) + enable_object_retention: bool = proto.Field( + proto.BOOL, + number=9, + ) + + +class ListBucketsRequest(proto.Message): + r"""Request message for ListBuckets. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + parent (str): + Required. The project whose buckets we are + listing. + page_size (int): + Optional. Maximum number of buckets to return in a single + response. The service will use this parameter or 1,000 + items, whichever is smaller. If "acl" is present in the + read_mask, the service will use this parameter of 200 items, + whichever is smaller. + page_token (str): + Optional. A previously-returned page token + representing part of the larger set of results + to view. + prefix (str): + Optional. Filter results to buckets whose + names begin with this prefix. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read from each result. If no + mask is specified, will default to all fields except + items.owner, items.acl, and items.default_object_acl. + + - may be used to mean "all fields". + + This field is a member of `oneof`_ ``_read_mask``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + prefix: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + optional=True, + message=field_mask_pb2.FieldMask, + ) + + +class ListBucketsResponse(proto.Message): + r"""The result of a call to Buckets.ListBuckets + + Attributes: + buckets (MutableSequence[google.cloud.storage_v2.types.Bucket]): + The list of items. + next_page_token (str): + The continuation token, used to page through + large result sets. Provide this value in a + subsequent request to return the next page of + results. + """ + + @property + def raw_page(self): + return self + + buckets: MutableSequence["Bucket"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Bucket", + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class LockBucketRetentionPolicyRequest(proto.Message): + r"""Request message for LockBucketRetentionPolicyRequest. + + Attributes: + bucket (str): + Required. Name of a bucket. + if_metageneration_match (int): + Required. Makes the operation conditional on + whether bucket's current metageneration matches + the given value. Must be positive. + """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=2, + ) + + +class UpdateBucketRequest(proto.Message): + r"""Request for UpdateBucket method. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (google.cloud.storage_v2.types.Bucket): + Required. The bucket to update. The bucket's ``name`` field + will be used to identify the bucket. + if_metageneration_match (int): + If set, will only modify the bucket if its + metageneration matches this value. 
+ + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + If set, will only modify the bucket if its + metageneration does not match this value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. + predefined_acl (str): + Optional. Apply a predefined set of access + controls to this bucket. Valid values are + "authenticatedRead", "private", + "projectPrivate", "publicRead", or + "publicReadWrite". + predefined_default_object_acl (str): + Optional. Apply a predefined set of default + object access controls to this bucket. Valid + values are "authenticatedRead", + "bucketOwnerFullControl", "bucketOwnerRead", + "private", "projectPrivate", or "publicRead". + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. List of fields to be updated. + + To specify ALL fields, equivalent to the JSON API's "update" + function, specify a single field with the value ``*``. Note: + not recommended. If a new field is introduced at a later + time, an older client updating with the ``*`` may + accidentally reset the new field's value. + + Not specifying any fields is an error. + """ + + bucket: "Bucket" = proto.Field( + proto.MESSAGE, + number=1, + message="Bucket", + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=2, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + predefined_acl: str = proto.Field( + proto.STRING, + number=8, + ) + predefined_default_object_acl: str = proto.Field( + proto.STRING, + number=9, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ComposeObjectRequest(proto.Message): + r"""Request message for ComposeObject. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + destination (google.cloud.storage_v2.types.Object): + Required. Properties of the resulting object. + source_objects (MutableSequence[google.cloud.storage_v2.types.ComposeObjectRequest.SourceObject]): + Optional. The list of source objects that + will be concatenated into a single object. + destination_predefined_acl (str): + Optional. Apply a predefined set of access + controls to the destination object. Valid values + are "authenticatedRead", + "bucketOwnerFullControl", "bucketOwnerRead", + "private", "projectPrivate", or "publicRead". + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. Setting to 0 makes the operation + succeed only if there are no live versions of + the object. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_metageneration_match (int): + Makes the operation conditional on whether + the object's current metageneration matches the + given value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + kms_key (str): + Optional. Resource name of the Cloud KMS key, of the form + ``projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key``, + that will be used to encrypt the object. Overrides the + object metadata's ``kms_key_name`` value, if any. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + object_checksums (google.cloud.storage_v2.types.ObjectChecksums): + Optional. 
The checksums of the complete + object. This will be validated against the + combined checksums of the component objects. + """ + + class SourceObject(proto.Message): + r"""Description of a source object for a composition request. + + Attributes: + name (str): + Required. The source object's name. All + source objects must reside in the same bucket. + generation (int): + Optional. The generation of this object to + use as the source. + object_preconditions (google.cloud.storage_v2.types.ComposeObjectRequest.SourceObject.ObjectPreconditions): + Optional. Conditions that must be met for + this operation to execute. + """ + + class ObjectPreconditions(proto.Message): + r"""Preconditions for a source object of a composition request. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + if_generation_match (int): + Only perform the composition if the + generation of the source object that would be + used matches this value. If this value and a + generation are both specified, they must be the + same value or the call will fail. + + This field is a member of `oneof`_ ``_if_generation_match``. + """ + + if_generation_match: int = proto.Field( + proto.INT64, + number=1, + optional=True, + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + generation: int = proto.Field( + proto.INT64, + number=2, + ) + object_preconditions: "ComposeObjectRequest.SourceObject.ObjectPreconditions" = proto.Field( + proto.MESSAGE, + number=3, + message="ComposeObjectRequest.SourceObject.ObjectPreconditions", + ) + + destination: "Object" = proto.Field( + proto.MESSAGE, + number=1, + message="Object", + ) + source_objects: MutableSequence[SourceObject] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=SourceObject, + ) + destination_predefined_acl: str = proto.Field( + proto.STRING, + number=9, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + kms_key: str = proto.Field( + proto.STRING, + number=6, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=7, + message="CommonObjectRequestParams", + ) + object_checksums: "ObjectChecksums" = proto.Field( + proto.MESSAGE, + number=10, + message="ObjectChecksums", + ) + + +class DeleteObjectRequest(proto.Message): + r"""Message for deleting an object. ``bucket`` and ``object`` **must** + be set. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (str): + Required. Name of the bucket in which the + object resides. + object_ (str): + Required. The name of the finalized object to delete. Note: + If you want to delete an unfinalized resumable upload please + use ``CancelResumableWrite``. + generation (int): + Optional. If present, permanently deletes a + specific revision of this object (as opposed to + the latest version, the default). + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. Setting to 0 makes the operation + succeed only if there are no live versions of + the object. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_generation_not_match (int): + Makes the operation conditional on whether + the object's live generation does not match the + given value. 
If no live object exists, the + precondition fails. Setting to 0 makes the + operation succeed only if there is a live + version of the object. + + This field is a member of `oneof`_ ``_if_generation_not_match``. + if_metageneration_match (int): + Makes the operation conditional on whether + the object's current metageneration matches the + given value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Makes the operation conditional on whether + the object's current metageneration does not + match the given value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + object_: str = proto.Field( + proto.STRING, + number=2, + ) + generation: int = proto.Field( + proto.INT64, + number=4, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=6, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=7, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=8, + optional=True, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=10, + message="CommonObjectRequestParams", + ) + + +class RestoreObjectRequest(proto.Message): + r"""Message for restoring an object. ``bucket``, ``object``, and + ``generation`` **must** be set. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (str): + Required. Name of the bucket in which the + object resides. + object_ (str): + Required. The name of the object to restore. + generation (int): + Required. The specific revision of the object + to restore. + restore_token (str): + Optional. Restore token used to differentiate + soft-deleted objects with the same name and + generation. Only applicable for hierarchical + namespace buckets. This parameter is optional, + and is only required in the rare case when there + are multiple soft-deleted objects with the same + name and generation. + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. Setting to 0 makes the operation + succeed only if there are no live versions of + the object. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_generation_not_match (int): + Makes the operation conditional on whether + the object's live generation does not match the + given value. If no live object exists, the + precondition fails. Setting to 0 makes the + operation succeed only if there is a live + version of the object. + + This field is a member of `oneof`_ ``_if_generation_not_match``. + if_metageneration_match (int): + Makes the operation conditional on whether + the object's current metageneration matches the + given value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Makes the operation conditional on whether + the object's current metageneration does not + match the given value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. 
+ copy_source_acl (bool): + If false or unset, the bucket's default + object ACL will be used. If true, copy the + source object's access controls. Return an error + if bucket has UBLA enabled. + + This field is a member of `oneof`_ ``_copy_source_acl``. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + object_: str = proto.Field( + proto.STRING, + number=2, + ) + generation: int = proto.Field( + proto.INT64, + number=3, + ) + restore_token: str = proto.Field( + proto.STRING, + number=11, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=6, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=7, + optional=True, + ) + copy_source_acl: bool = proto.Field( + proto.BOOL, + number=9, + optional=True, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=8, + message="CommonObjectRequestParams", + ) + + +class CancelResumableWriteRequest(proto.Message): + r"""Message for canceling an in-progress resumable upload. ``upload_id`` + **must** be set. + + Attributes: + upload_id (str): + Required. The upload_id of the resumable upload to cancel. + This should be copied from the ``upload_id`` field of + ``StartResumableWriteResponse``. + """ + + upload_id: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelResumableWriteResponse(proto.Message): + r"""Empty response message for canceling an in-progress resumable + upload, will be extended as needed. + + """ + + +class ReadObjectRequest(proto.Message): + r"""Request message for ReadObject. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (str): + Required. The name of the bucket containing + the object to read. + object_ (str): + Required. The name of the object to read. + generation (int): + Optional. If present, selects a specific + revision of this object (as opposed to the + latest version, the default). + read_offset (int): + Optional. The offset for the first byte to return in the + read, relative to the start of the object. + + A negative ``read_offset`` value will be interpreted as the + number of bytes back from the end of the object to be + returned. For example, if an object's length is 15 bytes, a + ReadObjectRequest with ``read_offset`` = -5 and + ``read_limit`` = 3 would return bytes 10 through 12 of the + object. Requesting a negative offset with magnitude larger + than the size of the object will return the entire object. + read_limit (int): + Optional. The maximum number of ``data`` bytes the server is + allowed to return in the sum of all ``Object`` messages. A + ``read_limit`` of zero indicates that there is no limit, and + a negative ``read_limit`` will cause an error. + + If the stream returns fewer bytes than allowed by the + ``read_limit`` and no error occurred, the stream includes + all data from the ``read_offset`` to the end of the + resource. + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. 
Setting to 0 makes the operation
+            succeed only if there are no live versions of
+            the object.
+
+            This field is a member of `oneof`_ ``_if_generation_match``.
+        if_generation_not_match (int):
+            Makes the operation conditional on whether
+            the object's live generation does not match the
+            given value. If no live object exists, the
+            precondition fails. Setting to 0 makes the
+            operation succeed only if there is a live
+            version of the object.
+
+            This field is a member of `oneof`_ ``_if_generation_not_match``.
+        if_metageneration_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration matches the
+            given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_match``.
+        if_metageneration_not_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration does not
+            match the given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_not_match``.
+        common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams):
+            Optional. A set of parameters common to
+            Storage API requests concerning an object.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read. The checksummed_data
+            field and its children will always be present. If no mask is
+            specified, will default to all fields except metadata.owner
+            and metadata.acl.
+
+            ``*`` may be used to mean "all fields".
+
+            This field is a member of `oneof`_ ``_read_mask``.
+    """
+
+    bucket: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    object_: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    generation: int = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+    read_offset: int = proto.Field(
+        proto.INT64,
+        number=4,
+    )
+    read_limit: int = proto.Field(
+        proto.INT64,
+        number=5,
+    )
+    if_generation_match: int = proto.Field(
+        proto.INT64,
+        number=6,
+        optional=True,
+    )
+    if_generation_not_match: int = proto.Field(
+        proto.INT64,
+        number=7,
+        optional=True,
+    )
+    if_metageneration_match: int = proto.Field(
+        proto.INT64,
+        number=8,
+        optional=True,
+    )
+    if_metageneration_not_match: int = proto.Field(
+        proto.INT64,
+        number=9,
+        optional=True,
+    )
+    common_object_request_params: "CommonObjectRequestParams" = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message="CommonObjectRequestParams",
+    )
+    read_mask: field_mask_pb2.FieldMask = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        optional=True,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class GetObjectRequest(proto.Message):
+    r"""Request message for GetObject.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        bucket (str):
+            Required. Name of the bucket in which the
+            object resides.
+        object_ (str):
+            Required. Name of the object.
+        generation (int):
+            Optional. If present, selects a specific
+            revision of this object (as opposed to the
+            latest version, the default).
+        soft_deleted (bool):
+            If true, return the soft-deleted version of
+            this object.
+
+            This field is a member of `oneof`_ ``_soft_deleted``.
+        if_generation_match (int):
+            Makes the operation conditional on whether
+            the object's current generation matches the
+            given value. Setting to 0 makes the operation
+            succeed only if there are no live versions of
+            the object.
+
+            This field is a member of `oneof`_ ``_if_generation_match``.
+        if_generation_not_match (int):
+            Makes the operation conditional on whether
+            the object's live generation does not match the
+            given value.
If no live object exists, the
+            precondition fails. Setting to 0 makes the
+            operation succeed only if there is a live
+            version of the object.
+
+            This field is a member of `oneof`_ ``_if_generation_not_match``.
+        if_metageneration_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration matches the
+            given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_match``.
+        if_metageneration_not_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration does not
+            match the given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_not_match``.
+        common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams):
+            Optional. A set of parameters common to
+            Storage API requests concerning an object.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read. If no mask is
+            specified, will default to all fields except metadata.acl
+            and metadata.owner.
+
+            ``*`` may be used to mean "all fields".
+
+            This field is a member of `oneof`_ ``_read_mask``.
+        restore_token (str):
+            Optional. Restore token used to differentiate soft-deleted
+            objects with the same name and generation. Only applicable
+            for hierarchical namespace buckets and if soft_deleted is
+            set to true. This parameter is optional, and is only
+            required in the rare case when there are multiple
+            soft-deleted objects with the same name and generation.
+    """
+
+    bucket: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    object_: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    generation: int = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+    soft_deleted: bool = proto.Field(
+        proto.BOOL,
+        number=11,
+        optional=True,
+    )
+    if_generation_match: int = proto.Field(
+        proto.INT64,
+        number=4,
+        optional=True,
+    )
+    if_generation_not_match: int = proto.Field(
+        proto.INT64,
+        number=5,
+        optional=True,
+    )
+    if_metageneration_match: int = proto.Field(
+        proto.INT64,
+        number=6,
+        optional=True,
+    )
+    if_metageneration_not_match: int = proto.Field(
+        proto.INT64,
+        number=7,
+        optional=True,
+    )
+    common_object_request_params: "CommonObjectRequestParams" = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message="CommonObjectRequestParams",
+    )
+    read_mask: field_mask_pb2.FieldMask = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        optional=True,
+        message=field_mask_pb2.FieldMask,
+    )
+    restore_token: str = proto.Field(
+        proto.STRING,
+        number=12,
+    )
+
+
+class ReadObjectResponse(proto.Message):
+    r"""Response message for ReadObject.
+
+    Attributes:
+        checksummed_data (google.cloud.storage_v2.types.ChecksummedData):
+            A portion of the data for the object. The service **may**
+            leave ``data`` empty for any given ``ReadResponse``. This
+            enables the service to inform the client that the request is
+            still live while it is running an operation to generate more
+            data.
+        object_checksums (google.cloud.storage_v2.types.ObjectChecksums):
+            The checksums of the complete object. If the
+            object is downloaded in full, the client should
+            compute one of these checksums over the
+            downloaded object and compare it against the
+            value provided here.
+        content_range (google.cloud.storage_v2.types.ContentRange):
+            If read_offset and/or read_limit was specified on the
+            ReadObjectRequest, ContentRange will be populated on the
+            first ReadObjectResponse message of the read stream.
+        metadata (google.cloud.storage_v2.types.Object):
+            Metadata of the object whose media is being
+            returned.
Only populated in the first response
+            in the stream.
+    """
+
+    checksummed_data: "ChecksummedData" = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message="ChecksummedData",
+    )
+    object_checksums: "ObjectChecksums" = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message="ObjectChecksums",
+    )
+    content_range: "ContentRange" = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message="ContentRange",
+    )
+    metadata: "Object" = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message="Object",
+    )
+
+
+class BidiReadObjectSpec(proto.Message):
+    r"""Describes the object to read in a BidiReadObject request.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        bucket (str):
+            Required. The name of the bucket containing
+            the object to read.
+        object_ (str):
+            Required. The name of the object to read.
+        generation (int):
+            Optional. If present, selects a specific
+            revision of this object (as opposed to the
+            latest version, the default).
+        if_generation_match (int):
+            Makes the operation conditional on whether
+            the object's current generation matches the
+            given value. Setting to 0 makes the operation
+            succeed only if there are no live versions of
+            the object.
+
+            This field is a member of `oneof`_ ``_if_generation_match``.
+        if_generation_not_match (int):
+            Makes the operation conditional on whether
+            the object's live generation does not match the
+            given value. If no live object exists, the
+            precondition fails. Setting to 0 makes the
+            operation succeed only if there is a live
+            version of the object.
+
+            This field is a member of `oneof`_ ``_if_generation_not_match``.
+        if_metageneration_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration matches the
+            given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_match``.
+        if_metageneration_not_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration does not
+            match the given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_not_match``.
+        common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams):
+            Optional. A set of parameters common to
+            Storage API requests concerning an object.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read. The checksummed_data
+            field and its children will always be present. If no mask is
+            specified, will default to all fields except metadata.owner
+            and metadata.acl.
+
+            ``*`` may be used to mean "all fields". As per
+            https://google.aip.dev/161, this field is deprecated. As
+            an alternative, grpc metadata can be used:
+            https://cloud.google.com/apis/docs/system-parameters#definitions
+
+            This field is a member of `oneof`_ ``_read_mask``.
+        read_handle (google.cloud.storage_v2.types.BidiReadHandle):
+            The client can optionally set this field. The
+            read handle is an optimized way of creating new
+            streams. Read handles are generated and
+            periodically refreshed from prior reads.
+
+            This field is a member of `oneof`_ ``_read_handle``.
+        routing_token (str):
+            The routing token that influences request
+            routing for the stream. Must be provided if a
+            BidiReadObjectRedirectedError is returned.
+
+            This field is a member of `oneof`_ ``_routing_token``.
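+
+    Example (an illustrative sketch, not generated code; assumes
+    ``from google.cloud import storage_v2``; offset and length of 0
+    request the full object)::
+
+        spec = storage_v2.BidiReadObjectSpec(
+            bucket="projects/_/buckets/my-bucket",
+            object_="my-object",
+        )
+        request = storage_v2.BidiReadObjectRequest(
+            read_object_spec=spec,
+            read_ranges=[
+                storage_v2.ReadRange(read_offset=0, read_length=0, read_id=1),
+            ],
+        )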
+ """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + object_: str = proto.Field( + proto.STRING, + number=2, + ) + generation: int = proto.Field( + proto.INT64, + number=3, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=6, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=7, + optional=True, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=8, + message="CommonObjectRequestParams", + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=12, + optional=True, + message=field_mask_pb2.FieldMask, + ) + read_handle: "BidiReadHandle" = proto.Field( + proto.MESSAGE, + number=13, + optional=True, + message="BidiReadHandle", + ) + routing_token: str = proto.Field( + proto.STRING, + number=14, + optional=True, + ) + + +class BidiReadObjectRequest(proto.Message): + r"""Request message for BidiReadObject. + + Attributes: + read_object_spec (google.cloud.storage_v2.types.BidiReadObjectSpec): + Optional. The first message of each stream + should set this field. If this is not the first + message, an error will be returned. Describes + the object to read. + read_ranges (MutableSequence[google.cloud.storage_v2.types.ReadRange]): + Optional. Provides a list of 0 or more (up to + 100) ranges to read. If a single range is large + enough to require multiple responses, they are + guaranteed to be delivered in increasing offset + order. There are no ordering guarantees across + ranges. When no ranges are provided, the + response message will not include + ObjectRangeData. For full object downloads, the + offset and size can be set to 0. + """ + + read_object_spec: "BidiReadObjectSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="BidiReadObjectSpec", + ) + read_ranges: MutableSequence["ReadRange"] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message="ReadRange", + ) + + +class BidiReadObjectResponse(proto.Message): + r"""Response message for BidiReadObject. + + Attributes: + object_data_ranges (MutableSequence[google.cloud.storage_v2.types.ObjectRangeData]): + A portion of the object's data. The service **may** leave + data empty for any given ReadResponse. This enables the + service to inform the client that the request is still live + while it is running an operation to generate more data. The + service **may** pipeline multiple responses belonging to + different read requests. Each ObjectRangeData entry will + have a read_id set to the same value as the corresponding + source read request. + metadata (google.cloud.storage_v2.types.Object): + Metadata of the object whose media is being + returned. Only populated in the first response + in the stream and not populated when the stream + is opened with a read handle. + read_handle (google.cloud.storage_v2.types.BidiReadHandle): + This field will be periodically refreshed, + however it may not be set in every response. It + allows the client to more efficiently open + subsequent bidirectional streams to the same + object. 
+ """ + + object_data_ranges: MutableSequence["ObjectRangeData"] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message="ObjectRangeData", + ) + metadata: "Object" = proto.Field( + proto.MESSAGE, + number=4, + message="Object", + ) + read_handle: "BidiReadHandle" = proto.Field( + proto.MESSAGE, + number=7, + message="BidiReadHandle", + ) + + +class BidiReadObjectRedirectedError(proto.Message): + r"""Error proto containing details for a redirected read. This + error may be attached as details for an ABORTED response to + BidiReadObject. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + read_handle (google.cloud.storage_v2.types.BidiReadHandle): + The read handle for the redirected read. If + set, the client may use this in the + BidiReadObjectSpec when retrying the read + stream. + routing_token (str): + The routing token the client must use when retrying the read + stream. This value must be provided in the header + ``x-goog-request-params``, with key ``routing_token`` and + this string verbatim as the value. + + This field is a member of `oneof`_ ``_routing_token``. + """ + + read_handle: "BidiReadHandle" = proto.Field( + proto.MESSAGE, + number=1, + message="BidiReadHandle", + ) + routing_token: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class BidiWriteObjectRedirectedError(proto.Message): + r"""Error proto containing details for a redirected write. This + error may be attached as details for an ABORTED response to + BidiWriteObject. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + routing_token (str): + The routing token the client must use when retrying the + write stream. This value must be provided in the header + ``x-goog-request-params``, with key ``routing_token`` and + this string verbatim as the value. + + This field is a member of `oneof`_ ``_routing_token``. + write_handle (google.cloud.storage_v2.types.BidiWriteHandle): + Opaque value describing a previous write. If set, the client + must use this in an AppendObjectSpec first_message when + retrying the write stream. If not set, clients may retry the + original request. + + This field is a member of `oneof`_ ``_write_handle``. + generation (int): + The generation of the object that triggered the redirect. + This will be set iff write_handle is set. If set, the client + must use this in an AppendObjectSpec first_message when + retrying the write stream. + + This field is a member of `oneof`_ ``_generation``. 
+ """ + + routing_token: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + write_handle: "BidiWriteHandle" = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message="BidiWriteHandle", + ) + generation: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + + +class BidiReadObjectError(proto.Message): + r"""Error extension proto containing details for all outstanding + reads on the failed stream + + Attributes: + read_range_errors (MutableSequence[google.cloud.storage_v2.types.ReadRangeError]): + The error code for each outstanding read_range + """ + + read_range_errors: MutableSequence["ReadRangeError"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ReadRangeError", + ) + + +class ReadRangeError(proto.Message): + r"""Error extension proto containing details for a single range + read + + Attributes: + read_id (int): + The id of the corresponding read_range + status (google.rpc.status_pb2.Status): + The status which should be an enum value of + [google.rpc.Code]. + """ + + read_id: int = proto.Field( + proto.INT64, + number=1, + ) + status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + + +class ReadRange(proto.Message): + r"""Describes a range of bytes to read in a BidiReadObjectRanges + request. + + Attributes: + read_offset (int): + Required. The offset for the first byte to return in the + read, relative to the start of the object. + + A negative read_offset value will be interpreted as the + number of bytes back from the end of the object to be + returned. For example, if an object's length is 15 bytes, a + ReadObjectRequest with read_offset = -5 and read_length = 3 + would return bytes 10 through 12 of the object. Requesting a + negative offset with magnitude larger than the size of the + object will return the entire object. A read_offset larger + than the size of the object will result in an OutOfRange + error. + read_length (int): + Optional. The maximum number of data bytes the server is + allowed to return across all response messages with the same + read_id. A read_length of zero indicates to read until the + resource end, and a negative read_length will cause an + error. If the stream returns fewer bytes than allowed by the + read_length and no error occurred, the stream includes all + data from the read_offset to the resource end. + read_id (int): + Required. Read identifier provided by the client. When the + client issues more than one outstanding ReadRange on the + same stream, responses can be mapped back to their + corresponding requests using this value. Clients must ensure + that all outstanding requests have different read_id values. + The server may close the stream with an error if this + condition is not met. + """ + + read_offset: int = proto.Field( + proto.INT64, + number=1, + ) + read_length: int = proto.Field( + proto.INT64, + number=2, + ) + read_id: int = proto.Field( + proto.INT64, + number=3, + ) + + +class ObjectRangeData(proto.Message): + r"""Contains data and metadata for a range of an object. + + Attributes: + checksummed_data (google.cloud.storage_v2.types.ChecksummedData): + A portion of the data for the object. + read_range (google.cloud.storage_v2.types.ReadRange): + The ReadRange describes the content being returned with + read_id set to the corresponding ReadObjectRequest in the + stream. Multiple ObjectRangeData messages may have the same + read_id but increasing offsets. 
ReadObjectResponse messages
+            with the same read_id are guaranteed to be delivered in
+            increasing offset order.
+        range_end (bool):
+            If set, indicates there are no more bytes to
+            read for the given ReadRange.
+    """
+
+    checksummed_data: "ChecksummedData" = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message="ChecksummedData",
+    )
+    read_range: "ReadRange" = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message="ReadRange",
+    )
+    range_end: bool = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+
+
+class BidiReadHandle(proto.Message):
+    r"""BidiReadHandle contains a handle from a previous
+    BidiReadObject invocation. The client can use this instead of
+    BidiReadObjectSpec as an optimized way of opening subsequent
+    bidirectional streams to the same object.
+
+    Attributes:
+        handle (bytes):
+            Required. Opaque value describing a previous
+            read.
+    """
+
+    handle: bytes = proto.Field(
+        proto.BYTES,
+        number=1,
+    )
+
+
+class BidiWriteHandle(proto.Message):
+    r"""BidiWriteHandle contains a handle from a previous
+    BidiWriteObject invocation. The client can use this as an
+    optimized way of opening subsequent bidirectional streams to the
+    same object.
+
+    Attributes:
+        handle (bytes):
+            Required. Opaque value describing a previous
+            write.
+    """
+
+    handle: bytes = proto.Field(
+        proto.BYTES,
+        number=1,
+    )
+
+
+class WriteObjectSpec(proto.Message):
+    r"""Describes an attempt to insert an object, possibly over
+    multiple requests.
+
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        resource (google.cloud.storage_v2.types.Object):
+            Required. Destination object, including its
+            name and its metadata.
+        predefined_acl (str):
+            Optional. Apply a predefined set of access
+            controls to this object. Valid values are
+            "authenticatedRead", "bucketOwnerFullControl",
+            "bucketOwnerRead", "private", "projectPrivate",
+            or "publicRead".
+        if_generation_match (int):
+            Makes the operation conditional on whether
+            the object's current generation matches the
+            given value. Setting to 0 makes the operation
+            succeed only if there are no live versions of
+            the object.
+
+            This field is a member of `oneof`_ ``_if_generation_match``.
+        if_generation_not_match (int):
+            Makes the operation conditional on whether
+            the object's live generation does not match the
+            given value. If no live object exists, the
+            precondition fails. Setting to 0 makes the
+            operation succeed only if there is a live
+            version of the object.
+
+            This field is a member of `oneof`_ ``_if_generation_not_match``.
+        if_metageneration_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration matches the
+            given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_match``.
+        if_metageneration_not_match (int):
+            Makes the operation conditional on whether
+            the object's current metageneration does not
+            match the given value.
+
+            This field is a member of `oneof`_ ``_if_metageneration_not_match``.
+        object_size (int):
+            The expected final object size being uploaded. If this value
+            is set, closing the stream after writing fewer or more than
+            ``object_size`` bytes will result in an OUT_OF_RANGE error.
+
+            This situation is considered a client error, and if such an
+            error occurs you must start the upload over from scratch,
+            this time sending the correct number of bytes.
+
+            This field is a member of `oneof`_ ``_object_size``.
+        appendable (bool):
+            If true, the object will be created in
+            appendable mode.
This field may only be set when + using BidiWriteObject. + + This field is a member of `oneof`_ ``_appendable``. + """ + + resource: "Object" = proto.Field( + proto.MESSAGE, + number=1, + message="Object", + ) + predefined_acl: str = proto.Field( + proto.STRING, + number=7, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=6, + optional=True, + ) + object_size: int = proto.Field( + proto.INT64, + number=8, + optional=True, + ) + appendable: bool = proto.Field( + proto.BOOL, + number=9, + optional=True, + ) + + +class WriteObjectRequest(proto.Message): + r"""Request message for WriteObject. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + upload_id (str): + For resumable uploads. This should be the ``upload_id`` + returned from a call to ``StartResumableWriteResponse``. + + This field is a member of `oneof`_ ``first_message``. + write_object_spec (google.cloud.storage_v2.types.WriteObjectSpec): + For non-resumable uploads. Describes the + overall upload, including the destination bucket + and object name, preconditions, etc. + + This field is a member of `oneof`_ ``first_message``. + write_offset (int): + Required. The offset from the beginning of the object at + which the data should be written. + + In the first ``WriteObjectRequest`` of a ``WriteObject()`` + action, it indicates the initial offset for the ``Write()`` + call. The value **must** be equal to the ``persisted_size`` + that a call to ``QueryWriteStatus()`` would return (0 if + this is the first write to the object). + + On subsequent calls, this value **must** be no larger than + the sum of the first ``write_offset`` and the sizes of all + ``data`` chunks sent previously on this stream. + + An incorrect value will cause an error. + checksummed_data (google.cloud.storage_v2.types.ChecksummedData): + The data to insert. If a crc32c checksum is + provided that doesn't match the checksum + computed by the service, the request will fail. + + This field is a member of `oneof`_ ``data``. + object_checksums (google.cloud.storage_v2.types.ObjectChecksums): + Optional. Checksums for the complete object. If the + checksums computed by the service don't match the specified + checksums the call will fail. May only be provided in the + first or last request (either with first_message, or + finish_write set). + finish_write (bool): + Optional. If ``true``, this indicates that the write is + complete. Sending any ``WriteObjectRequest``\ s subsequent + to one in which ``finish_write`` is ``true`` will cause an + error. For a non-resumable write (where the upload_id was + not set in the first message), it is an error not to set + this field in the final message of the stream. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. 
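+
+    Example (an illustrative sketch, not generated code; assumes
+    ``from google.cloud import storage_v2``; a small one-shot,
+    non-resumable write sends the spec, the data, and ``finish_write``
+    in a single message)::
+
+        request = storage_v2.WriteObjectRequest(
+            write_object_spec=storage_v2.WriteObjectSpec(
+                resource=storage_v2.Object(
+                    bucket="projects/_/buckets/my-bucket",
+                    name="my-object",
+                ),
+            ),
+            write_offset=0,
+            checksummed_data=storage_v2.ChecksummedData(content=b"hello"),
+            finish_write=True,
+        )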
+ """ + + upload_id: str = proto.Field( + proto.STRING, + number=1, + oneof="first_message", + ) + write_object_spec: "WriteObjectSpec" = proto.Field( + proto.MESSAGE, + number=2, + oneof="first_message", + message="WriteObjectSpec", + ) + write_offset: int = proto.Field( + proto.INT64, + number=3, + ) + checksummed_data: "ChecksummedData" = proto.Field( + proto.MESSAGE, + number=4, + oneof="data", + message="ChecksummedData", + ) + object_checksums: "ObjectChecksums" = proto.Field( + proto.MESSAGE, + number=6, + message="ObjectChecksums", + ) + finish_write: bool = proto.Field( + proto.BOOL, + number=7, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=8, + message="CommonObjectRequestParams", + ) + + +class WriteObjectResponse(proto.Message): + r"""Response message for WriteObject. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + persisted_size (int): + The total number of bytes that have been processed for the + given object from all ``WriteObject`` calls. Only set if the + upload has not finalized. + + This field is a member of `oneof`_ ``write_status``. + resource (google.cloud.storage_v2.types.Object): + A resource containing the metadata for the + uploaded object. Only set if the upload has + finalized. + + This field is a member of `oneof`_ ``write_status``. + """ + + persisted_size: int = proto.Field( + proto.INT64, + number=1, + oneof="write_status", + ) + resource: "Object" = proto.Field( + proto.MESSAGE, + number=2, + oneof="write_status", + message="Object", + ) + + +class AppendObjectSpec(proto.Message): + r"""Describes an attempt to append to an object, possibly over + multiple requests. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (str): + Required. The name of the bucket containing + the object to write. + object_ (str): + Required. The name of the object to open for + writing. + generation (int): + Required. The generation number of the object + to open for writing. + if_metageneration_match (int): + Makes the operation conditional on whether the object's + current metageneration matches the given value. + + Note that metageneration preconditions are only checked if + ``write_handle`` is empty. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Makes the operation conditional on whether the object's + current metageneration does not match the given value. + + Note that metageneration preconditions are only checked if + ``write_handle`` is empty. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. + routing_token (str): + An optional routing token that influences + request routing for the stream. Must be provided + if a BidiWriteObjectRedirectedError is returned. + + This field is a member of `oneof`_ ``_routing_token``. + write_handle (google.cloud.storage_v2.types.BidiWriteHandle): + An optional write handle returned from a previous + BidiWriteObjectResponse message or a + BidiWriteObjectRedirectedError error. + + Note that metageneration preconditions are only checked if + ``write_handle`` is empty. + + This field is a member of `oneof`_ ``_write_handle``. 
+ """ + + bucket: str = proto.Field( + proto.STRING, + number=1, + ) + object_: str = proto.Field( + proto.STRING, + number=2, + ) + generation: int = proto.Field( + proto.INT64, + number=3, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + routing_token: str = proto.Field( + proto.STRING, + number=6, + optional=True, + ) + write_handle: "BidiWriteHandle" = proto.Field( + proto.MESSAGE, + number=7, + optional=True, + message="BidiWriteHandle", + ) + + +class BidiWriteObjectRequest(proto.Message): + r"""Request message for BidiWriteObject. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + upload_id (str): + For resumable uploads. This should be the ``upload_id`` + returned from a call to ``StartResumableWriteResponse``. + + This field is a member of `oneof`_ ``first_message``. + write_object_spec (google.cloud.storage_v2.types.WriteObjectSpec): + For non-resumable uploads. Describes the + overall upload, including the destination bucket + and object name, preconditions, etc. + + This field is a member of `oneof`_ ``first_message``. + append_object_spec (google.cloud.storage_v2.types.AppendObjectSpec): + For appendable uploads. Describes the object + to append to. + + This field is a member of `oneof`_ ``first_message``. + write_offset (int): + Required. The offset from the beginning of the object at + which the data should be written. + + In the first ``WriteObjectRequest`` of a ``WriteObject()`` + action, it indicates the initial offset for the ``Write()`` + call. The value **must** be equal to the ``persisted_size`` + that a call to ``QueryWriteStatus()`` would return (0 if + this is the first write to the object). + + On subsequent calls, this value **must** be no larger than + the sum of the first ``write_offset`` and the sizes of all + ``data`` chunks sent previously on this stream. + + An invalid value will cause an error. + checksummed_data (google.cloud.storage_v2.types.ChecksummedData): + The data to insert. If a crc32c checksum is + provided that doesn't match the checksum + computed by the service, the request will fail. + + This field is a member of `oneof`_ ``data``. + object_checksums (google.cloud.storage_v2.types.ObjectChecksums): + Optional. Checksums for the complete object. If the + checksums computed by the service don't match the specified + checksums the call will fail. May only be provided in the + first request or the last request (with finish_write set). + state_lookup (bool): + Optional. For each BidiWriteObjectRequest where state_lookup + is ``true`` or the client closes the stream, the service + will send a BidiWriteObjectResponse containing the current + persisted size. The persisted size sent in responses covers + all the bytes the server has persisted thus far and can be + used to decide what data is safe for the client to drop. + Note that the object's current size reported by the + BidiWriteObjectResponse may lag behind the number of bytes + written by the client. This field is ignored if + ``finish_write`` is set to true. + flush (bool): + Optional. 
Persists data written on the stream, up to and + including the current message, to permanent storage. This + option should be used sparingly as it may reduce + performance. Ongoing writes will periodically be persisted + on the server even when ``flush`` is not set. This field is + ignored if ``finish_write`` is set to true since there's no + need to checkpoint or flush if this message completes the + write. + finish_write (bool): + Optional. If ``true``, this indicates that the write is + complete. Sending any ``WriteObjectRequest``\ s subsequent + to one in which ``finish_write`` is ``true`` will cause an + error. For a non-resumable write (where the upload_id was + not set in the first message), it is an error not to set + this field in the final message of the stream. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + """ + + upload_id: str = proto.Field( + proto.STRING, + number=1, + oneof="first_message", + ) + write_object_spec: "WriteObjectSpec" = proto.Field( + proto.MESSAGE, + number=2, + oneof="first_message", + message="WriteObjectSpec", + ) + append_object_spec: "AppendObjectSpec" = proto.Field( + proto.MESSAGE, + number=11, + oneof="first_message", + message="AppendObjectSpec", + ) + write_offset: int = proto.Field( + proto.INT64, + number=3, + ) + checksummed_data: "ChecksummedData" = proto.Field( + proto.MESSAGE, + number=4, + oneof="data", + message="ChecksummedData", + ) + object_checksums: "ObjectChecksums" = proto.Field( + proto.MESSAGE, + number=6, + message="ObjectChecksums", + ) + state_lookup: bool = proto.Field( + proto.BOOL, + number=7, + ) + flush: bool = proto.Field( + proto.BOOL, + number=8, + ) + finish_write: bool = proto.Field( + proto.BOOL, + number=9, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=10, + message="CommonObjectRequestParams", + ) + + +class BidiWriteObjectResponse(proto.Message): + r"""Response message for BidiWriteObject. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + persisted_size (int): + The total number of bytes that have been processed for the + given object from all ``WriteObject`` calls. Only set if the + upload has not finalized. + + This field is a member of `oneof`_ ``write_status``. + resource (google.cloud.storage_v2.types.Object): + A resource containing the metadata for the + uploaded object. Only set if the upload has + finalized. + + This field is a member of `oneof`_ ``write_status``. + write_handle (google.cloud.storage_v2.types.BidiWriteHandle): + An optional write handle that will + periodically be present in response messages. + Clients should save it for later use in + establishing a new stream if a connection is + interrupted. + + This field is a member of `oneof`_ ``_write_handle``. 
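+
+    Illustrative sketch (editorial; ``stream`` stands for the response
+    iterator of an assumed ``bidi_write_object`` call):
+
+    .. code-block:: python
+
+        write_handle = None
+        for response in stream:
+            # proto-plus supports ``in`` for field-presence checks.
+            if "write_handle" in response:
+                # Keep the newest handle so the stream can be
+                # re-established after an interruption.
+                write_handle = response.write_handle
+            if "persisted_size" in response:
+                print("persisted so far:", response.persisted_size)
+            elif "resource" in response:
+                print("finalized:", response.resource.name)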
+ """ + + persisted_size: int = proto.Field( + proto.INT64, + number=1, + oneof="write_status", + ) + resource: "Object" = proto.Field( + proto.MESSAGE, + number=2, + oneof="write_status", + message="Object", + ) + write_handle: "BidiWriteHandle" = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message="BidiWriteHandle", + ) + + +class ListObjectsRequest(proto.Message): + r"""Request message for ListObjects. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + parent (str): + Required. Name of the bucket in which to look + for objects. + page_size (int): + Optional. Maximum number of ``items`` plus ``prefixes`` to + return in a single page of responses. As duplicate + ``prefixes`` are omitted, fewer total results may be + returned than requested. The service will use this parameter + or 1,000 items, whichever is smaller. + page_token (str): + Optional. A previously-returned page token + representing part of the larger set of results + to view. + delimiter (str): + Optional. If set, returns results in a directory-like mode. + ``items`` will contain only objects whose names, aside from + the ``prefix``, do not contain ``delimiter``. Objects whose + names, aside from the ``prefix``, contain ``delimiter`` will + have their name, truncated after the ``delimiter``, returned + in ``prefixes``. Duplicate ``prefixes`` are omitted. + include_trailing_delimiter (bool): + Optional. If true, objects that end in exactly one instance + of ``delimiter`` will have their metadata included in + ``items`` in addition to ``prefixes``. + prefix (str): + Optional. Filter results to objects whose + names begin with this prefix. + versions (bool): + Optional. If ``true``, lists all versions of an object as + distinct results. For more information, see `Object + Versioning `__. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read from each result. If no + mask is specified, will default to all fields except + items.acl and items.owner. + + - may be used to mean "all fields". + + This field is a member of `oneof`_ ``_read_mask``. + lexicographic_start (str): + Optional. Filter results to objects whose names are + lexicographically equal to or after lexicographic_start. If + lexicographic_end is also set, the objects listed have names + between lexicographic_start (inclusive) and + lexicographic_end (exclusive). + lexicographic_end (str): + Optional. Filter results to objects whose names are + lexicographically before lexicographic_end. If + lexicographic_start is also set, the objects listed have + names between lexicographic_start (inclusive) and + lexicographic_end (exclusive). + soft_deleted (bool): + Optional. If true, only list all soft-deleted + versions of the object. Soft delete policy is + required to set this option. + include_folders_as_prefixes (bool): + Optional. If true, will also include folders and managed + folders (besides objects) in the returned ``prefixes``. + Requires ``delimiter`` to be set to '/'. + match_glob (str): + Optional. Filter results to objects and prefixes that match + this glob pattern. See `List Objects Using + Glob `__ + for the full syntax. + filter (str): + Optional. Filter the returned objects. Currently only + supported for the ``contexts`` field. If ``delimiter`` is + set, the returned ``prefixes`` are exempt from this filter. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + delimiter: str = proto.Field( + proto.STRING, + number=4, + ) + include_trailing_delimiter: bool = proto.Field( + proto.BOOL, + number=5, + ) + prefix: str = proto.Field( + proto.STRING, + number=6, + ) + versions: bool = proto.Field( + proto.BOOL, + number=7, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=8, + optional=True, + message=field_mask_pb2.FieldMask, + ) + lexicographic_start: str = proto.Field( + proto.STRING, + number=10, + ) + lexicographic_end: str = proto.Field( + proto.STRING, + number=11, + ) + soft_deleted: bool = proto.Field( + proto.BOOL, + number=12, + ) + include_folders_as_prefixes: bool = proto.Field( + proto.BOOL, + number=13, + ) + match_glob: str = proto.Field( + proto.STRING, + number=14, + ) + filter: str = proto.Field( + proto.STRING, + number=15, + ) + + +class QueryWriteStatusRequest(proto.Message): + r"""Request object for ``QueryWriteStatus``. + + Attributes: + upload_id (str): + Required. The name of the resume token for + the object whose write status is being + requested. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + """ + + upload_id: str = proto.Field( + proto.STRING, + number=1, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=2, + message="CommonObjectRequestParams", + ) + + +class QueryWriteStatusResponse(proto.Message): + r"""Response object for ``QueryWriteStatus``. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + persisted_size (int): + The total number of bytes that have been processed for the + given object from all ``WriteObject`` calls. This is the + correct value for the 'write_offset' field to use when + resuming the ``WriteObject`` operation. Only set if the + upload has not finalized. + + This field is a member of `oneof`_ ``write_status``. + resource (google.cloud.storage_v2.types.Object): + A resource containing the metadata for the + uploaded object. Only set if the upload has + finalized. + + This field is a member of `oneof`_ ``write_status``. + """ + + persisted_size: int = proto.Field( + proto.INT64, + number=1, + oneof="write_status", + ) + resource: "Object" = proto.Field( + proto.MESSAGE, + number=2, + oneof="write_status", + message="Object", + ) + + +class RewriteObjectRequest(proto.Message): + r"""Request message for RewriteObject. If the source object is encrypted + using a Customer-Supplied Encryption Key the key information must be + provided in the copy_source_encryption_algorithm, + copy_source_encryption_key_bytes, and + copy_source_encryption_key_sha256_bytes fields. If the destination + object should be encrypted the keying information should be provided + in the encryption_algorithm, encryption_key_bytes, and + encryption_key_sha256_bytes fields of the + common_object_request_params.customer_encryption field. + + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + destination_name (str): + Required. Immutable. The name of the destination object. See + the `Naming + Guidelines `__. + Example: ``test.txt`` The ``name`` field by itself does not + uniquely identify a Cloud Storage object. A Cloud Storage + object is uniquely identified by the tuple of (bucket, + object, generation). + destination_bucket (str): + Required. Immutable. The name of the bucket + containing the destination object. + destination_kms_key (str): + Optional. The name of the Cloud KMS key that + will be used to encrypt the destination object. + The Cloud KMS key must be located in same + location as the object. If the parameter is not + specified, the request uses the destination + bucket's default encryption key, if any, or else + the Google-managed encryption key. + destination (google.cloud.storage_v2.types.Object): + Optional. Properties of the destination, post-rewrite + object. The ``name``, ``bucket`` and ``kms_key`` fields must + not be populated (these values are specified in the + ``destination_name``, ``destination_bucket``, and + ``destination_kms_key`` fields). If ``destination`` is + present it will be used to construct the destination + object's metadata; otherwise the destination object's + metadata will be copied from the source object. + source_bucket (str): + Required. Name of the bucket in which to find + the source object. + source_object (str): + Required. Name of the source object. + source_generation (int): + Optional. If present, selects a specific + revision of the source object (as opposed to the + latest version, the default). + rewrite_token (str): + Optional. Include this field (from the + previous rewrite response) on each rewrite + request after the first one, until the rewrite + response 'done' flag is true. Calls that provide + a rewriteToken can omit all other request + fields, but if included those fields must match + the values provided in the first rewrite + request. + destination_predefined_acl (str): + Optional. Apply a predefined set of access + controls to the destination object. Valid values + are "authenticatedRead", + "bucketOwnerFullControl", "bucketOwnerRead", + "private", "projectPrivate", or "publicRead". + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. Setting to 0 makes the operation + succeed only if there are no live versions of + the object. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_generation_not_match (int): + Makes the operation conditional on whether + the object's live generation does not match the + given value. If no live object exists, the + precondition fails. Setting to 0 makes the + operation succeed only if there is a live + version of the object. + + This field is a member of `oneof`_ ``_if_generation_not_match``. + if_metageneration_match (int): + Makes the operation conditional on whether + the destination object's current metageneration + matches the given value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Makes the operation conditional on whether + the destination object's current metageneration + does not match the given value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. 
+ if_source_generation_match (int): + Makes the operation conditional on whether + the source object's live generation matches the + given value. + + This field is a member of `oneof`_ ``_if_source_generation_match``. + if_source_generation_not_match (int): + Makes the operation conditional on whether + the source object's live generation does not + match the given value. + + This field is a member of `oneof`_ ``_if_source_generation_not_match``. + if_source_metageneration_match (int): + Makes the operation conditional on whether + the source object's current metageneration + matches the given value. + + This field is a member of `oneof`_ ``_if_source_metageneration_match``. + if_source_metageneration_not_match (int): + Makes the operation conditional on whether + the source object's current metageneration does + not match the given value. + + This field is a member of `oneof`_ ``_if_source_metageneration_not_match``. + max_bytes_rewritten_per_call (int): + Optional. The maximum number of bytes that will be rewritten + per rewrite request. Most callers shouldn't need to specify + this parameter - it is primarily in place to support + testing. If specified the value must be an integral multiple + of 1 MiB (1048576). Also, this only applies to requests + where the source and destination span locations and/or + storage classes. Finally, this value must not change across + rewrite calls else you'll get an error that the + ``rewriteToken`` is invalid. + copy_source_encryption_algorithm (str): + Optional. The algorithm used to encrypt the + source object, if any. Used if the source object + was encrypted with a Customer-Supplied + Encryption Key. + copy_source_encryption_key_bytes (bytes): + Optional. The raw bytes (not base64-encoded) + AES-256 encryption key used to encrypt the + source object, if it was encrypted with a + Customer-Supplied Encryption Key. + copy_source_encryption_key_sha256_bytes (bytes): + Optional. The raw bytes (not base64-encoded) + SHA256 hash of the encryption key used to + encrypt the source object, if it was encrypted + with a Customer-Supplied Encryption Key. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + object_checksums (google.cloud.storage_v2.types.ObjectChecksums): + Optional. The checksums of the complete + object. This will be used to validate the + destination object after rewriting. 
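+
+    Illustrative sketch (editorial; client setup and all names are
+    assumptions):
+
+    .. code-block:: python
+
+        from google.cloud import storage_v2
+
+        client = storage_v2.StorageClient()
+        request = storage_v2.RewriteObjectRequest(
+            destination_name="copy.txt",
+            destination_bucket="projects/_/buckets/dst-bucket",
+            source_bucket="projects/_/buckets/src-bucket",
+            source_object="original.txt",
+        )
+        # Large cross-location copies may take several calls; feed the
+        # returned rewrite_token back until done is true.
+        response = client.rewrite_object(request=request)
+        while not response.done:
+            request.rewrite_token = response.rewrite_token
+            response = client.rewrite_object(request=request)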
+ """ + + destination_name: str = proto.Field( + proto.STRING, + number=24, + ) + destination_bucket: str = proto.Field( + proto.STRING, + number=25, + ) + destination_kms_key: str = proto.Field( + proto.STRING, + number=27, + ) + destination: "Object" = proto.Field( + proto.MESSAGE, + number=1, + message="Object", + ) + source_bucket: str = proto.Field( + proto.STRING, + number=2, + ) + source_object: str = proto.Field( + proto.STRING, + number=3, + ) + source_generation: int = proto.Field( + proto.INT64, + number=4, + ) + rewrite_token: str = proto.Field( + proto.STRING, + number=5, + ) + destination_predefined_acl: str = proto.Field( + proto.STRING, + number=28, + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=7, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=8, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=9, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=10, + optional=True, + ) + if_source_generation_match: int = proto.Field( + proto.INT64, + number=11, + optional=True, + ) + if_source_generation_not_match: int = proto.Field( + proto.INT64, + number=12, + optional=True, + ) + if_source_metageneration_match: int = proto.Field( + proto.INT64, + number=13, + optional=True, + ) + if_source_metageneration_not_match: int = proto.Field( + proto.INT64, + number=14, + optional=True, + ) + max_bytes_rewritten_per_call: int = proto.Field( + proto.INT64, + number=15, + ) + copy_source_encryption_algorithm: str = proto.Field( + proto.STRING, + number=16, + ) + copy_source_encryption_key_bytes: bytes = proto.Field( + proto.BYTES, + number=21, + ) + copy_source_encryption_key_sha256_bytes: bytes = proto.Field( + proto.BYTES, + number=22, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=19, + message="CommonObjectRequestParams", + ) + object_checksums: "ObjectChecksums" = proto.Field( + proto.MESSAGE, + number=29, + message="ObjectChecksums", + ) + + +class RewriteResponse(proto.Message): + r"""A rewrite response. + + Attributes: + total_bytes_rewritten (int): + The total bytes written so far, which can be + used to provide a waiting user with a progress + indicator. This property is always present in + the response. + object_size (int): + The total size of the object being copied in + bytes. This property is always present in the + response. + done (bool): + ``true`` if the copy is finished; otherwise, ``false`` if + the copy is in progress. This property is always present in + the response. + rewrite_token (str): + A token to use in subsequent requests to + continue copying data. This token is present in + the response only when there is more data to + copy. + resource (google.cloud.storage_v2.types.Object): + A resource containing the metadata for the + copied-to object. This property is present in + the response only when copying completes. + """ + + total_bytes_rewritten: int = proto.Field( + proto.INT64, + number=1, + ) + object_size: int = proto.Field( + proto.INT64, + number=2, + ) + done: bool = proto.Field( + proto.BOOL, + number=3, + ) + rewrite_token: str = proto.Field( + proto.STRING, + number=4, + ) + resource: "Object" = proto.Field( + proto.MESSAGE, + number=5, + message="Object", + ) + + +class MoveObjectRequest(proto.Message): + r"""Request message for MoveObject. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bucket (str): + Required. Name of the bucket in which the + object resides. + source_object (str): + Required. Name of the source object. + destination_object (str): + Required. Name of the destination object. + if_source_generation_match (int): + Optional. Makes the operation conditional on whether the + source object's current generation matches the given value. + ``if_source_generation_match`` and + ``if_source_generation_not_match`` conditions are mutually + exclusive: it's an error for both of them to be set in the + request. + + This field is a member of `oneof`_ ``_if_source_generation_match``. + if_source_generation_not_match (int): + Optional. Makes the operation conditional on whether the + source object's current generation does not match the given + value. ``if_source_generation_match`` and + ``if_source_generation_not_match`` conditions are mutually + exclusive: it's an error for both of them to be set in the + request. + + This field is a member of `oneof`_ ``_if_source_generation_not_match``. + if_source_metageneration_match (int): + Optional. Makes the operation conditional on whether the + source object's current metageneration matches the given + value. ``if_source_metageneration_match`` and + ``if_source_metageneration_not_match`` conditions are + mutually exclusive: it's an error for both of them to be set + in the request. + + This field is a member of `oneof`_ ``_if_source_metageneration_match``. + if_source_metageneration_not_match (int): + Optional. Makes the operation conditional on whether the + source object's current metageneration does not match the + given value. ``if_source_metageneration_match`` and + ``if_source_metageneration_not_match`` conditions are + mutually exclusive: it's an error for both of them to be set + in the request. + + This field is a member of `oneof`_ ``_if_source_metageneration_not_match``. + if_generation_match (int): + Optional. Makes the operation conditional on whether the + destination object's current generation matches the given + value. Setting to 0 makes the operation succeed only if + there are no live versions of the object. + ``if_generation_match`` and ``if_generation_not_match`` + conditions are mutually exclusive: it's an error for both of + them to be set in the request. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_generation_not_match (int): + Optional. Makes the operation conditional on whether the + destination object's current generation does not match the + given value. If no live object exists, the precondition + fails. Setting to 0 makes the operation succeed only if + there is a live version of the object. + ``if_generation_match`` and ``if_generation_not_match`` + conditions are mutually exclusive: it's an error for both of + them to be set in the request. + + This field is a member of `oneof`_ ``_if_generation_not_match``. + if_metageneration_match (int): + Optional. Makes the operation conditional on whether the + destination object's current metageneration matches the + given value. ``if_metageneration_match`` and + ``if_metageneration_not_match`` conditions are mutually + exclusive: it's an error for both of them to be set in the + request. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Optional. 
Makes the operation conditional on whether the
+            destination object's current metageneration does not match
+            the given value. ``if_metageneration_match`` and
+            ``if_metageneration_not_match`` conditions are mutually
+            exclusive: it's an error for both of them to be set in the
+            request.
+
+            This field is a member of `oneof`_ ``_if_metageneration_not_match``.
+    """
+
+    bucket: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    source_object: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    destination_object: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    if_source_generation_match: int = proto.Field(
+        proto.INT64,
+        number=4,
+        optional=True,
+    )
+    if_source_generation_not_match: int = proto.Field(
+        proto.INT64,
+        number=5,
+        optional=True,
+    )
+    if_source_metageneration_match: int = proto.Field(
+        proto.INT64,
+        number=6,
+        optional=True,
+    )
+    if_source_metageneration_not_match: int = proto.Field(
+        proto.INT64,
+        number=7,
+        optional=True,
+    )
+    if_generation_match: int = proto.Field(
+        proto.INT64,
+        number=8,
+        optional=True,
+    )
+    if_generation_not_match: int = proto.Field(
+        proto.INT64,
+        number=9,
+        optional=True,
+    )
+    if_metageneration_match: int = proto.Field(
+        proto.INT64,
+        number=10,
+        optional=True,
+    )
+    if_metageneration_not_match: int = proto.Field(
+        proto.INT64,
+        number=11,
+        optional=True,
+    )
+
+
+class StartResumableWriteRequest(proto.Message):
+    r"""Request message for StartResumableWrite.
+
+    Attributes:
+        write_object_spec (google.cloud.storage_v2.types.WriteObjectSpec):
+            Required. Contains the information necessary
+            to start a resumable write.
+        common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams):
+            Optional. A set of parameters common to
+            Storage API requests related to an object.
+        object_checksums (google.cloud.storage_v2.types.ObjectChecksums):
+            Optional. The checksums of the complete object. This is used
+            to validate the uploaded object. For each upload,
+            ``object_checksums`` can be provided when initiating a
+            resumable upload with ``StartResumableWriteRequest`` or
+            when completing a write with ``WriteObjectRequest`` with
+            ``finish_write`` set to ``true``.
+    """
+
+    write_object_spec: "WriteObjectSpec" = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message="WriteObjectSpec",
+    )
+    common_object_request_params: "CommonObjectRequestParams" = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message="CommonObjectRequestParams",
+    )
+    object_checksums: "ObjectChecksums" = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message="ObjectChecksums",
+    )
+
+
+class StartResumableWriteResponse(proto.Message):
+    r"""Response object for ``StartResumableWrite``.
+
+    Attributes:
+        upload_id (str):
+            A unique identifier for the initiated resumable write
+            operation. As the ID grants write access, you should keep it
+            confidential during the upload to prevent unauthorized
+            access and data tampering during your upload. This ID should
+            be included in subsequent ``WriteObject`` requests to upload
+            the object data.
+    """
+
+    upload_id: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class UpdateObjectRequest(proto.Message):
+    r"""Request message for UpdateObject.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        object_ (google.cloud.storage_v2.types.Object):
+            Required. The object to update.
+            The object's bucket and name fields are used to
+            identify the object to update.
If present, the + object's generation field selects a specific + revision of this object whose metadata should be + updated. Otherwise, assumes the live version of + the object. + if_generation_match (int): + Makes the operation conditional on whether + the object's current generation matches the + given value. Setting to 0 makes the operation + succeed only if there are no live versions of + the object. + + This field is a member of `oneof`_ ``_if_generation_match``. + if_generation_not_match (int): + Makes the operation conditional on whether + the object's live generation does not match the + given value. If no live object exists, the + precondition fails. Setting to 0 makes the + operation succeed only if there is a live + version of the object. + + This field is a member of `oneof`_ ``_if_generation_not_match``. + if_metageneration_match (int): + Makes the operation conditional on whether + the object's current metageneration matches the + given value. + + This field is a member of `oneof`_ ``_if_metageneration_match``. + if_metageneration_not_match (int): + Makes the operation conditional on whether + the object's current metageneration does not + match the given value. + + This field is a member of `oneof`_ ``_if_metageneration_not_match``. + predefined_acl (str): + Optional. Apply a predefined set of access + controls to this object. Valid values are + "authenticatedRead", "bucketOwnerFullControl", + "bucketOwnerRead", "private", "projectPrivate", + or "publicRead". + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. List of fields to be updated. + + To specify ALL fields, equivalent to the JSON API's "update" + function, specify a single field with the value ``*``. Note: + not recommended. If a new field is introduced at a later + time, an older client updating with the ``*`` may + accidentally reset the new field's value. + + Not specifying any fields is an error. + common_object_request_params (google.cloud.storage_v2.types.CommonObjectRequestParams): + Optional. A set of parameters common to + Storage API requests concerning an object. + override_unlocked_retention (bool): + Optional. Overrides the unlocked retention + config on the object. + """ + + object_: "Object" = proto.Field( + proto.MESSAGE, + number=1, + message="Object", + ) + if_generation_match: int = proto.Field( + proto.INT64, + number=2, + optional=True, + ) + if_generation_not_match: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + if_metageneration_match: int = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + if_metageneration_not_match: int = proto.Field( + proto.INT64, + number=5, + optional=True, + ) + predefined_acl: str = proto.Field( + proto.STRING, + number=10, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=7, + message=field_mask_pb2.FieldMask, + ) + common_object_request_params: "CommonObjectRequestParams" = proto.Field( + proto.MESSAGE, + number=8, + message="CommonObjectRequestParams", + ) + override_unlocked_retention: bool = proto.Field( + proto.BOOL, + number=11, + ) + + +class CommonObjectRequestParams(proto.Message): + r"""Parameters that can be passed to any object request. + + Attributes: + encryption_algorithm (str): + Optional. Encryption algorithm used with the + Customer-Supplied Encryption Keys feature. + encryption_key_bytes (bytes): + Optional. Encryption key used with the + Customer-Supplied Encryption Keys feature. In + raw bytes format (not base64-encoded). 
+        encryption_key_sha256_bytes (bytes):
+            Optional. SHA256 hash of encryption key used
+            with the Customer-Supplied Encryption Keys
+            feature.
+    """
+
+    encryption_algorithm: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    encryption_key_bytes: bytes = proto.Field(
+        proto.BYTES,
+        number=4,
+    )
+    encryption_key_sha256_bytes: bytes = proto.Field(
+        proto.BYTES,
+        number=5,
+    )
+
+
+class ServiceConstants(proto.Message):
+    r"""Shared constants."""
+
+    class Values(proto.Enum):
+        r"""A collection of constant values meaningful to the Storage
+        API.
+
+        Values:
+            VALUES_UNSPECIFIED (0):
+                Unused. Proto3 requires first enum to be 0.
+            MAX_READ_CHUNK_BYTES (2097152):
+                The maximum size chunk that will be
+                returned in a single ReadRequest.
+                2 MiB.
+            MAX_WRITE_CHUNK_BYTES (2097152):
+                The maximum size chunk that can be sent in a
+                single WriteObjectRequest. 2 MiB.
+            MAX_OBJECT_SIZE_MB (5242880):
+                The maximum size of an object in MB - whether
+                written in a single stream or composed from
+                multiple other objects. 5 TiB.
+            MAX_CUSTOM_METADATA_FIELD_NAME_BYTES (1024):
+                The maximum length field name that can be
+                sent in a single custom metadata field.
+                1 KiB.
+            MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES (4096):
+                The maximum length field value that can be sent in a single
+                custom_metadata field. 4 KiB.
+            MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES (8192):
+                The maximum total bytes that can be populated into all field
+                names and values of the custom_metadata for one object. 8
+                KiB.
+            MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES (20480):
+                The maximum total bytes that can be populated
+                into all bucket metadata fields.
+                20 KiB.
+            MAX_NOTIFICATION_CONFIGS_PER_BUCKET (100):
+                The maximum number of NotificationConfigs
+                that can be registered for a given bucket.
+            MAX_LIFECYCLE_RULES_PER_BUCKET (100):
+                The maximum number of LifecycleRules that can
+                be registered for a given bucket.
+            MAX_NOTIFICATION_CUSTOM_ATTRIBUTES (5):
+                The maximum number of custom attributes per
+                NotificationConfigs.
+            MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH (256):
+                The maximum length of a custom attribute key
+                included in NotificationConfig.
+            MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH (1024):
+                The maximum length of a custom attribute
+                value included in a NotificationConfig.
+            MAX_LABELS_ENTRIES_COUNT (64):
+                The maximum number of key/value entries per
+                bucket label.
+            MAX_LABELS_KEY_VALUE_LENGTH (63):
+                The maximum character length of the key or
+                value in a bucket label map.
+            MAX_LABELS_KEY_VALUE_BYTES (128):
+                The maximum byte size of the key or value in
+                a bucket label map.
+            MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST (1000):
+                The maximum number of object IDs that can be
+                included in a DeleteObjectsRequest.
+            SPLIT_TOKEN_MAX_VALID_DAYS (14):
+                The maximum number of days for which a token
+                returned by the GetListObjectsSplitPoints RPC is
+                valid.
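+
+        Illustrative sketch (editorial) of applying one of these
+        constants; the payload is a placeholder:
+
+        .. code-block:: python
+
+            from google.cloud import storage_v2
+
+            max_chunk = storage_v2.ServiceConstants.Values.MAX_WRITE_CHUNK_BYTES
+            payload = b"..."  # placeholder bytes
+            # Split the payload into service-acceptable write chunks.
+            chunks = [
+                payload[i : i + max_chunk]
+                for i in range(0, len(payload), max_chunk)
+            ]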
+ """ + _pb_options = {"allow_alias": True} + VALUES_UNSPECIFIED = 0 + MAX_READ_CHUNK_BYTES = 2097152 + MAX_WRITE_CHUNK_BYTES = 2097152 + MAX_OBJECT_SIZE_MB = 5242880 + MAX_CUSTOM_METADATA_FIELD_NAME_BYTES = 1024 + MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES = 4096 + MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES = 8192 + MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES = 20480 + MAX_NOTIFICATION_CONFIGS_PER_BUCKET = 100 + MAX_LIFECYCLE_RULES_PER_BUCKET = 100 + MAX_NOTIFICATION_CUSTOM_ATTRIBUTES = 5 + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH = 256 + MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = 1024 + MAX_LABELS_ENTRIES_COUNT = 64 + MAX_LABELS_KEY_VALUE_LENGTH = 63 + MAX_LABELS_KEY_VALUE_BYTES = 128 + MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST = 1000 + SPLIT_TOKEN_MAX_VALID_DAYS = 14 + + +class Bucket(proto.Message): + r"""A bucket. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Identifier. The name of the bucket. Format: + ``projects/{project}/buckets/{bucket}`` + bucket_id (str): + Output only. The user-chosen part of the bucket name. The + ``{bucket}`` portion of the ``name`` field. For globally + unique buckets, this is equal to the "bucket name" of other + Cloud Storage APIs. Example: "pub". + etag (str): + The etag of the bucket. + If included in the metadata of an + UpdateBucketRequest, the operation will only be + performed if the etag matches that of the + bucket. + project (str): + Immutable. The project which owns this + bucket, in the format of + "projects/{projectIdentifier}". + {projectIdentifier} can be the project ID or + project number. Output values will always be in + project number format. + metageneration (int): + Output only. The metadata generation of this + bucket. + location (str): + Immutable. The location of the bucket. Object data for + objects in the bucket resides in physical storage within + this region. Defaults to ``US``. See the + [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's + guide] for the authoritative list. Attempting to update this + field after the bucket is created will result in an error. + location_type (str): + Output only. The location type of the bucket + (region, dual-region, multi-region, etc). + storage_class (str): + Optional. The bucket's default storage class, used whenever + no storageClass is specified for a newly-created object. + This defines how objects in the bucket are stored and + determines the SLA and the cost of storage. If this value is + not specified when the bucket is created, it will default to + ``STANDARD``. For more information, see + https://developers.google.com/storage/docs/storage-classes. + rpo (str): + Optional. The recovery point objective for cross-region + replication of the bucket. Applicable only for dual- and + multi-region buckets. "DEFAULT" uses default replication. + "ASYNC_TURBO" enables turbo replication, valid for + dual-region buckets only. If rpo is not specified when the + bucket is created, it defaults to "DEFAULT". For more + information, see + https://cloud.google.com/storage/docs/availability-durability#turbo-replication. + acl (MutableSequence[google.cloud.storage_v2.types.BucketAccessControl]): + Optional. Access controls on the bucket. If + iam_config.uniform_bucket_level_access is enabled on this + bucket, requests to set, read, or modify acl is an error. + default_object_acl (MutableSequence[google.cloud.storage_v2.types.ObjectAccessControl]): + Optional. 
Default access controls to apply to new objects + when no ACL is provided. If + iam_config.uniform_bucket_level_access is enabled on this + bucket, requests to set, read, or modify acl is an error. + lifecycle (google.cloud.storage_v2.types.Bucket.Lifecycle): + Optional. The bucket's lifecycle config. See + [https://developers.google.com/storage/docs/lifecycle]Lifecycle + Management] for more information. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The creation time of the bucket. + cors (MutableSequence[google.cloud.storage_v2.types.Bucket.Cors]): + Optional. The bucket's + [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] + (CORS) config. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The modification time of the + bucket. + default_event_based_hold (bool): + Optional. The default value for event-based + hold on newly created objects in this bucket. + Event-based hold is a way to retain objects + indefinitely until an event occurs, signified by + the hold's release. After being released, such + objects will be subject to bucket-level + retention (if any). One sample use case of this + flag is for banks to hold loan documents for at + least 3 years after loan is paid in full. Here, + bucket-level retention is 3 years and the event + is loan being paid in full. In this example, + these objects will be held intact for any number + of years until the event has occurred + (event-based hold on the object is released) and + then 3 more years after that. That means + retention duration of the objects begins from + the moment event-based hold transitioned from + true to false. Objects under event-based hold + cannot be deleted, overwritten or archived until + the hold is removed. + labels (MutableMapping[str, str]): + Optional. User-provided labels, in key/value + pairs. + website (google.cloud.storage_v2.types.Bucket.Website): + Optional. The bucket's website config, controlling how the + service behaves when accessing bucket contents as a web + site. See the + [https://cloud.google.com/storage/docs/static-website][Static + Website Examples] for more information. + versioning (google.cloud.storage_v2.types.Bucket.Versioning): + Optional. The bucket's versioning config. + logging (google.cloud.storage_v2.types.Bucket.Logging): + Optional. The bucket's logging config, which + defines the destination bucket and name prefix + (if any) for the current bucket's logs. + owner (google.cloud.storage_v2.types.Owner): + Output only. The owner of the bucket. This is + always the project team's owner group. + encryption (google.cloud.storage_v2.types.Bucket.Encryption): + Optional. Encryption config for a bucket. + billing (google.cloud.storage_v2.types.Bucket.Billing): + Optional. The bucket's billing config. + retention_policy (google.cloud.storage_v2.types.Bucket.RetentionPolicy): + Optional. The bucket's retention policy. The retention + policy enforces a minimum retention time for all objects + contained in the bucket, based on their creation time. Any + attempt to overwrite or delete objects younger than the + retention period will result in a PERMISSION_DENIED error. + An unlocked retention policy can be modified or removed from + the bucket via a storage.buckets.update operation. A locked + retention policy cannot be removed or shortened in duration + for the lifetime of the bucket. Attempting to remove or + decrease period of a locked retention policy will result in + a PERMISSION_DENIED error. 
+        iam_config (google.cloud.storage_v2.types.Bucket.IamConfig):
+            Optional. The bucket's IAM config.
+        satisfies_pzs (bool):
+            Optional. Reserved for future use.
+        custom_placement_config (google.cloud.storage_v2.types.Bucket.CustomPlacementConfig):
+            Optional. Configuration that, if present, specifies the data
+            placement for a
+            [https://cloud.google.com/storage/docs/locations#location-dr][configurable
+            dual-region].
+        autoclass (google.cloud.storage_v2.types.Bucket.Autoclass):
+            Optional. The bucket's Autoclass
+            configuration. If there is no configuration, the
+            Autoclass feature will be disabled and have no
+            effect on the bucket.
+        hierarchical_namespace (google.cloud.storage_v2.types.Bucket.HierarchicalNamespace):
+            Optional. The bucket's hierarchical namespace
+            configuration. If there is no configuration, the
+            hierarchical namespace feature will be disabled
+            and have no effect on the bucket.
+        soft_delete_policy (google.cloud.storage_v2.types.Bucket.SoftDeletePolicy):
+            Optional. The bucket's soft delete policy.
+            The soft delete policy prevents soft-deleted
+            objects from being permanently deleted.
+        object_retention (google.cloud.storage_v2.types.Bucket.ObjectRetention):
+            Optional. The bucket's object retention
+            configuration. Must be enabled before objects in
+            the bucket may have retention configured.
+        ip_filter (google.cloud.storage_v2.types.Bucket.IpFilter):
+            Optional. The bucket's IP filter
+            configuration.
+
+            This field is a member of `oneof`_ ``_ip_filter``.
+    """
+
+    class Billing(proto.Message):
+        r"""Billing properties of a bucket.
+
+        Attributes:
+            requester_pays (bool):
+                Optional. When set to true, Requester Pays is
+                enabled for this bucket.
+        """
+
+        requester_pays: bool = proto.Field(
+            proto.BOOL,
+            number=1,
+        )
+
+    class Cors(proto.Message):
+        r"""Cross-Origin Resource Sharing (CORS) properties for a bucket.
+        For more on Cloud Storage and CORS, see
+        https://cloud.google.com/storage/docs/cross-origin. For more on
+        CORS in general, see https://tools.ietf.org/html/rfc6454.
+
+        Attributes:
+            origin (MutableSequence[str]):
+                Optional. The list of Origins eligible to receive CORS
+                response headers. See
+                [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on
+                origins. Note: "*" is permitted in the list of origins, and
+                means "any Origin".
+            method (MutableSequence[str]):
+                Optional. The list of HTTP methods on which to include CORS
+                response headers (``GET``, ``OPTIONS``, ``POST``, etc.).
+                Note: "*" is permitted in the list of methods, and means
+                "any method".
+            response_header (MutableSequence[str]):
+                Optional. The list of HTTP headers other than the
+                [https://www.w3.org/TR/cors/#simple-response-header][simple
+                response headers] to give permission for the user-agent to
+                share across domains.
+            max_age_seconds (int):
+                Optional. The value, in seconds, to return in the
+                [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age
+                header] used in preflight responses.
+        """
+
+        origin: MutableSequence[str] = proto.RepeatedField(
+            proto.STRING,
+            number=1,
+        )
+        method: MutableSequence[str] = proto.RepeatedField(
+            proto.STRING,
+            number=2,
+        )
+        response_header: MutableSequence[str] = proto.RepeatedField(
+            proto.STRING,
+            number=3,
+        )
+        max_age_seconds: int = proto.Field(
+            proto.INT32,
+            number=4,
+        )
+
+    class Encryption(proto.Message):
+        r"""Encryption properties of a bucket.
+
+        ..
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + default_kms_key (str): + Optional. The name of the Cloud KMS key that + will be used to encrypt objects inserted into + this bucket, if no encryption method is + specified. + google_managed_encryption_enforcement_config (google.cloud.storage_v2.types.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig): + Optional. If omitted, then new objects with + GMEK encryption-type is allowed. If set, then + new objects created in this bucket must comply + with enforcement config. Changing this has no + effect on existing objects; it applies to new + objects only. + + This field is a member of `oneof`_ ``_google_managed_encryption_enforcement_config``. + customer_managed_encryption_enforcement_config (google.cloud.storage_v2.types.Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig): + Optional. If omitted, then new objects with + CMEK encryption-type is allowed. If set, then + new objects created in this bucket must comply + with enforcement config. Changing this has no + effect on existing objects; it applies to new + objects only. + + This field is a member of `oneof`_ ``_customer_managed_encryption_enforcement_config``. + customer_supplied_encryption_enforcement_config (google.cloud.storage_v2.types.Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig): + Optional. If omitted, then new objects with + CSEK encryption-type is allowed. If set, then + new objects created in this bucket must comply + with enforcement config. Changing this has no + effect on existing objects; it applies to new + objects only. + + This field is a member of `oneof`_ ``_customer_supplied_encryption_enforcement_config``. + """ + + class GoogleManagedEncryptionEnforcementConfig(proto.Message): + r"""Google Managed Encryption (GMEK) enforcement config of a + bucket. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + restriction_mode (str): + Restriction mode for google-managed encryption for new + objects within the bucket. Valid values are: + "NotRestricted", "FullyRestricted". If ``NotRestricted`` or + unset, creation of new objects with google-managed + encryption is allowed. If ``FullyRestricted``, new objects + can't be created using google-managed encryption. + + This field is a member of `oneof`_ ``_restriction_mode``. + effective_time (google.protobuf.timestamp_pb2.Timestamp): + Time from which the config was effective. + This is service-provided. + + This field is a member of `oneof`_ ``_effective_time``. + """ + + restriction_mode: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + effective_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=timestamp_pb2.Timestamp, + ) + + class CustomerManagedEncryptionEnforcementConfig(proto.Message): + r"""Customer Managed Encryption (CMEK) enforcement config of a + bucket. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + restriction_mode (str): + Restriction mode for customer-managed encryption for new + objects within the bucket. Valid values are: + "NotRestricted", "FullyRestricted". If ``NotRestricted`` or + unset, creation of new objects with customer-managed + encryption is allowed. If ``FullyRestricted``, new objects + can't be created using customer-managed encryption. 
+ + This field is a member of `oneof`_ ``_restriction_mode``. + effective_time (google.protobuf.timestamp_pb2.Timestamp): + Time from which the config was effective. + This is service-provided. + + This field is a member of `oneof`_ ``_effective_time``. + """ + + restriction_mode: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + effective_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=timestamp_pb2.Timestamp, + ) + + class CustomerSuppliedEncryptionEnforcementConfig(proto.Message): + r"""Customer Supplied Encryption (CSEK) enforcement config of a + bucket. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + restriction_mode (str): + Restriction mode for customer-supplied encryption for new + objects within the bucket. Valid values are: + "NotRestricted", "FullyRestricted". If ``NotRestricted`` or + unset, creation of new objects with customer-supplied + encryption is allowed. If ``FullyRestricted``, new objects + can't be created using customer-supplied encryption. + + This field is a member of `oneof`_ ``_restriction_mode``. + effective_time (google.protobuf.timestamp_pb2.Timestamp): + Time from which the config was effective. + This is service-provided. + + This field is a member of `oneof`_ ``_effective_time``. + """ + + restriction_mode: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + effective_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=timestamp_pb2.Timestamp, + ) + + default_kms_key: str = proto.Field( + proto.STRING, + number=1, + ) + google_managed_encryption_enforcement_config: "Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig" = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message="Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig", + ) + customer_managed_encryption_enforcement_config: "Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig" = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message="Bucket.Encryption.CustomerManagedEncryptionEnforcementConfig", + ) + customer_supplied_encryption_enforcement_config: "Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig" = proto.Field( + proto.MESSAGE, + number=4, + optional=True, + message="Bucket.Encryption.CustomerSuppliedEncryptionEnforcementConfig", + ) + + class IamConfig(proto.Message): + r"""Bucket restriction options. + + Attributes: + uniform_bucket_level_access (google.cloud.storage_v2.types.Bucket.IamConfig.UniformBucketLevelAccess): + Optional. Bucket restriction options + currently enforced on the bucket. + public_access_prevention (str): + Optional. Whether IAM will enforce public + access prevention. Valid values are "enforced" + or "inherited". + """ + + class UniformBucketLevelAccess(proto.Message): + r"""Settings for Uniform Bucket level access. + See + https://cloud.google.com/storage/docs/uniform-bucket-level-access. + + Attributes: + enabled (bool): + Optional. If set, access checks only use + bucket-level IAM policies or above. + lock_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The deadline time for changing + ``iam_config.uniform_bucket_level_access.enabled`` from + ``true`` to ``false``. Mutable until the specified deadline + is reached, but not afterward. 
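+
+            Illustrative sketch (editorial; the field values below are
+            placeholders, not recommendations):
+
+            .. code-block:: python
+
+                from google.cloud import storage_v2
+
+                bucket = storage_v2.Bucket(
+                    iam_config=storage_v2.Bucket.IamConfig(
+                        uniform_bucket_level_access=storage_v2.Bucket.IamConfig.UniformBucketLevelAccess(
+                            enabled=True,
+                        ),
+                        public_access_prevention="enforced",
+                    ),
+                )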
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + lock_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + uniform_bucket_level_access: "Bucket.IamConfig.UniformBucketLevelAccess" = ( + proto.Field( + proto.MESSAGE, + number=1, + message="Bucket.IamConfig.UniformBucketLevelAccess", + ) + ) + public_access_prevention: str = proto.Field( + proto.STRING, + number=3, + ) + + class Lifecycle(proto.Message): + r"""Lifecycle properties of a bucket. + For more information, see + https://cloud.google.com/storage/docs/lifecycle. + + Attributes: + rule (MutableSequence[google.cloud.storage_v2.types.Bucket.Lifecycle.Rule]): + Optional. A lifecycle management rule, which + is made of an action to take and the + condition(s) under which the action will be + taken. + """ + + class Rule(proto.Message): + r"""A lifecycle Rule, combining an action to take on an object + and a condition which will trigger that action. + + Attributes: + action (google.cloud.storage_v2.types.Bucket.Lifecycle.Rule.Action): + Optional. The action to take. + condition (google.cloud.storage_v2.types.Bucket.Lifecycle.Rule.Condition): + Optional. The condition(s) under which the + action will be taken. + """ + + class Action(proto.Message): + r"""An action to take on an object. + + Attributes: + type_ (str): + Optional. Type of the action. Currently, only ``Delete``, + ``SetStorageClass``, and ``AbortIncompleteMultipartUpload`` + are supported. + storage_class (str): + Optional. Target storage class. Required iff + the type of the action is SetStorageClass. + """ + + type_: str = proto.Field( + proto.STRING, + number=1, + ) + storage_class: str = proto.Field( + proto.STRING, + number=2, + ) + + class Condition(proto.Message): + r"""A condition of an object which triggers some action. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + age_days (int): + Age of an object (in days). This condition is + satisfied when an object reaches the specified + age. A value of 0 indicates that all objects + immediately match this condition. + + This field is a member of `oneof`_ ``_age_days``. + created_before (google.type.date_pb2.Date): + Optional. This condition is satisfied when an + object is created before midnight of the + specified date in UTC. + is_live (bool): + Relevant only for versioned objects. If the value is + ``true``, this condition matches live objects; if the value + is ``false``, it matches archived objects. + + This field is a member of `oneof`_ ``_is_live``. + num_newer_versions (int): + Relevant only for versioned objects. If the + value is N, this condition is satisfied when + there are at least N versions (including the + live version) newer than this version of the + object. + + This field is a member of `oneof`_ ``_num_newer_versions``. + matches_storage_class (MutableSequence[str]): + Optional. Objects having any of the storage classes + specified by this condition will be matched. Values include + ``MULTI_REGIONAL``, ``REGIONAL``, ``NEARLINE``, + ``COLDLINE``, ``STANDARD``, and + ``DURABLE_REDUCED_AVAILABILITY``. + days_since_custom_time (int): + Number of days that have elapsed since the + custom timestamp set on an object. + The value of the field must be a nonnegative + integer. + + This field is a member of `oneof`_ ``_days_since_custom_time``. + custom_time_before (google.type.date_pb2.Date): + Optional. 
An object matches this condition if
+ the custom timestamp set on the object is before
+ the specified date in UTC.
+ days_since_noncurrent_time (int):
+ This condition is relevant only for versioned
+ objects. An object version satisfies this
+ condition only if this many days have passed
+ since it became noncurrent. The value of
+ the field must be a nonnegative integer. If it's
+ zero, the object version will become eligible
+ for Lifecycle action as soon as it becomes
+ noncurrent.
+
+ This field is a member of `oneof`_ ``_days_since_noncurrent_time``.
+ noncurrent_time_before (google.type.date_pb2.Date):
+ Optional. This condition is relevant only for
+ versioned objects. An object version satisfies
+ this condition only if it became noncurrent
+ before the specified date in UTC.
+ matches_prefix (MutableSequence[str]):
+ Optional. List of object name prefixes. If
+ any prefix exactly matches the beginning of the
+ object name, the condition evaluates to true.
+ matches_suffix (MutableSequence[str]):
+ Optional. List of object name suffixes. If
+ any suffix exactly matches the end of the object
+ name, the condition evaluates to true.
+ """
+
+ age_days: int = proto.Field(
+ proto.INT32,
+ number=1,
+ optional=True,
+ )
+ created_before: date_pb2.Date = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=date_pb2.Date,
+ )
+ is_live: bool = proto.Field(
+ proto.BOOL,
+ number=3,
+ optional=True,
+ )
+ num_newer_versions: int = proto.Field(
+ proto.INT32,
+ number=4,
+ optional=True,
+ )
+ matches_storage_class: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=5,
+ )
+ days_since_custom_time: int = proto.Field(
+ proto.INT32,
+ number=7,
+ optional=True,
+ )
+ custom_time_before: date_pb2.Date = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=date_pb2.Date,
+ )
+ days_since_noncurrent_time: int = proto.Field(
+ proto.INT32,
+ number=9,
+ optional=True,
+ )
+ noncurrent_time_before: date_pb2.Date = proto.Field(
+ proto.MESSAGE,
+ number=10,
+ message=date_pb2.Date,
+ )
+ matches_prefix: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=11,
+ )
+ matches_suffix: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=12,
+ )
+
+ action: "Bucket.Lifecycle.Rule.Action" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Bucket.Lifecycle.Rule.Action",
+ )
+ condition: "Bucket.Lifecycle.Rule.Condition" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="Bucket.Lifecycle.Rule.Condition",
+ )
+
+ rule: MutableSequence["Bucket.Lifecycle.Rule"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="Bucket.Lifecycle.Rule",
+ )
+
+ class Logging(proto.Message):
+ r"""Logging-related properties of a bucket.
+
+ Attributes:
+ log_bucket (str):
+ Optional. The destination bucket where the current bucket's
+ logs should be placed, using path format (like
+ ``projects/123456/buckets/foo``).
+ log_object_prefix (str):
+ Optional. A prefix for log object names.
+ """
+
+ log_bucket: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ log_object_prefix: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+ class ObjectRetention(proto.Message):
+ r"""Object Retention related properties of a bucket.
+
+ Attributes:
+ enabled (bool):
+ Optional. Output only. If true, object
+ retention is enabled for the bucket.
+ """
+
+ enabled: bool = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
+
+ class RetentionPolicy(proto.Message):
+ r"""Retention policy properties of a bucket.
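+
+ A minimal construction sketch (illustrative only; the fields are
+ documented under Attributes below)::
+
+     from google.protobuf import duration_pb2
+
+     from google.cloud import storage_v2
+
+     # Retain objects for at least one day (86400 seconds).
+     retention_policy = storage_v2.Bucket.RetentionPolicy(
+         retention_duration=duration_pb2.Duration(seconds=86400),
+     )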
+ + Attributes: + effective_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. Server-determined value that + indicates the time from which policy was + enforced and effective. + is_locked (bool): + Optional. Once locked, an object retention + policy cannot be modified. + retention_duration (google.protobuf.duration_pb2.Duration): + Optional. The duration that objects need to be retained. + Retention duration must be greater than zero and less than + 100 years. Note that enforcement of retention periods less + than a day is not guaranteed. Such periods should only be + used for testing purposes. Any ``nanos`` value specified + will be rounded down to the nearest second. + """ + + effective_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + is_locked: bool = proto.Field( + proto.BOOL, + number=2, + ) + retention_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + + class SoftDeletePolicy(proto.Message): + r"""Soft delete policy properties of a bucket. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + retention_duration (google.protobuf.duration_pb2.Duration): + The period of time that soft-deleted objects + in the bucket must be retained and cannot be + permanently deleted. The duration must be + greater than or equal to 7 days and less than 1 + year. + + This field is a member of `oneof`_ ``_retention_duration``. + effective_time (google.protobuf.timestamp_pb2.Timestamp): + Time from which the policy was effective. + This is service-provided. + + This field is a member of `oneof`_ ``_effective_time``. + """ + + retention_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + optional=True, + message=duration_pb2.Duration, + ) + effective_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message=timestamp_pb2.Timestamp, + ) + + class Versioning(proto.Message): + r"""Properties of a bucket related to versioning. + For more on Cloud Storage versioning, see + https://cloud.google.com/storage/docs/object-versioning. + + Attributes: + enabled (bool): + Optional. While set to true, versioning is + fully enabled for this bucket. + """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + class Website(proto.Message): + r"""Properties of a bucket related to accessing the contents as a + static website. For more on hosting a static website via Cloud + Storage, see + https://cloud.google.com/storage/docs/hosting-static-website. + + Attributes: + main_page_suffix (str): + Optional. If the requested object path is missing, the + service will ensure the path has a trailing '/', append this + suffix, and attempt to retrieve the resulting object. This + allows the creation of ``index.html`` objects to represent + directory pages. + not_found_page (str): + Optional. If the requested object path is missing, and any + ``mainPageSuffix`` object is missing, if applicable, the + service will return the named object from this bucket as the + content for a + [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not + Found] result. + """ + + main_page_suffix: str = proto.Field( + proto.STRING, + number=1, + ) + not_found_page: str = proto.Field( + proto.STRING, + number=2, + ) + + class CustomPlacementConfig(proto.Message): + r"""Configuration for Custom Dual Regions. 
It should specify precisely
+ two eligible regions within the same Multiregion. More information
+ on regions may be found
+ `here <https://cloud.google.com/storage/docs/locations>`__.
+
+ Attributes:
+ data_locations (MutableSequence[str]):
+ Optional. List of locations to use for data
+ placement.
+ """
+
+ data_locations: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=1,
+ )
+
+ class Autoclass(proto.Message):
+ r"""Configuration for a bucket's Autoclass feature.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ enabled (bool):
+ Optional. Enables Autoclass.
+ toggle_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Latest instant at which the ``enabled`` field
+ was set to true after being disabled/unconfigured or set to
+ false after being enabled. If Autoclass is enabled when the
+ bucket is created, the toggle_time is set to the bucket
+ creation time.
+ terminal_storage_class (str):
+ An object in an Autoclass bucket will
+ eventually cool down to the terminal storage
+ class if there is no access to the object. The
+ only valid values are NEARLINE and ARCHIVE.
+
+ This field is a member of `oneof`_ ``_terminal_storage_class``.
+ terminal_storage_class_update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Latest instant at which the
+ autoclass terminal storage class was updated.
+
+ This field is a member of `oneof`_ ``_terminal_storage_class_update_time``.
+ """
+
+ enabled: bool = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
+ toggle_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ terminal_storage_class: str = proto.Field(
+ proto.STRING,
+ number=3,
+ optional=True,
+ )
+ terminal_storage_class_update_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ optional=True,
+ message=timestamp_pb2.Timestamp,
+ )
+
+ class IpFilter(proto.Message):
+ r"""The `bucket IP
+ filtering <https://cloud.google.com/storage/docs/ip-filtering>`__
+ configuration. Specifies the network sources that can access the
+ bucket, as well as its underlying objects.
+
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ mode (str):
+ The state of the IP filter configuration. Valid values are
+ ``Enabled`` and ``Disabled``. When set to ``Enabled``, IP
+ filtering rules are applied to a bucket and all incoming
+ requests to the bucket are evaluated against these rules.
+ When set to ``Disabled``, IP filtering rules are not applied
+ to a bucket.
+
+ This field is a member of `oneof`_ ``_mode``.
+ public_network_source (google.cloud.storage_v2.types.Bucket.IpFilter.PublicNetworkSource):
+ Public IPs allowed to operate or access the
+ bucket.
+
+ This field is a member of `oneof`_ ``_public_network_source``.
+ vpc_network_sources (MutableSequence[google.cloud.storage_v2.types.Bucket.IpFilter.VpcNetworkSource]):
+ Optional. The list of network sources that
+ are allowed to access operations on the bucket
+ or the underlying objects.
+ allow_cross_org_vpcs (bool):
+ Optional. Whether or not to allow VPCs from
+ orgs different from the bucket's parent org to
+ access the bucket. When set to true, validations
+ on the existence of the VPCs won't be performed.
+ If set to false, each VPC network source will be
+ checked to belong to the same org as the bucket
+ as well as validated for existence.
+ allow_all_service_agent_access (bool):
+ Whether or not to allow all P4SA access to
+ the bucket.
When set to true, IP filter config + validation will not apply. + + This field is a member of `oneof`_ ``_allow_all_service_agent_access``. + """ + + class PublicNetworkSource(proto.Message): + r"""The public network IP address ranges that can access the + bucket and its data. + + Attributes: + allowed_ip_cidr_ranges (MutableSequence[str]): + Optional. The list of IPv4 and IPv6 cidr + blocks that are allowed to operate or access the + bucket and its underlying objects. + """ + + allowed_ip_cidr_ranges: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + class VpcNetworkSource(proto.Message): + r"""The list of VPC networks that can access the bucket. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + network (str): + Name of the network. + + Format: ``projects/PROJECT_ID/global/networks/NETWORK_NAME`` + + This field is a member of `oneof`_ ``_network``. + allowed_ip_cidr_ranges (MutableSequence[str]): + Optional. The list of public or private IPv4 and IPv6 CIDR + ranges that can access the bucket. In the CIDR IP address + block, the specified IP address must be properly truncated, + meaning all the host bits must be zero or else the input is + considered malformed. For example, ``192.0.2.0/24`` is + accepted but ``192.0.2.1/24`` is not. Similarly, for IPv6, + ``2001:db8::/32`` is accepted whereas ``2001:db8::1/32`` is + not. + """ + + network: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + allowed_ip_cidr_ranges: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + + mode: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + public_network_source: "Bucket.IpFilter.PublicNetworkSource" = proto.Field( + proto.MESSAGE, + number=2, + optional=True, + message="Bucket.IpFilter.PublicNetworkSource", + ) + vpc_network_sources: MutableSequence[ + "Bucket.IpFilter.VpcNetworkSource" + ] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="Bucket.IpFilter.VpcNetworkSource", + ) + allow_cross_org_vpcs: bool = proto.Field( + proto.BOOL, + number=4, + ) + allow_all_service_agent_access: bool = proto.Field( + proto.BOOL, + number=5, + optional=True, + ) + + class HierarchicalNamespace(proto.Message): + r"""Configuration for a bucket's hierarchical namespace feature. + + Attributes: + enabled (bool): + Optional. Enables the hierarchical namespace + feature. 
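+
+ An illustrative sketch (not part of the generated file; assumes
+ hierarchical namespace is chosen when the bucket resource is
+ defined)::
+
+     from google.cloud import storage_v2
+
+     # Request a bucket resource with hierarchical namespace enabled.
+     bucket = storage_v2.Bucket(
+         hierarchical_namespace=storage_v2.Bucket.HierarchicalNamespace(
+             enabled=True,
+         ),
+     )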
+ """ + + enabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + bucket_id: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=29, + ) + project: str = proto.Field( + proto.STRING, + number=3, + ) + metageneration: int = proto.Field( + proto.INT64, + number=4, + ) + location: str = proto.Field( + proto.STRING, + number=5, + ) + location_type: str = proto.Field( + proto.STRING, + number=6, + ) + storage_class: str = proto.Field( + proto.STRING, + number=7, + ) + rpo: str = proto.Field( + proto.STRING, + number=27, + ) + acl: MutableSequence["BucketAccessControl"] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message="BucketAccessControl", + ) + default_object_acl: MutableSequence["ObjectAccessControl"] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message="ObjectAccessControl", + ) + lifecycle: Lifecycle = proto.Field( + proto.MESSAGE, + number=10, + message=Lifecycle, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + cors: MutableSequence[Cors] = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=Cors, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + default_event_based_hold: bool = proto.Field( + proto.BOOL, + number=14, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + website: Website = proto.Field( + proto.MESSAGE, + number=16, + message=Website, + ) + versioning: Versioning = proto.Field( + proto.MESSAGE, + number=17, + message=Versioning, + ) + logging: Logging = proto.Field( + proto.MESSAGE, + number=18, + message=Logging, + ) + owner: "Owner" = proto.Field( + proto.MESSAGE, + number=19, + message="Owner", + ) + encryption: Encryption = proto.Field( + proto.MESSAGE, + number=20, + message=Encryption, + ) + billing: Billing = proto.Field( + proto.MESSAGE, + number=21, + message=Billing, + ) + retention_policy: RetentionPolicy = proto.Field( + proto.MESSAGE, + number=22, + message=RetentionPolicy, + ) + iam_config: IamConfig = proto.Field( + proto.MESSAGE, + number=23, + message=IamConfig, + ) + satisfies_pzs: bool = proto.Field( + proto.BOOL, + number=25, + ) + custom_placement_config: CustomPlacementConfig = proto.Field( + proto.MESSAGE, + number=26, + message=CustomPlacementConfig, + ) + autoclass: Autoclass = proto.Field( + proto.MESSAGE, + number=28, + message=Autoclass, + ) + hierarchical_namespace: HierarchicalNamespace = proto.Field( + proto.MESSAGE, + number=32, + message=HierarchicalNamespace, + ) + soft_delete_policy: SoftDeletePolicy = proto.Field( + proto.MESSAGE, + number=31, + message=SoftDeletePolicy, + ) + object_retention: ObjectRetention = proto.Field( + proto.MESSAGE, + number=33, + message=ObjectRetention, + ) + ip_filter: IpFilter = proto.Field( + proto.MESSAGE, + number=38, + optional=True, + message=IpFilter, + ) + + +class BucketAccessControl(proto.Message): + r"""An access-control entry. + + Attributes: + role (str): + Optional. The access permission for the + entity. + id (str): + Optional. The ID of the access-control entry. + entity (str): + Optional. 
The entity holding the permission, in one of the
+ following forms:
+
+ - ``user-{userid}``
+ - ``user-{email}``
+ - ``group-{groupid}``
+ - ``group-{email}``
+ - ``domain-{domain}``
+ - ``project-{team}-{projectnumber}``
+ - ``project-{team}-{projectid}``
+ - ``allUsers``
+ - ``allAuthenticatedUsers`` Examples:
+ - The user ``liz@example.com`` would be
+ ``user-liz@example.com``.
+ - The group ``example@googlegroups.com`` would be
+ ``group-example@googlegroups.com``.
+ - All members of the Google Apps for Business domain
+ ``example.com`` would be ``domain-example.com``. For
+ project entities, ``project-{team}-{projectnumber}``
+ format will be returned on response.
+ entity_alt (str):
+ Output only. The alternative entity format, if exists. For
+ project entities, ``project-{team}-{projectid}`` format will
+ be returned on response.
+ entity_id (str):
+ Optional. The ID for the entity, if any.
+ etag (str):
+ Optional. The etag of the
+ BucketAccessControl. If included in the metadata
+ of an update or delete request message, the
+ operation will only be performed if
+ the etag matches that of the bucket's
+ BucketAccessControl.
+ email (str):
+ Optional. The email address associated with
+ the entity, if any.
+ domain (str):
+ Optional. The domain associated with the
+ entity, if any.
+ project_team (google.cloud.storage_v2.types.ProjectTeam):
+ Optional. The project team associated with
+ the entity, if any.
+ """
+
+ role: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ entity: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ entity_alt: str = proto.Field(
+ proto.STRING,
+ number=9,
+ )
+ entity_id: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ etag: str = proto.Field(
+ proto.STRING,
+ number=8,
+ )
+ email: str = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ domain: str = proto.Field(
+ proto.STRING,
+ number=6,
+ )
+ project_team: "ProjectTeam" = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message="ProjectTeam",
+ )
+
+
+class ChecksummedData(proto.Message):
+ r"""Message used to convey content being read or written, along
+ with an optional checksum.
+
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ content (bytes):
+ Optional. The data.
+ crc32c (int):
+ If set, the CRC32C digest of the content
+ field.
+
+ This field is a member of `oneof`_ ``_crc32c``.
+ """
+
+ content: bytes = proto.Field(
+ proto.BYTES,
+ number=1,
+ )
+ crc32c: int = proto.Field(
+ proto.FIXED32,
+ number=2,
+ optional=True,
+ )
+
+
+class ObjectChecksums(proto.Message):
+ r"""Message used for storing full (not subrange) object
+ checksums.
+
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ crc32c (int):
+ CRC32C digest of the object data. Computed by
+ the Cloud Storage service for all written
+ objects. If set in a WriteObjectRequest, the
+ service will validate that the stored object
+ matches this checksum.
+
+ This field is a member of `oneof`_ ``_crc32c``.
+ md5_hash (bytes):
+ Optional. 128 bit MD5 hash of the object data. For more
+ information about using the MD5 hash, see
+ [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes
+ and ETags: Best Practices]. Not all objects will provide an
+ MD5 hash. For example, composite objects provide only crc32c
+ hashes.
This value is equivalent to running
+ ``cat object.txt | openssl md5 -binary``.
+ """
+
+ crc32c: int = proto.Field(
+ proto.FIXED32,
+ number=1,
+ optional=True,
+ )
+ md5_hash: bytes = proto.Field(
+ proto.BYTES,
+ number=2,
+ )
+
+
+class ObjectCustomContextPayload(proto.Message):
+ r"""The payload of a single user-defined object context.
+
+ Attributes:
+ value (str):
+ Required. The value of the object context.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time at which the object
+ context was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time at which the object
+ context was last updated.
+ """
+
+ value: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ update_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class ObjectContexts(proto.Message):
+ r"""All contexts of an object grouped by type.
+
+ Attributes:
+ custom (MutableMapping[str, google.cloud.storage_v2.types.ObjectCustomContextPayload]):
+ Optional. User-defined object contexts.
+ """
+
+ custom: MutableMapping[str, "ObjectCustomContextPayload"] = proto.MapField(
+ proto.STRING,
+ proto.MESSAGE,
+ number=1,
+ message="ObjectCustomContextPayload",
+ )
+
+
+class CustomerEncryption(proto.Message):
+ r"""Describes the Customer-Supplied Encryption Key mechanism used
+ to store an Object's data at rest.
+
+ Attributes:
+ encryption_algorithm (str):
+ Optional. The encryption algorithm.
+ key_sha256_bytes (bytes):
+ Optional. SHA256 hash value of the encryption
+ key. In raw bytes format (not base64-encoded).
+ """
+
+ encryption_algorithm: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ key_sha256_bytes: bytes = proto.Field(
+ proto.BYTES,
+ number=3,
+ )
+
+
+class Object(proto.Message):
+ r"""An object.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Immutable. The name of this object. Nearly any sequence of
+ unicode characters is valid. See
+ `Guidelines <https://cloud.google.com/storage/docs/objects#naming>`__.
+ Example: ``test.txt``. The ``name`` field by itself does not
+ uniquely identify a Cloud Storage object. A Cloud Storage
+ object is uniquely identified by the tuple of (bucket,
+ object, generation).
+ bucket (str):
+ Immutable. The name of the bucket containing
+ this object.
+ etag (str):
+ Optional. The etag of the object.
+ If included in the metadata of an update or
+ delete request message, the operation will only
+ be performed if the etag matches that of the
+ live object.
+ generation (int):
+ Immutable. The content generation of this
+ object. Used for object versioning.
+ restore_token (str):
+ Output only. Restore token used to
+ differentiate deleted objects with the same name
+ and generation. This field is output only, and
+ only set for deleted objects in HNS buckets.
+
+ This field is a member of `oneof`_ ``_restore_token``.
+ metageneration (int):
+ Output only. The version of the metadata for
+ this generation of this object. Used for
+ preconditions and for detecting changes in
+ metadata. A metageneration number is only
+ meaningful in the context of a particular
+ generation of a particular object.
+ storage_class (str):
+ Optional. Storage class of the object.
+ size (int):
+ Output only.
Content-Length of the object data in bytes,
+ matching
+ [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230
+ §3.3.2].
+ content_encoding (str):
+ Optional. Content-Encoding of the object data, matching
+ [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC
+ 7231 §3.1.2.2].
+ content_disposition (str):
+ Optional. Content-Disposition of the object data, matching
+ [https://tools.ietf.org/html/rfc6266][RFC 6266].
+ cache_control (str):
+ Optional. Cache-Control directive for the object data,
+ matching
+ [https://tools.ietf.org/html/rfc7234#section-5.2][RFC 7234
+ §5.2]. If omitted, and the object is accessible to all
+ anonymous users, the default will be
+ ``public, max-age=3600``.
+ acl (MutableSequence[google.cloud.storage_v2.types.ObjectAccessControl]):
+ Optional. Access controls on the object. If
+ iam_config.uniform_bucket_level_access is enabled on the
+ parent bucket, requests to set, read, or modify acl result
+ in an error.
+ content_language (str):
+ Optional. Content-Language of the object data, matching
+ [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC
+ 7231 §3.1.3.2].
+ delete_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. If this object is noncurrent,
+ this is the time when the object became
+ noncurrent.
+ finalize_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time when the object was
+ finalized.
+ content_type (str):
+ Optional. Content-Type of the object data, matching
+ [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC
+ 7231 §3.1.1.5]. If an object is stored without a
+ Content-Type, it is served as ``application/octet-stream``.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The creation time of the object.
+ component_count (int):
+ Output only. Number of underlying components
+ that make up this object. Components are
+ accumulated by compose operations.
+ checksums (google.cloud.storage_v2.types.ObjectChecksums):
+ Output only. Hashes for the data part of this
+ object. This field is used for output only and
+ will be silently ignored if provided in
+ requests. These are the checksums of the complete
+ object regardless of data range. If the object is
+ downloaded in full, the client should compute
+ one of these checksums over the downloaded
+ object and compare it against the value provided
+ here.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The modification time of the
+ object metadata. Set initially to object
+ creation time and then updated whenever any
+ metadata of the object changes. This includes
+ changes made by a requester, such as modifying
+ custom metadata, as well as changes made by
+ Cloud Storage on behalf of a requester, such as
+ changing the storage class based on an Object
+ Lifecycle Configuration.
+ kms_key (str):
+ Optional. Cloud KMS Key used to encrypt this
+ object, if the object is encrypted by such a
+ key.
+ update_storage_class_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time at which the object's storage class
+ was last changed. When the object is initially created, it
+ will be set to time_created.
+ temporary_hold (bool):
+ Optional. Whether an object is under
+ temporary hold. While this flag is set to true,
+ the object is protected against deletion and
+ overwrites. A common use case of this flag is
+ regulatory investigations where objects need to
+ be retained while the investigation is ongoing.
+ Note that unlike event-based hold, temporary + hold does not impact retention expiration time + of an object. + retention_expire_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. A server-determined value that + specifies the earliest time that the object's + retention period expires. Note 1: This field is + not provided for objects with an active + event-based hold, since retention expiration is + unknown until the hold is removed. Note 2: This + value can be provided even when temporary hold + is set (so that the user can reason about policy + without having to first unset the temporary + hold). + metadata (MutableMapping[str, str]): + Optional. User-provided metadata, in + key/value pairs. + contexts (google.cloud.storage_v2.types.ObjectContexts): + Optional. User-defined or system-defined + object contexts. Each object context is a + key-payload pair, where the key provides the + identification and the payload holds the + associated value and additional metadata. + event_based_hold (bool): + Whether an object is under event-based hold. An event-based + hold is a way to force the retention of an object until + after some event occurs. Once the hold is released by + explicitly setting this field to false, the object will + become subject to any bucket-level retention policy, except + that the retention duration will be calculated from the time + the event based hold was lifted, rather than the time the + object was created. + + In a WriteObject request, not setting this field implies + that the value should be taken from the parent bucket's + "default_event_based_hold" field. In a response, this field + will always be set to true or false. + + This field is a member of `oneof`_ ``_event_based_hold``. + owner (google.cloud.storage_v2.types.Owner): + Output only. The owner of the object. This + will always be the uploader of the object. + customer_encryption (google.cloud.storage_v2.types.CustomerEncryption): + Optional. Metadata of Customer-Supplied + Encryption Key, if the object is encrypted by + such a key. + custom_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. A user-specified timestamp set on + an object. + soft_delete_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. This is the time when the object became + soft-deleted. + + Soft-deleted objects are only accessible if a + soft_delete_policy is enabled. Also see hard_delete_time. + + This field is a member of `oneof`_ ``_soft_delete_time``. + hard_delete_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the object will be permanently + deleted. + + Only set when an object becomes soft-deleted with a + soft_delete_policy. Otherwise, the object will not be + accessible. + + This field is a member of `oneof`_ ``_hard_delete_time``. + retention (google.cloud.storage_v2.types.Object.Retention): + Optional. Retention configuration of this + object. May only be configured if the bucket has + object retention enabled. + """ + + class Retention(proto.Message): + r"""Specifies retention parameters of the object. Objects under + retention cannot be deleted or overwritten until their retention + expires. + + Attributes: + mode (google.cloud.storage_v2.types.Object.Retention.Mode): + Optional. The mode of the Retention. + retain_until_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The timestamp that the object needs + to be retained until. Value cannot be set in the + past or more than 100 years in the future. 
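+
+ A minimal construction sketch (illustrative only; ``Mode`` is the
+ enum defined below, and proto-plus accepts ``datetime`` values for
+ ``Timestamp`` fields)::
+
+     import datetime
+
+     from google.cloud import storage_v2
+
+     # Retain the object until a fixed future instant; UNLOCKED mode
+     # keeps the configuration editable.
+     retention = storage_v2.Object.Retention(
+         mode=storage_v2.Object.Retention.Mode.UNLOCKED,
+         retain_until_time=datetime.datetime(
+             2030, 1, 1, tzinfo=datetime.timezone.utc
+         ),
+     )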
+ """ + + class Mode(proto.Enum): + r"""Retention mode values. + + Values: + MODE_UNSPECIFIED (0): + No specified mode. Object is not under + retention. + UNLOCKED (1): + Retention period may be decreased or + increased. The Retention configuration may be + removed. The mode may be changed to locked. + LOCKED (2): + Retention period may be increased. + The Retention configuration cannot be removed. + The mode cannot be changed. + """ + MODE_UNSPECIFIED = 0 + UNLOCKED = 1 + LOCKED = 2 + + mode: "Object.Retention.Mode" = proto.Field( + proto.ENUM, + number=1, + enum="Object.Retention.Mode", + ) + retain_until_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + bucket: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=27, + ) + generation: int = proto.Field( + proto.INT64, + number=3, + ) + restore_token: str = proto.Field( + proto.STRING, + number=35, + optional=True, + ) + metageneration: int = proto.Field( + proto.INT64, + number=4, + ) + storage_class: str = proto.Field( + proto.STRING, + number=5, + ) + size: int = proto.Field( + proto.INT64, + number=6, + ) + content_encoding: str = proto.Field( + proto.STRING, + number=7, + ) + content_disposition: str = proto.Field( + proto.STRING, + number=8, + ) + cache_control: str = proto.Field( + proto.STRING, + number=9, + ) + acl: MutableSequence["ObjectAccessControl"] = proto.RepeatedField( + proto.MESSAGE, + number=10, + message="ObjectAccessControl", + ) + content_language: str = proto.Field( + proto.STRING, + number=11, + ) + delete_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + finalize_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=36, + message=timestamp_pb2.Timestamp, + ) + content_type: str = proto.Field( + proto.STRING, + number=13, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + component_count: int = proto.Field( + proto.INT32, + number=15, + ) + checksums: "ObjectChecksums" = proto.Field( + proto.MESSAGE, + number=16, + message="ObjectChecksums", + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + kms_key: str = proto.Field( + proto.STRING, + number=18, + ) + update_storage_class_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=19, + message=timestamp_pb2.Timestamp, + ) + temporary_hold: bool = proto.Field( + proto.BOOL, + number=20, + ) + retention_expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=21, + message=timestamp_pb2.Timestamp, + ) + metadata: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=22, + ) + contexts: "ObjectContexts" = proto.Field( + proto.MESSAGE, + number=38, + message="ObjectContexts", + ) + event_based_hold: bool = proto.Field( + proto.BOOL, + number=23, + optional=True, + ) + owner: "Owner" = proto.Field( + proto.MESSAGE, + number=24, + message="Owner", + ) + customer_encryption: "CustomerEncryption" = proto.Field( + proto.MESSAGE, + number=25, + message="CustomerEncryption", + ) + custom_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=26, + message=timestamp_pb2.Timestamp, + ) + soft_delete_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=28, + optional=True, 
+ message=timestamp_pb2.Timestamp, + ) + hard_delete_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=29, + optional=True, + message=timestamp_pb2.Timestamp, + ) + retention: Retention = proto.Field( + proto.MESSAGE, + number=30, + message=Retention, + ) + + +class ObjectAccessControl(proto.Message): + r"""An access-control entry. + + Attributes: + role (str): + Optional. The access permission for the entity. One of the + following values: + + - ``READER`` + - ``WRITER`` + - ``OWNER`` + id (str): + Optional. The ID of the access-control entry. + entity (str): + Optional. The entity holding the permission, in one of the + following forms: + + - ``user-{userid}`` + - ``user-{email}`` + - ``group-{groupid}`` + - ``group-{email}`` + - ``domain-{domain}`` + - ``project-{team}-{projectnumber}`` + - ``project-{team}-{projectid}`` + - ``allUsers`` + - ``allAuthenticatedUsers`` Examples: + - The user ``liz@example.com`` would be + ``user-liz@example.com``. + - The group ``example@googlegroups.com`` would be + ``group-example@googlegroups.com``. + - All members of the Google Apps for Business domain + ``example.com`` would be ``domain-example.com``. For + project entities, ``project-{team}-{projectnumber}`` + format will be returned on response. + entity_alt (str): + Output only. The alternative entity format, if exists. For + project entities, ``project-{team}-{projectid}`` format will + be returned on response. + entity_id (str): + Optional. The ID for the entity, if any. + etag (str): + Optional. The etag of the + ObjectAccessControl. If included in the metadata + of an update or delete request message, the + operation will only be performed if the etag + matches that of the live object's + ObjectAccessControl. + email (str): + Optional. The email address associated with + the entity, if any. + domain (str): + Optional. The domain associated with the + entity, if any. + project_team (google.cloud.storage_v2.types.ProjectTeam): + Optional. The project team associated with + the entity, if any. + """ + + role: str = proto.Field( + proto.STRING, + number=1, + ) + id: str = proto.Field( + proto.STRING, + number=2, + ) + entity: str = proto.Field( + proto.STRING, + number=3, + ) + entity_alt: str = proto.Field( + proto.STRING, + number=9, + ) + entity_id: str = proto.Field( + proto.STRING, + number=4, + ) + etag: str = proto.Field( + proto.STRING, + number=8, + ) + email: str = proto.Field( + proto.STRING, + number=5, + ) + domain: str = proto.Field( + proto.STRING, + number=6, + ) + project_team: "ProjectTeam" = proto.Field( + proto.MESSAGE, + number=7, + message="ProjectTeam", + ) + + +class ListObjectsResponse(proto.Message): + r"""The result of a call to Objects.ListObjects + + Attributes: + objects (MutableSequence[google.cloud.storage_v2.types.Object]): + The list of items. + prefixes (MutableSequence[str]): + The list of prefixes of objects + matching-but-not-listed up to and including the + requested delimiter. + next_page_token (str): + The continuation token, used to page through + large result sets. Provide this value in a + subsequent request to return the next page of + results. 
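+
+ A hypothetical paging loop (illustrative only; assumes a
+ ``StorageClient`` whose ``list_objects`` pager resubmits the request
+ with ``next_page_token`` until the token comes back empty)::
+
+     from google.cloud import storage_v2
+
+     client = storage_v2.StorageClient()
+     request = storage_v2.ListObjectsRequest(
+         parent="projects/_/buckets/my-bucket",
+     )
+     # Iterating the pager yields objects across all pages.
+     for obj in client.list_objects(request=request):
+         print(obj.name)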
+ """ + + @property + def raw_page(self): + return self + + objects: MutableSequence["Object"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Object", + ) + prefixes: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ProjectTeam(proto.Message): + r"""Represents the Viewers, Editors, or Owners of a given + project. + + Attributes: + project_number (str): + Optional. The project number. + team (str): + Optional. The team. + """ + + project_number: str = proto.Field( + proto.STRING, + number=1, + ) + team: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Owner(proto.Message): + r"""The owner of a specific resource. + + Attributes: + entity (str): + Optional. The entity, in the form ``user-``\ *userId*. + entity_id (str): + Optional. The ID for the entity. + """ + + entity: str = proto.Field( + proto.STRING, + number=1, + ) + entity_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ContentRange(proto.Message): + r"""Specifies a requested range of bytes to download. + + Attributes: + start (int): + The starting offset of the object data. This + value is inclusive. + end (int): + The ending offset of the object data. This + value is exclusive. + complete_length (int): + The complete length of the object data. + """ + + start: int = proto.Field( + proto.INT64, + number=1, + ) + end: int = proto.Field( + proto.INT64, + number=2, + ) + complete_length: int = proto.Field( + proto.INT64, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 000000000..574c5aed3 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.7 +namespace_packages = True diff --git a/noxfile.py b/noxfile.py index 693fcb1b4..451fced3e 100644 --- a/noxfile.py +++ b/noxfile.py @@ -101,7 +101,19 @@ def default(session, install_extras=True): CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) # Install all test dependencies, then install this package in-place. 
- session.install("mock", "pytest", "pytest-cov", "pytest-asyncio", "brotli", "grpcio", "grpcio-status", "proto-plus", "grpc-google-iam-v1", "-c", constraints_path) + session.install( + "mock", + "pytest", + "pytest-cov", + "pytest-asyncio", + "brotli", + "grpcio", + "grpcio-status", + "proto-plus", + "grpc-google-iam-v1", + "-c", + constraints_path, + ) if install_extras: session.install("opentelemetry-api", "opentelemetry-sdk") diff --git a/samples/generated_samples/snippet_metadata_google.storage.v2.json b/samples/generated_samples/snippet_metadata_google.storage.v2.json new file mode 100644 index 000000000..907bdb30e --- /dev/null +++ b/samples/generated_samples/snippet_metadata_google.storage.v2.json @@ -0,0 +1,3939 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.storage.v2", + "version": "v2" + } + ], + "language": "PYTHON", + "name": "google-cloud-storage", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.bidi_read_object", + "method": { + "fullName": "google.storage.v2.Storage.BidiReadObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "BidiReadObject" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.storage_v2.types.BidiReadObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.BidiReadObjectResponse]", + "shortName": "bidi_read_object" + }, + "description": "Sample for BidiReadObject", + "file": "storage_v2_generated_storage_bidi_read_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_BidiReadObject_async", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 54, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 55, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_bidi_read_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.bidi_read_object", + "method": { + "fullName": "google.storage.v2.Storage.BidiReadObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "BidiReadObject" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.storage_v2.types.BidiReadObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.BidiReadObjectResponse]", + "shortName": "bidi_read_object" + }, + "description": "Sample for BidiReadObject", + "file": "storage_v2_generated_storage_bidi_read_object_sync.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_BidiReadObject_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 54, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 55, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_bidi_read_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.bidi_write_object", + "method": { + "fullName": "google.storage.v2.Storage.BidiWriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "BidiWriteObject" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.storage_v2.types.BidiWriteObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.BidiWriteObjectResponse]", + "shortName": "bidi_write_object" + }, + "description": "Sample for BidiWriteObject", + "file": "storage_v2_generated_storage_bidi_write_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_BidiWriteObject_async", + "segments": [ + { + "end": 63, + "start": 27, + "type": "FULL" + }, + { + "end": 63, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 56, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 57, + "type": "REQUEST_EXECUTION" + }, + { + "end": 64, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_bidi_write_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.bidi_write_object", + "method": { + "fullName": "google.storage.v2.Storage.BidiWriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "BidiWriteObject" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.storage_v2.types.BidiWriteObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.BidiWriteObjectResponse]", + "shortName": "bidi_write_object" + }, + "description": "Sample for BidiWriteObject", + "file": "storage_v2_generated_storage_bidi_write_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_BidiWriteObject_sync", + "segments": [ + { + "end": 63, + "start": 27, + "type": "FULL" + }, + { + "end": 63, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 56, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 57, + "type": "REQUEST_EXECUTION" + }, + { + "end": 64, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_bidi_write_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.cancel_resumable_write", + "method": { + "fullName": "google.storage.v2.Storage.CancelResumableWrite", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "CancelResumableWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.CancelResumableWriteRequest" + }, + { + "name": "upload_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.CancelResumableWriteResponse", + "shortName": "cancel_resumable_write" + }, + "description": "Sample for CancelResumableWrite", + "file": "storage_v2_generated_storage_cancel_resumable_write_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_CancelResumableWrite_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_cancel_resumable_write_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.cancel_resumable_write", + "method": { + "fullName": "google.storage.v2.Storage.CancelResumableWrite", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "CancelResumableWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.CancelResumableWriteRequest" + }, + { + "name": "upload_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.CancelResumableWriteResponse", + "shortName": "cancel_resumable_write" + }, + "description": "Sample for CancelResumableWrite", + "file": "storage_v2_generated_storage_cancel_resumable_write_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_CancelResumableWrite_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"storage_v2_generated_storage_cancel_resumable_write_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.compose_object", + "method": { + "fullName": "google.storage.v2.Storage.ComposeObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ComposeObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ComposeObjectRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "compose_object" + }, + "description": "Sample for ComposeObject", + "file": "storage_v2_generated_storage_compose_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ComposeObject_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_compose_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.compose_object", + "method": { + "fullName": "google.storage.v2.Storage.ComposeObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ComposeObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ComposeObjectRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "compose_object" + }, + "description": "Sample for ComposeObject", + "file": "storage_v2_generated_storage_compose_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ComposeObject_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_compose_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.create_bucket", + "method": { + "fullName": "google.storage.v2.Storage.CreateBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": 
"Storage" + }, + "shortName": "CreateBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.CreateBucketRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "bucket", + "type": "google.cloud.storage_v2.types.Bucket" + }, + { + "name": "bucket_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "create_bucket" + }, + "description": "Sample for CreateBucket", + "file": "storage_v2_generated_storage_create_bucket_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_CreateBucket_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_create_bucket_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.create_bucket", + "method": { + "fullName": "google.storage.v2.Storage.CreateBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "CreateBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.CreateBucketRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "bucket", + "type": "google.cloud.storage_v2.types.Bucket" + }, + { + "name": "bucket_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "create_bucket" + }, + "description": "Sample for CreateBucket", + "file": "storage_v2_generated_storage_create_bucket_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_CreateBucket_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_create_bucket_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.delete_bucket", + "method": { + "fullName": "google.storage.v2.Storage.DeleteBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "DeleteBucket" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.storage_v2.types.DeleteBucketRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_bucket" + }, + "description": "Sample for DeleteBucket", + "file": "storage_v2_generated_storage_delete_bucket_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_DeleteBucket_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_delete_bucket_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.delete_bucket", + "method": { + "fullName": "google.storage.v2.Storage.DeleteBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "DeleteBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.DeleteBucketRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_bucket" + }, + "description": "Sample for DeleteBucket", + "file": "storage_v2_generated_storage_delete_bucket_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_DeleteBucket_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_delete_bucket_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.delete_object", + "method": { + "fullName": "google.storage.v2.Storage.DeleteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "DeleteObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.DeleteObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_object" + }, + "description": "Sample for DeleteObject", + "file": "storage_v2_generated_storage_delete_object_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_DeleteObject_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_delete_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.delete_object", + "method": { + "fullName": "google.storage.v2.Storage.DeleteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "DeleteObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.DeleteObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_object" + }, + "description": "Sample for DeleteObject", + "file": "storage_v2_generated_storage_delete_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_DeleteObject_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_delete_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.get_bucket", + "method": { + "fullName": "google.storage.v2.Storage.GetBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.GetBucketRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "get_bucket" + }, + "description": "Sample for GetBucket", + "file": "storage_v2_generated_storage_get_bucket_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetBucket_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_bucket_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.get_bucket", + "method": { + "fullName": "google.storage.v2.Storage.GetBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.GetBucketRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "get_bucket" + }, + "description": "Sample for GetBucket", + "file": "storage_v2_generated_storage_get_bucket_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetBucket_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_bucket_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.get_iam_policy", + "method": { + "fullName": "google.storage.v2.Storage.GetIamPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "storage_v2_generated_storage_get_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.get_iam_policy", + "method": { + "fullName": 
"google.storage.v2.Storage.GetIamPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "storage_v2_generated_storage_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.get_object", + "method": { + "fullName": "google.storage.v2.Storage.GetObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.GetObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "get_object" + }, + "description": "Sample for GetObject", + "file": "storage_v2_generated_storage_get_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetObject_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.get_object", + "method": { + "fullName": "google.storage.v2.Storage.GetObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "GetObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.GetObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": 
"object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "get_object" + }, + "description": "Sample for GetObject", + "file": "storage_v2_generated_storage_get_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_GetObject_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_get_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.list_buckets", + "method": { + "fullName": "google.storage.v2.Storage.ListBuckets", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ListBuckets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ListBucketsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.services.storage.pagers.ListBucketsAsyncPager", + "shortName": "list_buckets" + }, + "description": "Sample for ListBuckets", + "file": "storage_v2_generated_storage_list_buckets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ListBuckets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_list_buckets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.list_buckets", + "method": { + "fullName": "google.storage.v2.Storage.ListBuckets", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ListBuckets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ListBucketsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.services.storage.pagers.ListBucketsPager", + "shortName": 
"list_buckets" + }, + "description": "Sample for ListBuckets", + "file": "storage_v2_generated_storage_list_buckets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ListBuckets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_list_buckets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.list_objects", + "method": { + "fullName": "google.storage.v2.Storage.ListObjects", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ListObjects" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ListObjectsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.services.storage.pagers.ListObjectsAsyncPager", + "shortName": "list_objects" + }, + "description": "Sample for ListObjects", + "file": "storage_v2_generated_storage_list_objects_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ListObjects_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_list_objects_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.list_objects", + "method": { + "fullName": "google.storage.v2.Storage.ListObjects", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ListObjects" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ListObjectsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.services.storage.pagers.ListObjectsPager", + "shortName": "list_objects" + }, + "description": "Sample for ListObjects", + "file": "storage_v2_generated_storage_list_objects_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ListObjects_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + 
"type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_list_objects_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.lock_bucket_retention_policy", + "method": { + "fullName": "google.storage.v2.Storage.LockBucketRetentionPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "LockBucketRetentionPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.LockBucketRetentionPolicyRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "lock_bucket_retention_policy" + }, + "description": "Sample for LockBucketRetentionPolicy", + "file": "storage_v2_generated_storage_lock_bucket_retention_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_LockBucketRetentionPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_lock_bucket_retention_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.lock_bucket_retention_policy", + "method": { + "fullName": "google.storage.v2.Storage.LockBucketRetentionPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "LockBucketRetentionPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.LockBucketRetentionPolicyRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "lock_bucket_retention_policy" + }, + "description": "Sample for LockBucketRetentionPolicy", + "file": "storage_v2_generated_storage_lock_bucket_retention_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_LockBucketRetentionPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_lock_bucket_retention_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.move_object", + "method": { + "fullName": "google.storage.v2.Storage.MoveObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "MoveObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.MoveObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "source_object", + "type": "str" + }, + { + "name": "destination_object", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "move_object" + }, + "description": "Sample for MoveObject", + "file": "storage_v2_generated_storage_move_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_MoveObject_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_move_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.move_object", + "method": { + "fullName": "google.storage.v2.Storage.MoveObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "MoveObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.MoveObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "source_object", + "type": "str" + }, + { + "name": "destination_object", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "move_object" + }, + "description": "Sample for MoveObject", + "file": "storage_v2_generated_storage_move_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_MoveObject_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"storage_v2_generated_storage_move_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.query_write_status", + "method": { + "fullName": "google.storage.v2.Storage.QueryWriteStatus", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "QueryWriteStatus" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.QueryWriteStatusRequest" + }, + { + "name": "upload_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.QueryWriteStatusResponse", + "shortName": "query_write_status" + }, + "description": "Sample for QueryWriteStatus", + "file": "storage_v2_generated_storage_query_write_status_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_QueryWriteStatus_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_query_write_status_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.query_write_status", + "method": { + "fullName": "google.storage.v2.Storage.QueryWriteStatus", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "QueryWriteStatus" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.QueryWriteStatusRequest" + }, + { + "name": "upload_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.QueryWriteStatusResponse", + "shortName": "query_write_status" + }, + "description": "Sample for QueryWriteStatus", + "file": "storage_v2_generated_storage_query_write_status_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_QueryWriteStatus_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_query_write_status_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": 
"google.cloud.storage_v2.StorageAsyncClient.read_object", + "method": { + "fullName": "google.storage.v2.Storage.ReadObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ReadObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ReadObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.ReadObjectResponse]", + "shortName": "read_object" + }, + "description": "Sample for ReadObject", + "file": "storage_v2_generated_storage_read_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ReadObject_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_read_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.read_object", + "method": { + "fullName": "google.storage.v2.Storage.ReadObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "ReadObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.ReadObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "Iterable[google.cloud.storage_v2.types.ReadObjectResponse]", + "shortName": "read_object" + }, + "description": "Sample for ReadObject", + "file": "storage_v2_generated_storage_read_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_ReadObject_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_read_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.restore_object", + "method": { + "fullName": "google.storage.v2.Storage.RestoreObject", + "service": { + "fullName": "google.storage.v2.Storage", + 
"shortName": "Storage" + }, + "shortName": "RestoreObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.RestoreObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "restore_object" + }, + "description": "Sample for RestoreObject", + "file": "storage_v2_generated_storage_restore_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_RestoreObject_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_restore_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.restore_object", + "method": { + "fullName": "google.storage.v2.Storage.RestoreObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "RestoreObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.RestoreObjectRequest" + }, + { + "name": "bucket", + "type": "str" + }, + { + "name": "object_", + "type": "str" + }, + { + "name": "generation", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "restore_object" + }, + "description": "Sample for RestoreObject", + "file": "storage_v2_generated_storage_restore_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_RestoreObject_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_restore_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.rewrite_object", + "method": { + "fullName": "google.storage.v2.Storage.RewriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "RewriteObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.RewriteObjectRequest" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.RewriteResponse", + "shortName": "rewrite_object" + }, + "description": "Sample for RewriteObject", + "file": "storage_v2_generated_storage_rewrite_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_RewriteObject_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_rewrite_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.rewrite_object", + "method": { + "fullName": "google.storage.v2.Storage.RewriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "RewriteObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.RewriteObjectRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.RewriteResponse", + "shortName": "rewrite_object" + }, + "description": "Sample for RewriteObject", + "file": "storage_v2_generated_storage_rewrite_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_RewriteObject_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_rewrite_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.set_iam_policy", + "method": { + "fullName": "google.storage.v2.Storage.SetIamPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "storage_v2_generated_storage_set_iam_policy_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_SetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_set_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.set_iam_policy", + "method": { + "fullName": "google.storage.v2.Storage.SetIamPolicy", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "storage_v2_generated_storage_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_set_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.start_resumable_write", + "method": { + "fullName": "google.storage.v2.Storage.StartResumableWrite", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "StartResumableWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.StartResumableWriteRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.StartResumableWriteResponse", + "shortName": "start_resumable_write" + }, + "description": "Sample for StartResumableWrite", + "file": "storage_v2_generated_storage_start_resumable_write_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_StartResumableWrite_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_start_resumable_write_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.start_resumable_write", + "method": { + "fullName": "google.storage.v2.Storage.StartResumableWrite", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "StartResumableWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.StartResumableWriteRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.StartResumableWriteResponse", + "shortName": "start_resumable_write" + }, + "description": "Sample for StartResumableWrite", + "file": "storage_v2_generated_storage_start_resumable_write_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_StartResumableWrite_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_start_resumable_write_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.test_iam_permissions", + "method": { + "fullName": "google.storage.v2.Storage.TestIamPermissions", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "storage_v2_generated_storage_test_iam_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_TestIamPermissions_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"storage_v2_generated_storage_test_iam_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.test_iam_permissions", + "method": { + "fullName": "google.storage.v2.Storage.TestIamPermissions", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "storage_v2_generated_storage_test_iam_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_TestIamPermissions_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_test_iam_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.update_bucket", + "method": { + "fullName": "google.storage.v2.Storage.UpdateBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "UpdateBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.UpdateBucketRequest" + }, + { + "name": "bucket", + "type": "google.cloud.storage_v2.types.Bucket" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "update_bucket" + }, + "description": "Sample for UpdateBucket", + "file": "storage_v2_generated_storage_update_bucket_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_UpdateBucket_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_update_bucket_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + 
"fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.update_bucket", + "method": { + "fullName": "google.storage.v2.Storage.UpdateBucket", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "UpdateBucket" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.UpdateBucketRequest" + }, + { + "name": "bucket", + "type": "google.cloud.storage_v2.types.Bucket" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Bucket", + "shortName": "update_bucket" + }, + "description": "Sample for UpdateBucket", + "file": "storage_v2_generated_storage_update_bucket_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_UpdateBucket_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_update_bucket_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.update_object", + "method": { + "fullName": "google.storage.v2.Storage.UpdateObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "UpdateObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.UpdateObjectRequest" + }, + { + "name": "object_", + "type": "google.cloud.storage_v2.types.Object" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "update_object" + }, + "description": "Sample for UpdateObject", + "file": "storage_v2_generated_storage_update_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_UpdateObject_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_update_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": 
"google.cloud.storage_v2.StorageClient.update_object", + "method": { + "fullName": "google.storage.v2.Storage.UpdateObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "UpdateObject" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.storage_v2.types.UpdateObjectRequest" + }, + { + "name": "object_", + "type": "google.cloud.storage_v2.types.Object" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.Object", + "shortName": "update_object" + }, + "description": "Sample for UpdateObject", + "file": "storage_v2_generated_storage_update_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_UpdateObject_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_update_object_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.storage_v2.StorageAsyncClient", + "shortName": "StorageAsyncClient" + }, + "fullName": "google.cloud.storage_v2.StorageAsyncClient.write_object", + "method": { + "fullName": "google.storage.v2.Storage.WriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "WriteObject" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.storage_v2.types.WriteObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.WriteObjectResponse", + "shortName": "write_object" + }, + "description": "Sample for WriteObject", + "file": "storage_v2_generated_storage_write_object_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_WriteObject_async", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 56, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 57, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_write_object_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.storage_v2.StorageClient", + "shortName": "StorageClient" + }, + "fullName": "google.cloud.storage_v2.StorageClient.write_object", + "method": { + "fullName": "google.storage.v2.Storage.WriteObject", + "service": { + "fullName": "google.storage.v2.Storage", + "shortName": "Storage" + }, + "shortName": "WriteObject" + }, + "parameters": [ + { + "name": "requests", + "type": 
"Iterator[google.cloud.storage_v2.types.WriteObjectRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.storage_v2.types.WriteObjectResponse", + "shortName": "write_object" + }, + "description": "Sample for WriteObject", + "file": "storage_v2_generated_storage_write_object_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "storage_v2_generated_Storage_WriteObject_sync", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 56, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 57, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "storage_v2_generated_storage_write_object_sync.py" + } + ] +} diff --git a/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_async.py b/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_async.py new file mode 100644 index 000000000..395fe9eea --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BidiReadObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_BidiReadObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_bidi_read_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.BidiReadObjectRequest( + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiReadObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. 
+ requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = await client.bidi_read_object(requests=request_generator()) + + # Handle the response + async for response in stream: + print(response) + +# [END storage_v2_generated_Storage_BidiReadObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_sync.py new file mode 100644 index 000000000..d494483b1 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_bidi_read_object_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BidiReadObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_BidiReadObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_bidi_read_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.BidiReadObjectRequest( + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiReadObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = client.bidi_read_object(requests=request_generator()) + + # Handle the response + for response in stream: + print(response) + +# [END storage_v2_generated_Storage_BidiReadObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_async.py b/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_async.py new file mode 100644 index 000000000..89cf5a0d7 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_async.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BidiWriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_BidiWriteObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_bidi_write_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.BidiWriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiWriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = await client.bidi_write_object(requests=request_generator()) + + # Handle the response + async for response in stream: + print(response) + +# [END storage_v2_generated_Storage_BidiWriteObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_sync.py new file mode 100644 index 000000000..e53f97fc8 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_bidi_write_object_sync.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BidiWriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_BidiWriteObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. 
+# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_bidi_write_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.BidiWriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.BidiWriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = client.bidi_write_object(requests=request_generator()) + + # Handle the response + for response in stream: + print(response) + +# [END storage_v2_generated_Storage_BidiWriteObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_async.py b/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_async.py new file mode 100644 index 000000000..3e2610dd2 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelResumableWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_CancelResumableWrite_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_cancel_resumable_write(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.CancelResumableWriteRequest( + upload_id="upload_id_value", + ) + + # Make the request + response = await client.cancel_resumable_write(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_CancelResumableWrite_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_sync.py b/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_sync.py new file mode 100644 index 000000000..38a1fe5b1 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_cancel_resumable_write_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelResumableWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_CancelResumableWrite_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_cancel_resumable_write(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.CancelResumableWriteRequest( + upload_id="upload_id_value", + ) + + # Make the request + response = client.cancel_resumable_write(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_CancelResumableWrite_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_compose_object_async.py b/samples/generated_samples/storage_v2_generated_storage_compose_object_async.py new file mode 100644 index 000000000..0f1e5462f --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_compose_object_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ComposeObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ComposeObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_compose_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.ComposeObjectRequest( + ) + + # Make the request + response = await client.compose_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_ComposeObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_compose_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_compose_object_sync.py new file mode 100644 index 000000000..f6363ee89 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_compose_object_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ComposeObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ComposeObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_compose_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ComposeObjectRequest( + ) + + # Make the request + response = client.compose_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_ComposeObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_create_bucket_async.py b/samples/generated_samples/storage_v2_generated_storage_create_bucket_async.py new file mode 100644 index 000000000..afd3d5ea8 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_create_bucket_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_CreateBucket_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_create_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.CreateBucketRequest( + parent="parent_value", + bucket_id="bucket_id_value", + ) + + # Make the request + response = await client.create_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_CreateBucket_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_create_bucket_sync.py b/samples/generated_samples/storage_v2_generated_storage_create_bucket_sync.py new file mode 100644 index 000000000..9cc81d3b4 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_create_bucket_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_CreateBucket_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_create_bucket(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.CreateBucketRequest( + parent="parent_value", + bucket_id="bucket_id_value", + ) + + # Make the request + response = client.create_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_CreateBucket_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_delete_bucket_async.py b/samples/generated_samples/storage_v2_generated_storage_delete_bucket_async.py new file mode 100644 index 000000000..3e978be0d --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_delete_bucket_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_DeleteBucket_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_delete_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.DeleteBucketRequest( + name="name_value", + ) + + # Make the request + await client.delete_bucket(request=request) + + +# [END storage_v2_generated_Storage_DeleteBucket_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_delete_bucket_sync.py b/samples/generated_samples/storage_v2_generated_storage_delete_bucket_sync.py new file mode 100644 index 000000000..93df10f29 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_delete_bucket_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_DeleteBucket_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_delete_bucket(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.DeleteBucketRequest( + name="name_value", + ) + + # Make the request + client.delete_bucket(request=request) + + +# [END storage_v2_generated_Storage_DeleteBucket_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_delete_object_async.py b/samples/generated_samples/storage_v2_generated_storage_delete_object_async.py new file mode 100644 index 000000000..ba87aca5f --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_delete_object_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_DeleteObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_delete_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.DeleteObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + await client.delete_object(request=request) + + +# [END storage_v2_generated_Storage_DeleteObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_delete_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_delete_object_sync.py new file mode 100644 index 000000000..cc2b9d68e --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_delete_object_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_DeleteObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_delete_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.DeleteObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + client.delete_object(request=request) + + +# [END storage_v2_generated_Storage_DeleteObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_bucket_async.py b/samples/generated_samples/storage_v2_generated_storage_get_bucket_async.py new file mode 100644 index 000000000..25816a892 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_bucket_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetBucket_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_get_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.GetBucketRequest( + name="name_value", + ) + + # Make the request + response = await client.get_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_GetBucket_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_bucket_sync.py b/samples/generated_samples/storage_v2_generated_storage_get_bucket_sync.py new file mode 100644 index 000000000..2b2af4f60 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_bucket_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetBucket_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_get_bucket(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.GetBucketRequest( + name="name_value", + ) + + # Make the request + response = client.get_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_GetBucket_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_async.py b/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_async.py new file mode 100644 index 000000000..a7920496b --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_get_iam_policy(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_GetIamPolicy_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_sync.py b/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_sync.py new file mode 100644 index 000000000..a63fd2927 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_get_iam_policy(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_GetIamPolicy_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_object_async.py b/samples/generated_samples/storage_v2_generated_storage_get_object_async.py new file mode 100644 index 000000000..656c7e96f --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_object_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_get_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.GetObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + response = await client.get_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_GetObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_get_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_get_object_sync.py new file mode 100644 index 000000000..6611ee9e7 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_get_object_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_GetObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import storage_v2
+
+
+def sample_get_object():
+    # Create a client
+    client = storage_v2.StorageClient()
+
+    # Initialize request argument(s)
+    request = storage_v2.GetObjectRequest(
+        bucket="bucket_value",
+        object_="object__value",
+    )
+
+    # Make the request
+    response = client.get_object(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END storage_v2_generated_Storage_GetObject_sync]
diff --git a/samples/generated_samples/storage_v2_generated_storage_list_buckets_async.py b/samples/generated_samples/storage_v2_generated_storage_list_buckets_async.py
new file mode 100644
index 000000000..0a51045b9
--- /dev/null
+++ b/samples/generated_samples/storage_v2_generated_storage_list_buckets_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListBuckets
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-storage
+
+
+# [START storage_v2_generated_Storage_ListBuckets_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import storage_v2


+async def sample_list_buckets():
+    # Create a client
+    client = storage_v2.StorageAsyncClient()
+
+    # Initialize request argument(s)
+    request = storage_v2.ListBucketsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request; the async client method is a coroutine, so it must be
+    # awaited to obtain the pager before iterating with `async for`.
+    page_result = await client.list_buckets(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END storage_v2_generated_Storage_ListBuckets_async]
diff --git a/samples/generated_samples/storage_v2_generated_storage_list_buckets_sync.py b/samples/generated_samples/storage_v2_generated_storage_list_buckets_sync.py
new file mode 100644
index 000000000..e40cafec7
--- /dev/null
+++ b/samples/generated_samples/storage_v2_generated_storage_list_buckets_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBuckets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ListBuckets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_list_buckets(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ListBucketsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_buckets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END storage_v2_generated_Storage_ListBuckets_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_list_objects_async.py b/samples/generated_samples/storage_v2_generated_storage_list_objects_async.py new file mode 100644 index 000000000..c7f2db4fd --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_list_objects_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListObjects +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ListObjects_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import storage_v2
+
+
+async def sample_list_objects():
+    # Create a client
+    client = storage_v2.StorageAsyncClient()
+
+    # Initialize request argument(s)
+    request = storage_v2.ListObjectsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request; the async client method is a coroutine, so it must be
+    # awaited to obtain the pager before iterating with `async for`.
+    page_result = await client.list_objects(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END storage_v2_generated_Storage_ListObjects_async]
diff --git a/samples/generated_samples/storage_v2_generated_storage_list_objects_sync.py b/samples/generated_samples/storage_v2_generated_storage_list_objects_sync.py
new file mode 100644
index 000000000..1a33f0eef
--- /dev/null
+++ b/samples/generated_samples/storage_v2_generated_storage_list_objects_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListObjects
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-storage
+
+
+# [START storage_v2_generated_Storage_ListObjects_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import storage_v2
+
+
+def sample_list_objects():
+    # Create a client
+    client = storage_v2.StorageClient()
+
+    # Initialize request argument(s)
+    request = storage_v2.ListObjectsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_objects(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END storage_v2_generated_Storage_ListObjects_sync]
diff --git a/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_async.py b/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_async.py
new file mode 100644
index 000000000..cc307095b
--- /dev/null
+++ b/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LockBucketRetentionPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_LockBucketRetentionPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_lock_bucket_retention_policy(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.LockBucketRetentionPolicyRequest( + bucket="bucket_value", + if_metageneration_match=2413, + ) + + # Make the request + response = await client.lock_bucket_retention_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_LockBucketRetentionPolicy_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_sync.py b/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_sync.py new file mode 100644 index 000000000..e80bb13c5 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_lock_bucket_retention_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LockBucketRetentionPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_LockBucketRetentionPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_lock_bucket_retention_policy(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.LockBucketRetentionPolicyRequest( + bucket="bucket_value", + if_metageneration_match=2413, + ) + + # Make the request + response = client.lock_bucket_retention_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_LockBucketRetentionPolicy_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_move_object_async.py b/samples/generated_samples/storage_v2_generated_storage_move_object_async.py new file mode 100644 index 000000000..cb8c7b3f4 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_move_object_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MoveObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_MoveObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_move_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.MoveObjectRequest( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + # Make the request + response = await client.move_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_MoveObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_move_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_move_object_sync.py new file mode 100644 index 000000000..8f8b3a2c7 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_move_object_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MoveObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_MoveObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_move_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.MoveObjectRequest( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + # Make the request + response = client.move_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_MoveObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_query_write_status_async.py b/samples/generated_samples/storage_v2_generated_storage_query_write_status_async.py new file mode 100644 index 000000000..408054740 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_query_write_status_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryWriteStatus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_QueryWriteStatus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_query_write_status(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.QueryWriteStatusRequest( + upload_id="upload_id_value", + ) + + # Make the request + response = await client.query_write_status(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_QueryWriteStatus_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_query_write_status_sync.py b/samples/generated_samples/storage_v2_generated_storage_query_write_status_sync.py new file mode 100644 index 000000000..0c88f0f72 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_query_write_status_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryWriteStatus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_QueryWriteStatus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_query_write_status(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.QueryWriteStatusRequest( + upload_id="upload_id_value", + ) + + # Make the request + response = client.query_write_status(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_QueryWriteStatus_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_read_object_async.py b/samples/generated_samples/storage_v2_generated_storage_read_object_async.py new file mode 100644 index 000000000..3fb95e47d --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_read_object_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ReadObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_read_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.ReadObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + stream = await client.read_object(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END storage_v2_generated_Storage_ReadObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_read_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_read_object_sync.py new file mode 100644 index 000000000..a188aeca3 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_read_object_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_ReadObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_read_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.ReadObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Make the request + stream = client.read_object(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END storage_v2_generated_Storage_ReadObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_restore_object_async.py b/samples/generated_samples/storage_v2_generated_storage_restore_object_async.py new file mode 100644 index 000000000..13c1de2e1 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_restore_object_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RestoreObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_RestoreObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_restore_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.RestoreObjectRequest( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Make the request + response = await client.restore_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_RestoreObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_restore_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_restore_object_sync.py new file mode 100644 index 000000000..dde9f9a30 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_restore_object_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RestoreObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_RestoreObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_restore_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.RestoreObjectRequest( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Make the request + response = client.restore_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_RestoreObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_rewrite_object_async.py b/samples/generated_samples/storage_v2_generated_storage_rewrite_object_async.py new file mode 100644 index 000000000..e8d676c4a --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_rewrite_object_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RewriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_RewriteObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_rewrite_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.RewriteObjectRequest( + destination_name="destination_name_value", + destination_bucket="destination_bucket_value", + source_bucket="source_bucket_value", + source_object="source_object_value", + ) + + # Make the request + response = await client.rewrite_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_RewriteObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_rewrite_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_rewrite_object_sync.py new file mode 100644 index 000000000..9c4c78cc3 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_rewrite_object_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RewriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_RewriteObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_rewrite_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.RewriteObjectRequest( + destination_name="destination_name_value", + destination_bucket="destination_bucket_value", + source_bucket="source_bucket_value", + source_object="source_object_value", + ) + + # Make the request + response = client.rewrite_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_RewriteObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_async.py b/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_async.py new file mode 100644 index 000000000..06a9c50fb --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_SetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_set_iam_policy(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_SetIamPolicy_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_sync.py b/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_sync.py new file mode 100644 index 000000000..2b3c0c6bc --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_set_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_set_iam_policy(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_SetIamPolicy_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_async.py b/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_async.py new file mode 100644 index 000000000..673a05e97 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartResumableWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_StartResumableWrite_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_start_resumable_write(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.StartResumableWriteRequest( + ) + + # Make the request + response = await client.start_resumable_write(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_StartResumableWrite_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_sync.py b/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_sync.py new file mode 100644 index 000000000..6f202ee2e --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_start_resumable_write_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartResumableWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_StartResumableWrite_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_start_resumable_write(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.StartResumableWriteRequest( + ) + + # Make the request + response = client.start_resumable_write(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_StartResumableWrite_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_async.py b/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_async.py new file mode 100644 index 000000000..dd4818f83 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_TestIamPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_test_iam_permissions(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_TestIamPermissions_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_sync.py b/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_sync.py new file mode 100644 index 000000000..e66d50d92 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_test_iam_permissions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_TestIamPermissions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_test_iam_permissions(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_TestIamPermissions_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_update_bucket_async.py b/samples/generated_samples/storage_v2_generated_storage_update_bucket_async.py new file mode 100644 index 000000000..bd30f9440 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_update_bucket_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_UpdateBucket_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_update_bucket(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.UpdateBucketRequest( + ) + + # Make the request + response = await client.update_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_UpdateBucket_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_update_bucket_sync.py b/samples/generated_samples/storage_v2_generated_storage_update_bucket_sync.py new file mode 100644 index 000000000..2ca346ec5 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_update_bucket_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBucket +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_UpdateBucket_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_update_bucket(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.UpdateBucketRequest( + ) + + # Make the request + response = client.update_bucket(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_UpdateBucket_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_update_object_async.py b/samples/generated_samples/storage_v2_generated_storage_update_object_async.py new file mode 100644 index 000000000..835840a30 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_update_object_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_UpdateObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_update_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.UpdateObjectRequest( + ) + + # Make the request + response = await client.update_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_UpdateObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_update_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_update_object_sync.py new file mode 100644 index 000000000..2c70255b1 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_update_object_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_UpdateObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_update_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.UpdateObjectRequest( + ) + + # Make the request + response = client.update_object(request=request) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_UpdateObject_sync] diff --git a/samples/generated_samples/storage_v2_generated_storage_write_object_async.py b/samples/generated_samples/storage_v2_generated_storage_write_object_async.py new file mode 100644 index 000000000..33b8fb286 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_write_object_async.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_WriteObject_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +async def sample_write_object(): + # Create a client + client = storage_v2.StorageAsyncClient() + + # Initialize request argument(s) + request = storage_v2.WriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.WriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + response = await client.write_object(requests=request_generator()) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_WriteObject_async] diff --git a/samples/generated_samples/storage_v2_generated_storage_write_object_sync.py b/samples/generated_samples/storage_v2_generated_storage_write_object_sync.py new file mode 100644 index 000000000..c24150533 --- /dev/null +++ b/samples/generated_samples/storage_v2_generated_storage_write_object_sync.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteObject +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-storage + + +# [START storage_v2_generated_Storage_WriteObject_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import storage_v2 + + +def sample_write_object(): + # Create a client + client = storage_v2.StorageClient() + + # Initialize request argument(s) + request = storage_v2.WriteObjectRequest( + upload_id="upload_id_value", + write_offset=1297, + ) + + # This method expects an iterator which contains + # 'storage_v2.WriteObjectRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + response = client.write_object(requests=request_generator()) + + # Handle the response + print(response) + +# [END storage_v2_generated_Storage_WriteObject_sync] diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt index e69de29bb..ad3f0fa58 100644 --- a/testing/constraints-3.10.txt +++ b/testing/constraints-3.10.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt index e69de29bb..ad3f0fa58 100644 --- a/testing/constraints-3.11.txt +++ b/testing/constraints-3.11.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/testing/constraints-3.12.txt b/testing/constraints-3.12.txt index e69de29bb..ad3f0fa58 100644 --- a/testing/constraints-3.12.txt +++ b/testing/constraints-3.12.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/testing/constraints-3.13.txt b/testing/constraints-3.13.txt index e69de29bb..2010e549c 100644 --- a/testing/constraints-3.13.txt +++ b/testing/constraints-3.13.txt @@ -0,0 +1,12 @@ +# We use the constraints file for the latest Python version +# (currently this file) to check that the latest +# major versions of dependencies are supported in setup.py. +# List all library dependencies and extras in this file. +# Require the latest major version be installed for each dependency. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0", +# Then this file should have google-cloud-foo>=1 +google-api-core>=2 +google-auth>=2 +proto-plus>=1 +protobuf>=6 +grpc-google-iam-v1>=0 diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index e69de29bb..ad3f0fa58 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/tests/__init__.py b/tests/__init__.py index e69de29bb..cbf94b283 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/__init__.py b/tests/unit/gapic/__init__.py new file mode 100644 index 000000000..cbf94b283 --- /dev/null +++ b/tests/unit/gapic/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/storage_v2/__init__.py b/tests/unit/gapic/storage_v2/__init__.py new file mode 100644 index 000000000..cbf94b283 --- /dev/null +++ b/tests/unit/gapic/storage_v2/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/storage_v2/test_storage.py b/tests/unit/gapic/storage_v2/test_storage.py new file mode 100644 index 000000000..4869eba7f --- /dev/null +++ b/tests/unit/gapic/storage_v2/test_storage.py @@ -0,0 +1,11512 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers + +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.storage_v2.services.storage import StorageAsyncClient +from google.cloud.storage_v2.services.storage import StorageClient +from google.cloud.storage_v2.services.storage import pagers +from google.cloud.storage_v2.services.storage import transports +from google.cloud.storage_v2.types import storage +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore +from google.type import expr_pb2 # type: ignore +import google.auth + + +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. +# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert StorageClient._get_default_mtls_endpoint(None) is None + assert StorageClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + StorageClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + ) + assert ( + StorageClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + StorageClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert StorageClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +def test__read_environment_variables(): + assert StorageClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert StorageClient._read_environment_variables() == (True, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert StorageClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + StorageClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert StorageClient._read_environment_variables() == (False, "never", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert StorageClient._read_environment_variables() == (False, "always", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert StorageClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + StorageClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert StorageClient._read_environment_variables() == (False, "auto", "foo.com") + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert StorageClient._get_client_cert_source(None, False) is None + assert ( + StorageClient._get_client_cert_source(mock_provided_cert_source, False) is None + ) + assert ( + StorageClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + StorageClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + 
StorageClient._get_client_cert_source(mock_provided_cert_source, "true") + is mock_provided_cert_source + ) + + +@mock.patch.object( + StorageClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageClient), +) +@mock.patch.object( + StorageAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = StorageClient._DEFAULT_UNIVERSE + default_endpoint = StorageClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = StorageClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + StorageClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + StorageClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == StorageClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + StorageClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + StorageClient._get_api_endpoint(None, None, default_universe, "always") + == StorageClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + StorageClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == StorageClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + StorageClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + StorageClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + StorageClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + StorageClient._get_universe_domain(client_universe_domain, universe_domain_env) + == client_universe_domain + ) + assert ( + StorageClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + StorageClient._get_universe_domain(None, None) + == StorageClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + StorageClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
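+
+
+# Illustrative sketch only (not part of the generated suite): the
+# universe-domain precedence asserted above, written out as a plain helper.
+# `_example_resolve_universe_domain` is a hypothetical name and is never
+# called by any test; it simply mirrors the semantics exercised for
+# StorageClient._get_universe_domain.
+def _example_resolve_universe_domain(client_universe_domain, universe_domain_env):
+    # An explicit client option wins, then the environment value, then the
+    # library default; an empty string is rejected outright.
+    if client_universe_domain == "":
+        raise ValueError("Universe Domain cannot be an empty string.")
+    return (
+        client_universe_domain
+        or universe_domain_env
+        or StorageClient._DEFAULT_UNIVERSE
+    )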
+ + +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = StorageClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = StorageClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (StorageClient, "grpc"), + (StorageAsyncClient, "grpc_asyncio"), + ], +) +def test_storage_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ("storage.googleapis.com:443") + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.StorageGrpcTransport, "grpc"), + (transports.StorageGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_storage_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (StorageClient, "grpc"), + (StorageAsyncClient, "grpc_asyncio"), + ], +) +def test_storage_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert 
client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ("storage.googleapis.com:443") + + +def test_storage_client_get_transport_class(): + transport = StorageClient.get_transport_class() + available_transports = [ + transports.StorageGrpcTransport, + ] + assert transport in available_transports + + transport = StorageClient.get_transport_class("grpc") + assert transport == transports.StorageGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (StorageClient, transports.StorageGrpcTransport, "grpc"), + (StorageAsyncClient, transports.StorageGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + StorageClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageClient), +) +@mock.patch.object( + StorageAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageAsyncClient), +) +def test_storage_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(StorageClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(StorageClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (StorageClient, transports.StorageGrpcTransport, "grpc", "true"), + ( + StorageAsyncClient, + transports.StorageGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (StorageClient, transports.StorageGrpcTransport, "grpc", "false"), + ( + StorageAsyncClient, + transports.StorageGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + StorageClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageClient), +) +@mock.patch.object( + StorageAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_storage_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
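+    # Expected outcome, as asserted below: with "true" the client switches to
+    # DEFAULT_MTLS_ENDPOINT and forwards the cert callback; with "false" it
+    # keeps the default endpoint and passes no cert source at all.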
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [StorageClient, StorageAsyncClient]) +@mock.patch.object( + StorageClient, "DEFAULT_ENDPOINT", modify_default_endpoint(StorageClient) +) +@mock.patch.object( + StorageAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(StorageAsyncClient) +) +def test_storage_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
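+    # Both values supplied via ClientOptions should come back unchanged when
+    # client certificates are enabled.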
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize("client_class", [StorageClient, StorageAsyncClient]) +@mock.patch.object( + StorageClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageClient), +) +@mock.patch.object( + StorageAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(StorageAsyncClient), +) +def test_storage_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = StorageClient._DEFAULT_UNIVERSE + default_endpoint = StorageClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = StorageClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
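+    # ClientOptions.universe_domain only exists on newer google-api-core
+    # releases, so the block below first probes for the attribute and falls
+    # back to asserting the default endpoint and universe when it is absent.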
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (StorageClient, transports.StorageGrpcTransport, "grpc"), + (StorageAsyncClient, transports.StorageGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_storage_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + (StorageClient, transports.StorageGrpcTransport, "grpc", grpc_helpers), + ( + StorageAsyncClient, + transports.StorageGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_storage_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
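+    # The path must be forwarded to the transport as credentials_file while
+    # credentials itself stays None; loading the file is the transport's job
+    # (see the create_channel test further below).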
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_storage_client_client_options_from_dict(): + with mock.patch( + "google.cloud.storage_v2.services.storage.transports.StorageGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = StorageClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + (StorageClient, transports.StorageGrpcTransport, "grpc", grpc_helpers), + ( + StorageAsyncClient, + transports.StorageGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_storage_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
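+    # i.e. create_channel must receive the credentials loaded from the file,
+    # not the ADC credentials, together with the storage OAuth scopes.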
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "storage.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ), + scopes=None, + default_host="storage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.DeleteBucketRequest, + dict, + ], +) +def test_delete_bucket(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.DeleteBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_bucket_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.DeleteBucketRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_bucket(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.DeleteBucketRequest( + name="name_value", + ) + + +def test_delete_bucket_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_bucket in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_bucket] = mock_rpc + request = {} + client.delete_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_bucket_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_bucket + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_bucket + ] = mock_rpc + + request = {} + await client.delete_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_bucket_async( + transport: str = "grpc_asyncio", request_type=storage.DeleteBucketRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_bucket(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.DeleteBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_bucket_async_from_dict(): + await test_delete_bucket_async(request_type=dict) + + +def test_delete_bucket_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_bucket( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_bucket_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_bucket( + storage.DeleteBucketRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_bucket_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_bucket( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_bucket_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_bucket( + storage.DeleteBucketRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.GetBucketRequest, + dict, + ], +) +def test_get_bucket(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. 
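+        # The field values below are arbitrary canaries; the assertions that
+        # follow only check that they round-trip through the client unchanged.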
+ call.return_value = storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + response = client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.GetBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +def test_get_bucket_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.GetBucketRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_bucket(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.GetBucketRequest( + name="name_value", + ) + + +def test_get_bucket_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_bucket in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_bucket] = mock_rpc + request = {} + client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. 
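+        # A second invocation must reuse the cached wrapper: no new wrapping
+        # occurs while the mocked RPC's call count climbs to two.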
+ assert mock_rpc.call_count == 1 + + client.get_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_bucket_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_bucket + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_bucket + ] = mock_rpc + + request = {} + await client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_bucket_async( + transport: str = "grpc_asyncio", request_type=storage.GetBucketRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + response = await client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.GetBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +@pytest.mark.asyncio +async def test_get_bucket_async_from_dict(): + await test_get_bucket_async(request_type=dict) + + +def test_get_bucket_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_bucket( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_bucket_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_bucket( + storage.GetBucketRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_bucket_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Bucket()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_bucket( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_bucket_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_bucket( + storage.GetBucketRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.CreateBucketRequest, + dict, + ], +) +def test_create_bucket(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + response = client.create_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.CreateBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +def test_create_bucket_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.CreateBucketRequest( + parent="parent_value", + bucket_id="bucket_id_value", + predefined_acl="predefined_acl_value", + predefined_default_object_acl="predefined_default_object_acl_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_bucket(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.CreateBucketRequest( + parent="parent_value", + bucket_id="bucket_id_value", + predefined_acl="predefined_acl_value", + predefined_default_object_acl="predefined_default_object_acl_value", + ) + + +def test_create_bucket_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_bucket in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.create_bucket] = mock_rpc + request = {} + client.create_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_bucket_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_bucket + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_bucket + ] = mock_rpc + + request = {} + await client.create_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.create_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_bucket_async( + transport: str = "grpc_asyncio", request_type=storage.CreateBucketRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + response = await client.create_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.CreateBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +@pytest.mark.asyncio +async def test_create_bucket_async_from_dict(): + await test_create_bucket_async(request_type=dict) + + +def test_create_bucket_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_bucket( + parent="parent_value", + bucket=storage.Bucket(name="name_value"), + bucket_id="bucket_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].bucket + mock_val = storage.Bucket(name="name_value") + assert arg == mock_val + arg = args[0].bucket_id + mock_val = "bucket_id_value" + assert arg == mock_val + + +def test_create_bucket_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_bucket( + storage.CreateBucketRequest(), + parent="parent_value", + bucket=storage.Bucket(name="name_value"), + bucket_id="bucket_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_bucket_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Bucket()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_bucket( + parent="parent_value", + bucket=storage.Bucket(name="name_value"), + bucket_id="bucket_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].bucket + mock_val = storage.Bucket(name="name_value") + assert arg == mock_val + arg = args[0].bucket_id + mock_val = "bucket_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_bucket_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
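+    # Callers must pass either a request object or flattened keyword
+    # arguments (e.g. parent=..., bucket=..., bucket_id=...), never both.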
+ with pytest.raises(ValueError): + await client.create_bucket( + storage.CreateBucketRequest(), + parent="parent_value", + bucket=storage.Bucket(name="name_value"), + bucket_id="bucket_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.ListBucketsRequest, + dict, + ], +) +def test_list_buckets(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListBucketsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.ListBucketsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBucketsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_buckets_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.ListBucketsRequest( + parent="parent_value", + page_token="page_token_value", + prefix="prefix_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_buckets(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.ListBucketsRequest( + parent="parent_value", + page_token="page_token_value", + prefix="prefix_value", + ) + + +def test_list_buckets_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_buckets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_buckets] = mock_rpc + request = {} + client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_buckets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_buckets_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_buckets + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_buckets + ] = mock_rpc + + request = {} + await client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_buckets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_buckets_async( + transport: str = "grpc_asyncio", request_type=storage.ListBucketsRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListBucketsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.ListBucketsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBucketsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_buckets_async_from_dict(): + await test_list_buckets_async(request_type=dict) + + +def test_list_buckets_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListBucketsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_buckets( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
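+ # (The flattened keyword arguments are folded into a single request
+ # proto before the transport is invoked, so the assertions below
+ # inspect the fields of args[0], the ListBucketsRequest that was
+ # actually sent, rather than the keyword arguments themselves.)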
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_buckets_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_buckets( + storage.ListBucketsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_buckets_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListBucketsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListBucketsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_buckets( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_buckets_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_buckets( + storage.ListBucketsRequest(), + parent="parent_value", + ) + + +def test_list_buckets_pager(transport_name: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + storage.Bucket(), + ], + next_page_token="abc", + ), + storage.ListBucketsResponse( + buckets=[], + next_page_token="def", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + ], + next_page_token="ghi", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + pager = client.list_buckets(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, storage.Bucket) for i in results) + + +def test_list_buckets_pages(transport_name: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Set the response to a series of pages. 
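+ # (Each response below advertises a next_page_token; the pager keeps
+ # issuing requests until it sees a response whose token is empty. The
+ # trailing RuntimeError is a sentinel that would only surface if the
+ # pager over-fetched past the final page.)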
+ call.side_effect = ( + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + storage.Bucket(), + ], + next_page_token="abc", + ), + storage.ListBucketsResponse( + buckets=[], + next_page_token="def", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + ], + next_page_token="ghi", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + ], + ), + RuntimeError, + ) + pages = list(client.list_buckets(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_buckets_async_pager(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_buckets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + storage.Bucket(), + ], + next_page_token="abc", + ), + storage.ListBucketsResponse( + buckets=[], + next_page_token="def", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + ], + next_page_token="ghi", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_buckets( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, storage.Bucket) for i in responses) + + +@pytest.mark.asyncio +async def test_list_buckets_async_pages(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_buckets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + storage.Bucket(), + ], + next_page_token="abc", + ), + storage.ListBucketsResponse( + buckets=[], + next_page_token="def", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + ], + next_page_token="ghi", + ), + storage.ListBucketsResponse( + buckets=[ + storage.Bucket(), + storage.Bucket(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_buckets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + storage.LockBucketRetentionPolicyRequest, + dict, + ], +) +def test_lock_bucket_retention_policy(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
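+ # (Patching __call__ on the transport's stub method lets the test
+ # exercise request construction and response handling end to end
+ # without opening a channel or touching the network.)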
+ with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + response = client.lock_bucket_retention_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.LockBucketRetentionPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +def test_lock_bucket_retention_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.LockBucketRetentionPolicyRequest( + bucket="bucket_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.lock_bucket_retention_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.LockBucketRetentionPolicyRequest( + bucket="bucket_value", + ) + + +def test_lock_bucket_retention_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.lock_bucket_retention_policy + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.lock_bucket_retention_policy + ] = mock_rpc + request = {} + client.lock_bucket_retention_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.lock_bucket_retention_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.lock_bucket_retention_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.lock_bucket_retention_policy + ] = mock_rpc + + request = {} + await client.lock_bucket_retention_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.lock_bucket_retention_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_async( + transport: str = "grpc_asyncio", + request_type=storage.LockBucketRetentionPolicyRequest, +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + response = await client.lock_bucket_retention_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.LockBucketRetentionPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_async_from_dict(): + await test_lock_bucket_retention_policy_async(request_type=dict) + + +def test_lock_bucket_retention_policy_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.lock_bucket_retention_policy( + bucket="bucket_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + + +def test_lock_bucket_retention_policy_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.lock_bucket_retention_policy( + storage.LockBucketRetentionPolicyRequest(), + bucket="bucket_value", + ) + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Bucket()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.lock_bucket_retention_policy( + bucket="bucket_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.lock_bucket_retention_policy( + storage.LockBucketRetentionPolicyRequest(), + bucket="bucket_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + +def test_get_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_iam_policy + ] = mock_rpc + + request = {} + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_from_dict_foreign(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +def test_get_iam_policy_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
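+ # (Unlike the storage messages, the IAM RPCs use the shared
+ # google.iam.v1 protos, which are plain protobuf messages rather than
+ # proto-plus wrappers; that is why the assertions below check
+ # policy_pb2.Policy directly.)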
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.set_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + +def test_set_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.set_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.set_iam_policy + ] = mock_rpc + + request = {} + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
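+ # (Same caching contract as the sync client, except that the async
+ # transport wraps its methods via gapic_v1.method_async.wrap_method,
+ # so the cached entry is replaced with an AsyncMock here.)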
+ assert mock_rpc.call_count == 1 + + await client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_from_dict_foreign(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +def test_set_iam_policy_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.test_iam_permissions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + +def test_test_iam_permissions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.test_iam_permissions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
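+ # (The async transport expects the stub to return an awaitable call
+ # object, so the response is wrapped in FakeUnaryUnaryCall, which
+ # simply resolves to the underlying message when awaited.)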
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_from_dict_foreign(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +def test_test_iam_permissions_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.UpdateBucketRequest, + dict, + ], +) +def test_update_bucket(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + response = client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.UpdateBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +def test_update_bucket_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.UpdateBucketRequest( + predefined_acl="predefined_acl_value", + predefined_default_object_acl="predefined_default_object_acl_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
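+ # (If UpdateBucketRequest declared any auto-populated request-ID
+ # fields under AIP-4235, the client would fill them with a UUID4
+ # before sending; echoing the request back below verifies that the
+ # explicitly set fields survive that round trip untouched.)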
+ with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_bucket(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.UpdateBucketRequest( + predefined_acl="predefined_acl_value", + predefined_default_object_acl="predefined_default_object_acl_value", + ) + + +def test_update_bucket_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_bucket in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_bucket] = mock_rpc + request = {} + client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_bucket_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_bucket + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_bucket + ] = mock_rpc + + request = {} + await client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.update_bucket(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_bucket_async( + transport: str = "grpc_asyncio", request_type=storage.UpdateBucketRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + response = await client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.UpdateBucketRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Bucket) + assert response.name == "name_value" + assert response.bucket_id == "bucket_id_value" + assert response.etag == "etag_value" + assert response.project == "project_value" + assert response.metageneration == 1491 + assert response.location == "location_value" + assert response.location_type == "location_type_value" + assert response.storage_class == "storage_class_value" + assert response.rpo == "rpo_value" + assert response.default_event_based_hold is True + assert response.satisfies_pzs is True + + +@pytest.mark.asyncio +async def test_update_bucket_async_from_dict(): + await test_update_bucket_async(request_type=dict) + + +def test_update_bucket_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_bucket( + bucket=storage.Bucket(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = storage.Bucket(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_bucket_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_bucket( + storage.UpdateBucketRequest(), + bucket=storage.Bucket(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_bucket_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Bucket() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Bucket()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
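+ # (The FieldMask passed as update_mask names the Bucket fields the
+ # server should overwrite; fields outside the mask are left as-is,
+ # which is what makes update_bucket a partial update.)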
+ response = await client.update_bucket( + bucket=storage.Bucket(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = storage.Bucket(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_bucket_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_bucket( + storage.UpdateBucketRequest(), + bucket=storage.Bucket(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.ComposeObjectRequest, + dict, + ], +) +def test_compose_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + response = client.compose_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.ComposeObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
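+ # storage.Object is a proto-plus message, so every scalar set on the
+ # mocked return value is checked directly as a Python attribute.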
+ assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +def test_compose_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.ComposeObjectRequest( + destination_predefined_acl="destination_predefined_acl_value", + kms_key="kms_key_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.compose_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.ComposeObjectRequest( + destination_predefined_acl="destination_predefined_acl_value", + kms_key="kms_key_value", + ) + + +def test_compose_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.compose_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.compose_object] = mock_rpc + request = {} + client.compose_object(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.compose_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_compose_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.compose_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.compose_object + ] = mock_rpc + + request = {} + await client.compose_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.compose_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_compose_object_async( + transport: str = "grpc_asyncio", request_type=storage.ComposeObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + response = await client.compose_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.ComposeObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +@pytest.mark.asyncio +async def test_compose_object_async_from_dict(): + await test_compose_object_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.DeleteObjectRequest, + dict, + ], +) +def test_delete_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.DeleteObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.DeleteObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.DeleteObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + +def test_delete_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_object] = mock_rpc + request = {} + client.delete_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_object + ] = mock_rpc + + request = {} + await client.delete_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_object_async( + transport: str = "grpc_asyncio", request_type=storage.DeleteObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_object(request) + + # Establish that the underlying gRPC stub method was called. 
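+ # DeleteObject maps to an empty response; the fake call resolves to
+ # None, so only request coercion and the absent return value are checked.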
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.DeleteObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_object_async_from_dict(): + await test_delete_object_async(request_type=dict) + + +def test_delete_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +def test_delete_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_object( + storage.DeleteObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.asyncio +async def test_delete_object_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_object_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
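+ # This check is enforced client-side: the generated method raises
+ # ValueError before any transport call, so no mock is needed here.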
+ with pytest.raises(ValueError): + await client.delete_object( + storage.DeleteObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.RestoreObjectRequest, + dict, + ], +) +def test_restore_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + response = client.restore_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.RestoreObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +def test_restore_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.RestoreObjectRequest( + bucket="bucket_value", + object_="object__value", + restore_token="restore_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.restore_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.RestoreObjectRequest( + bucket="bucket_value", + object_="object__value", + restore_token="restore_token_value", + ) + + +def test_restore_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.restore_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_object] = mock_rpc + request = {} + client.restore_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.restore_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_restore_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.restore_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.restore_object + ] = mock_rpc + + request = {} + await client.restore_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.restore_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_restore_object_async( + transport: str = "grpc_asyncio", request_type=storage.RestoreObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. 
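+ # The fake response mirrors the sync test's storage.Object so both
+ # transports are asserted against an identical set of field values.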
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + response = await client.restore_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.RestoreObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +@pytest.mark.asyncio +async def test_restore_object_async_from_dict(): + await test_restore_object_async(request_type=dict) + + +def test_restore_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.restore_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +def test_restore_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.restore_object( + storage.RestoreObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.asyncio +async def test_restore_object_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
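+ # Patching __call__ on type(client.transport.restore_object) intercepts
+ # the stub multicallable itself, so the wrapped-method machinery above
+ # it still runs while the network layer is faked.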
+ with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Object()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.restore_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_restore_object_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.restore_object( + storage.RestoreObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.CancelResumableWriteRequest, + dict, + ], +) +def test_cancel_resumable_write(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.CancelResumableWriteResponse() + response = client.cancel_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.CancelResumableWriteRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.CancelResumableWriteResponse) + + +def test_cancel_resumable_write_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.CancelResumableWriteRequest( + upload_id="upload_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.cancel_resumable_write(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.CancelResumableWriteRequest( + upload_id="upload_id_value", + ) + + +def test_cancel_resumable_write_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.cancel_resumable_write + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.cancel_resumable_write + ] = mock_rpc + request = {} + client.cancel_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.cancel_resumable_write(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.cancel_resumable_write + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.cancel_resumable_write + ] = mock_rpc + + request = {} + await client.cancel_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.cancel_resumable_write(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_async( + transport: str = "grpc_asyncio", request_type=storage.CancelResumableWriteRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
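+ # CancelResumableWrite has an empty response message, so the fake call
+ # wraps a bare CancelResumableWriteResponse with no fields to assert.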
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.CancelResumableWriteResponse() + ) + response = await client.cancel_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.CancelResumableWriteRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.CancelResumableWriteResponse) + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_async_from_dict(): + await test_cancel_resumable_write_async(request_type=dict) + + +def test_cancel_resumable_write_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.CancelResumableWriteResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_resumable_write( + upload_id="upload_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].upload_id + mock_val = "upload_id_value" + assert arg == mock_val + + +def test_cancel_resumable_write_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_resumable_write( + storage.CancelResumableWriteRequest(), + upload_id="upload_id_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.CancelResumableWriteResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.CancelResumableWriteResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_resumable_write( + upload_id="upload_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].upload_id + mock_val = "upload_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.cancel_resumable_write( + storage.CancelResumableWriteRequest(), + upload_id="upload_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.GetObjectRequest, + dict, + ], +) +def test_get_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + response = client.get_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.GetObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +def test_get_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.GetObjectRequest( + bucket="bucket_value", + object_="object__value", + restore_token="restore_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.GetObjectRequest( + bucket="bucket_value", + object_="object__value", + restore_token="restore_token_value", + ) + + +def test_get_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_object] = mock_rpc + request = {} + client.get_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_object_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_object + ] = mock_rpc + + request = {} + await client.get_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_object_async( + transport: str = "grpc_asyncio", request_type=storage.GetObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. 
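+ # As with compose and restore, GetObject resolves to a fully populated
+ # storage.Object; reusing the same values keeps the assertion block
+ # identical across the object-returning RPCs.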
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + response = await client.get_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.GetObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +@pytest.mark.asyncio +async def test_get_object_async_from_dict(): + await test_get_object_async(request_type=dict) + + +def test_get_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +def test_get_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_object( + storage.GetObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.asyncio +async def test_get_object_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
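+ # Note: the first call.return_value assignment below is generator
+ # boilerplate and is immediately superseded by the awaitable fake call.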
+ with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Object()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_object_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_object( + storage.GetObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.ReadObjectRequest, + dict, + ], +) +def test_read_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([storage.ReadObjectResponse()]) + response = client.read_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.ReadObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, storage.ReadObjectResponse) + + +def test_read_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.ReadObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.read_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.ReadObjectRequest( + bucket="bucket_value", + object_="object__value", + ) + + +def test_read_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.read_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.read_object] = mock_rpc + request = {} + client.read_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.read_object + ] = mock_rpc + + request = {} + await client.read_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.read_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_object_async( + transport: str = "grpc_asyncio", request_type=storage.ReadObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[storage.ReadObjectResponse()] + ) + response = await client.read_object(request) + + # Establish that the underlying gRPC stub method was called. 
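+ # ReadObject is server-streaming: the mocked UnaryStreamCall yields a
+ # single ReadObjectResponse via read(), which is consumed below.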
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.ReadObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, storage.ReadObjectResponse) + + +@pytest.mark.asyncio +async def test_read_object_async_from_dict(): + await test_read_object_async(request_type=dict) + + +def test_read_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([storage.ReadObjectResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +def test_read_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_object( + storage.ReadObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.asyncio +async def test_read_object_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([storage.ReadObjectResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_object( + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].object_ + mock_val = "object__value" + assert arg == mock_val + arg = args[0].generation + mock_val = 1068 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_read_object_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.read_object( + storage.ReadObjectRequest(), + bucket="bucket_value", + object_="object__value", + generation=1068, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.BidiReadObjectRequest, + dict, + ], +) +def test_bidi_read_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.bidi_read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([storage.BidiReadObjectResponse()]) + response = client.bidi_read_object(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, storage.BidiReadObjectResponse) + + +def test_bidi_read_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.bidi_read_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.bidi_read_object + ] = mock_rpc + request = [{}] + client.bidi_read_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.bidi_read_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_bidi_read_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.bidi_read_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.bidi_read_object + ] = mock_rpc + + request = [{}] + await client.bidi_read_object(request) + + # Establish that the underlying gRPC stub method was called. 
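+        # Two consecutive calls go through the cached (mocked) rpc: its
+        # call_count climbs while wrapper_fn stays at zero, showing the
+        # method was not re-wrapped.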
+ assert mock_rpc.call_count == 1 + + await client.bidi_read_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_bidi_read_object_async( + transport: str = "grpc_asyncio", request_type=storage.BidiReadObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.bidi_read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[storage.BidiReadObjectResponse()] + ) + response = await client.bidi_read_object(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, storage.BidiReadObjectResponse) + + +@pytest.mark.asyncio +async def test_bidi_read_object_async_from_dict(): + await test_bidi_read_object_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.UpdateObjectRequest, + dict, + ], +) +def test_update_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + response = client.update_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.UpdateObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
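+    # proto-plus exposes each message field as a plain attribute, so the
+    # fake values above should round-trip unchanged.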
+ assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +def test_update_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.UpdateObjectRequest( + predefined_acl="predefined_acl_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.UpdateObjectRequest( + predefined_acl="predefined_acl_value", + ) + + +def test_update_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_object] = mock_rpc + request = {} + client.update_object(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.update_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_object + ] = mock_rpc + + request = {} + await client.update_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.update_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_object_async( + transport: str = "grpc_asyncio", request_type=storage.UpdateObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + response = await client.update_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.UpdateObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +@pytest.mark.asyncio +async def test_update_object_async_from_dict(): + await test_update_object_async(request_type=dict) + + +def test_update_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_object( + object_=storage.Object(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].object_ + mock_val = storage.Object(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_object( + storage.UpdateObjectRequest(), + object_=storage.Object(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_object_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Object()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_object( + object_=storage.Object(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
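+    # The flattened object_/update_mask kwargs are folded into a single
+    # UpdateObjectRequest before reaching the stub, as asserted below.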
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].object_ + mock_val = storage.Object(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_object_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_object( + storage.UpdateObjectRequest(), + object_=storage.Object(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.WriteObjectRequest, + dict, + ], +) +def test_write_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.write_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.WriteObjectResponse( + persisted_size=1517, + ) + response = client.write_object(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.WriteObjectResponse) + + +def test_write_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.write_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.write_object] = mock_rpc + request = [{}] + client.write_object(request) + + # Establish that the underlying gRPC stub method was called. 
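+        # write_object is client-streaming, so the request above is an
+        # iterable ([{}]) rather than a dict; the cached rpc should still
+        # be invoked exactly once per call.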
+ assert mock_rpc.call_count == 1 + + client.write_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_write_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.write_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.write_object + ] = mock_rpc + + request = [{}] + await client.write_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.write_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_write_object_async( + transport: str = "grpc_asyncio", request_type=storage.WriteObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.write_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeStreamUnaryCall( + storage.WriteObjectResponse() + ) + response = await (await client.write_object(iter(requests))) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.WriteObjectResponse) + + +@pytest.mark.asyncio +async def test_write_object_async_from_dict(): + await test_write_object_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.BidiWriteObjectRequest, + dict, + ], +) +def test_bidi_write_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.bidi_write_object), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([storage.BidiWriteObjectResponse()]) + response = client.bidi_write_object(iter(requests)) + + # Establish that the underlying gRPC stub method was called. 
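+    # For streaming RPCs the positional argument received by the stub is an
+    # iterator of requests; next(args[0]) below recovers the first message
+    # that was sent.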
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, storage.BidiWriteObjectResponse) + + +def test_bidi_write_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.bidi_write_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.bidi_write_object + ] = mock_rpc + request = [{}] + client.bidi_write_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.bidi_write_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_bidi_write_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.bidi_write_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.bidi_write_object + ] = mock_rpc + + request = [{}] + await client.bidi_write_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.bidi_write_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_bidi_write_object_async( + transport: str = "grpc_asyncio", request_type=storage.BidiWriteObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.bidi_write_object), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
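+        # A StreamStreamCall mock whose read() is an AsyncMock simulates a
+        # single response message arriving on the bidi stream.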
+ call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[storage.BidiWriteObjectResponse()] + ) + response = await client.bidi_write_object(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, storage.BidiWriteObjectResponse) + + +@pytest.mark.asyncio +async def test_bidi_write_object_async_from_dict(): + await test_bidi_write_object_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.ListObjectsRequest, + dict, + ], +) +def test_list_objects(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListObjectsResponse( + prefixes=["prefixes_value"], + next_page_token="next_page_token_value", + ) + response = client.list_objects(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.ListObjectsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListObjectsPager) + assert response.prefixes == ["prefixes_value"] + assert response.next_page_token == "next_page_token_value" + + +def test_list_objects_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.ListObjectsRequest( + parent="parent_value", + page_token="page_token_value", + delimiter="delimiter_value", + prefix="prefix_value", + lexicographic_start="lexicographic_start_value", + lexicographic_end="lexicographic_end_value", + match_glob="match_glob_value", + filter="filter_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_objects(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.ListObjectsRequest( + parent="parent_value", + page_token="page_token_value", + delimiter="delimiter_value", + prefix="prefix_value", + lexicographic_start="lexicographic_start_value", + lexicographic_end="lexicographic_end_value", + match_glob="match_glob_value", + filter="filter_value", + ) + + +def test_list_objects_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_objects in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_objects] = mock_rpc + request = {} + client.list_objects(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_objects(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_objects_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_objects + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_objects + ] = mock_rpc + + request = {} + await client.list_objects(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_objects(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_objects_async( + transport: str = "grpc_asyncio", request_type=storage.ListObjectsRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListObjectsResponse( + prefixes=["prefixes_value"], + next_page_token="next_page_token_value", + ) + ) + response = await client.list_objects(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.ListObjectsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListObjectsAsyncPager) + assert response.prefixes == ["prefixes_value"] + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_objects_async_from_dict(): + await test_list_objects_async(request_type=dict) + + +def test_list_objects_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListObjectsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_objects( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_objects_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_objects( + storage.ListObjectsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_objects_flattened_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.ListObjectsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListObjectsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_objects( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_objects_flattened_error_async(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_objects( + storage.ListObjectsRequest(), + parent="parent_value", + ) + + +def test_list_objects_pager(transport_name: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
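+    # The side_effect below plays back four pages and then raises
+    # RuntimeError as a sentinel, so the test fails loudly if the pager
+    # requests a page past the empty final token.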
+ with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + storage.Object(), + ], + next_page_token="abc", + ), + storage.ListObjectsResponse( + objects=[], + next_page_token="def", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + ], + next_page_token="ghi", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + pager = client.list_objects(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, storage.Object) for i in results) + + +def test_list_objects_pages(transport_name: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + storage.Object(), + ], + next_page_token="abc", + ), + storage.ListObjectsResponse( + objects=[], + next_page_token="def", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + ], + next_page_token="ghi", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + ], + ), + RuntimeError, + ) + pages = list(client.list_objects(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_objects_async_pager(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_objects), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + storage.Object(), + ], + next_page_token="abc", + ), + storage.ListObjectsResponse( + objects=[], + next_page_token="def", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + ], + next_page_token="ghi", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_objects( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, storage.Object) for i in responses) + + +@pytest.mark.asyncio +async def test_list_objects_async_pages(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_objects), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + storage.Object(), + ], + next_page_token="abc", + ), + storage.ListObjectsResponse( + objects=[], + next_page_token="def", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + ], + next_page_token="ghi", + ), + storage.ListObjectsResponse( + objects=[ + storage.Object(), + storage.Object(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_objects(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + storage.RewriteObjectRequest, + dict, + ], +) +def test_rewrite_object(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.RewriteResponse( + total_bytes_rewritten=2285, + object_size=1169, + done=True, + rewrite_token="rewrite_token_value", + ) + response = client.rewrite_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.RewriteObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.RewriteResponse) + assert response.total_bytes_rewritten == 2285 + assert response.object_size == 1169 + assert response.done is True + assert response.rewrite_token == "rewrite_token_value" + + +def test_rewrite_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.RewriteObjectRequest( + destination_name="destination_name_value", + destination_bucket="destination_bucket_value", + destination_kms_key="destination_kms_key_value", + source_bucket="source_bucket_value", + source_object="source_object_value", + rewrite_token="rewrite_token_value", + destination_predefined_acl="destination_predefined_acl_value", + copy_source_encryption_algorithm="copy_source_encryption_algorithm_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.rewrite_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.RewriteObjectRequest( + destination_name="destination_name_value", + destination_bucket="destination_bucket_value", + destination_kms_key="destination_kms_key_value", + source_bucket="source_bucket_value", + source_object="source_object_value", + rewrite_token="rewrite_token_value", + destination_predefined_acl="destination_predefined_acl_value", + copy_source_encryption_algorithm="copy_source_encryption_algorithm_value", + ) + + +def test_rewrite_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.rewrite_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.rewrite_object] = mock_rpc + request = {} + client.rewrite_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.rewrite_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_rewrite_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.rewrite_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.rewrite_object + ] = mock_rpc + + request = {} + await client.rewrite_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.rewrite_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_rewrite_object_async( + transport: str = "grpc_asyncio", request_type=storage.RewriteObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
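+    # The faked rewrite_token mirrors the real API, where large rewrites
+    # span multiple calls and the token resumes an in-progress rewrite.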
+ with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.RewriteResponse( + total_bytes_rewritten=2285, + object_size=1169, + done=True, + rewrite_token="rewrite_token_value", + ) + ) + response = await client.rewrite_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.RewriteObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.RewriteResponse) + assert response.total_bytes_rewritten == 2285 + assert response.object_size == 1169 + assert response.done is True + assert response.rewrite_token == "rewrite_token_value" + + +@pytest.mark.asyncio +async def test_rewrite_object_async_from_dict(): + await test_rewrite_object_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.StartResumableWriteRequest, + dict, + ], +) +def test_start_resumable_write(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.StartResumableWriteResponse( + upload_id="upload_id_value", + ) + response = client.start_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.StartResumableWriteRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.StartResumableWriteResponse) + assert response.upload_id == "upload_id_value" + + +def test_start_resumable_write_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.StartResumableWriteRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.start_resumable_write(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.StartResumableWriteRequest() + + +def test_start_resumable_write_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.start_resumable_write + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.start_resumable_write + ] = mock_rpc + request = {} + client.start_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.start_resumable_write(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_start_resumable_write_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.start_resumable_write + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.start_resumable_write + ] = mock_rpc + + request = {} + await client.start_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.start_resumable_write(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_start_resumable_write_async( + transport: str = "grpc_asyncio", request_type=storage.StartResumableWriteRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.StartResumableWriteResponse( + upload_id="upload_id_value", + ) + ) + response = await client.start_resumable_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.StartResumableWriteRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.StartResumableWriteResponse) + assert response.upload_id == "upload_id_value" + + +@pytest.mark.asyncio +async def test_start_resumable_write_async_from_dict(): + await test_start_resumable_write_async(request_type=dict) + + +@pytest.mark.parametrize( + "request_type", + [ + storage.QueryWriteStatusRequest, + dict, + ], +) +def test_query_write_status(request_type, transport: str = "grpc"): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = storage.QueryWriteStatusResponse( + persisted_size=1517, + ) + response = client.query_write_status(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.QueryWriteStatusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.QueryWriteStatusResponse) + + +def test_query_write_status_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.QueryWriteStatusRequest( + upload_id="upload_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.query_write_status(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.QueryWriteStatusRequest( + upload_id="upload_id_value", + ) + + +def test_query_write_status_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.query_write_status in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.query_write_status + ] = mock_rpc + request = {} + client.query_write_status(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.query_write_status(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_query_write_status_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.query_write_status + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.query_write_status + ] = mock_rpc + + request = {} + await client.query_write_status(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.query_write_status(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_query_write_status_async( + transport: str = "grpc_asyncio", request_type=storage.QueryWriteStatusRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.QueryWriteStatusResponse() + ) + response = await client.query_write_status(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = storage.QueryWriteStatusRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, storage.QueryWriteStatusResponse)
+
+
+@pytest.mark.asyncio
+async def test_query_write_status_async_from_dict():
+    await test_query_write_status_async(request_type=dict)
+
+
+def test_query_write_status_flattened():
+    client = StorageClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.query_write_status), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = storage.QueryWriteStatusResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.query_write_status(
+            upload_id="upload_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].upload_id
+        mock_val = "upload_id_value"
+        assert arg == mock_val
+
+
+def test_query_write_status_flattened_error():
+    client = StorageClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.query_write_status(
+            storage.QueryWriteStatusRequest(),
+            upload_id="upload_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_query_write_status_flattened_async():
+    client = StorageAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.query_write_status), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            storage.QueryWriteStatusResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_write_status(
+            upload_id="upload_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].upload_id
+        mock_val = "upload_id_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_query_write_status_flattened_error_async():
+    client = StorageAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.query_write_status(
+            storage.QueryWriteStatusRequest(),
+            upload_id="upload_id_value",
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        storage.MoveObjectRequest,
+        dict,
+    ],
+)
+def test_move_object(request_type, transport: str = "grpc"):
+    client = StorageClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
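+    # Patching the transport method's __call__ intercepts the gRPC stub
+    # invocation itself, so no channel or network traffic is involved.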
+ with mock.patch.object(type(client.transport.move_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + response = client.move_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = storage.MoveObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +def test_move_object_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = storage.MoveObjectRequest( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.move_object(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == storage.MoveObjectRequest( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + +def test_move_object_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.move_object in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.move_object] = mock_rpc + request = {} + client.move_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.move_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_move_object_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.move_object + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.move_object + ] = mock_rpc + + request = {} + await client.move_object(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.move_object(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_move_object_async( + transport: str = "grpc_asyncio", request_type=storage.MoveObjectRequest +): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + # Designate an appropriate return value for the call. 
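+        # FakeUnaryUnaryCall wraps the response in an awaitable, mirroring
+        # what a real grpc.aio stub call would hand back to the async client.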
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + response = await client.move_object(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = storage.MoveObjectRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, storage.Object) + assert response.name == "name_value" + assert response.bucket == "bucket_value" + assert response.etag == "etag_value" + assert response.generation == 1068 + assert response.restore_token == "restore_token_value" + assert response.metageneration == 1491 + assert response.storage_class == "storage_class_value" + assert response.size == 443 + assert response.content_encoding == "content_encoding_value" + assert response.content_disposition == "content_disposition_value" + assert response.cache_control == "cache_control_value" + assert response.content_language == "content_language_value" + assert response.content_type == "content_type_value" + assert response.component_count == 1627 + assert response.kms_key == "kms_key_value" + assert response.temporary_hold is True + assert response.event_based_hold is True + + +@pytest.mark.asyncio +async def test_move_object_async_from_dict(): + await test_move_object_async(request_type=dict) + + +def test_move_object_flattened(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = storage.Object() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.move_object( + bucket="bucket_value", + source_object="source_object_value", + destination_object="destination_object_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].bucket + mock_val = "bucket_value" + assert arg == mock_val + arg = args[0].source_object + mock_val = "source_object_value" + assert arg == mock_val + arg = args[0].destination_object + mock_val = "destination_object_value" + assert arg == mock_val + + +def test_move_object_flattened_error(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
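+    # (Accepting both would leave it ambiguous which value wins, so the
+    # client raises ValueError rather than merging them.)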
+    with pytest.raises(ValueError):
+        client.move_object(
+            storage.MoveObjectRequest(),
+            bucket="bucket_value",
+            source_object="source_object_value",
+            destination_object="destination_object_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_move_object_flattened_async():
+    client = StorageAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.move_object), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(storage.Object())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.move_object(
+            bucket="bucket_value",
+            source_object="source_object_value",
+            destination_object="destination_object_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].bucket
+        mock_val = "bucket_value"
+        assert arg == mock_val
+        arg = args[0].source_object
+        mock_val = "source_object_value"
+        assert arg == mock_val
+        arg = args[0].destination_object
+        mock_val = "destination_object_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_move_object_flattened_error_async():
+    client = StorageAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.move_object(
+            storage.MoveObjectRequest(),
+            bucket="bucket_value",
+            source_object="source_object_value",
+            destination_object="destination_object_value",
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.StorageGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = StorageClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.StorageGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = StorageClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a transport instance.
+    transport = transports.StorageGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    options = client_options.ClientOptions()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = StorageClient(
+            client_options=options,
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a credential.
+    options = client_options.ClientOptions()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = StorageClient(
+            client_options=options, credentials=ga_credentials.AnonymousCredentials()
+        )
+
+    # It is an error to provide scopes and a transport instance.
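+    # (A pre-built transport already carries its own channel configuration,
+    # so per-client overrides such as scopes cannot be applied to it.)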
+ transport = transports.StorageGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = StorageClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.StorageGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = StorageClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.StorageGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.StorageGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.StorageGrpcTransport, + transports.StorageGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = StorageClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_bucket_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + call.return_value = None + client.delete_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.DeleteBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_bucket_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.get_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.GetBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_bucket_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.create_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.CreateBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_buckets_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + call.return_value = storage.ListBucketsResponse() + client.list_buckets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ListBucketsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_lock_bucket_retention_policy_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + call.return_value = storage.Bucket() + client.lock_bucket_retention_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.LockBucketRetentionPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_test_iam_permissions_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_bucket_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.update_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.UpdateBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_compose_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + call.return_value = storage.Object() + client.compose_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ComposeObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + call.return_value = None + client.delete_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.DeleteObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + call.return_value = storage.Object() + client.restore_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.RestoreObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_cancel_resumable_write_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + call.return_value = storage.CancelResumableWriteResponse() + client.cancel_resumable_write(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.CancelResumableWriteRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + call.return_value = storage.Object() + client.get_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.GetObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + call.return_value = iter([storage.ReadObjectResponse()]) + client.read_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ReadObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + call.return_value = storage.Object() + client.update_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.UpdateObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_objects_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + call.return_value = storage.ListObjectsResponse() + client.list_objects(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ListObjectsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_rewrite_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + call.return_value = storage.RewriteResponse() + client.rewrite_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_start_resumable_write_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + call.return_value = storage.StartResumableWriteResponse() + client.start_resumable_write(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.StartResumableWriteRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_query_write_status_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + call.return_value = storage.QueryWriteStatusResponse() + client.query_write_status(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.QueryWriteStatusRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_move_object_empty_call_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + call.return_value = storage.Object() + client.move_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.MoveObjectRequest() + + assert args[0] == request_msg + + +def test_delete_bucket_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + call.return_value = None + client.delete_bucket(request={"name": "sample1"}) + + # Establish that the underlying stub method was called. 
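+        # The routing-parameter tests also verify the implicit
+        # "x-goog-request-params" metadata entry: to_grpc_metadata() turns
+        # {"bucket": "sample1"} into ("x-goog-request-params", "bucket=sample1"),
+        # which lets the backend route the request without parsing the body.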
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.DeleteBucketRequest(**{"name": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_get_bucket_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.get_bucket(request={"name": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.GetBucketRequest(**{"name": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_create_bucket_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.create_bucket(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CreateBucketRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_create_bucket_routing_parameters_request_2_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.create_bucket(request={"bucket": {"project": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CreateBucketRequest(**{"bucket": {"project": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_list_buckets_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + call.return_value = storage.ListBucketsResponse() + client.list_buckets(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. 
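+        # Here the routing annotation renames the field: the request's
+        # "parent" value is forwarded under the "project" routing key.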
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ListBucketsRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_lock_bucket_retention_policy_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + call.return_value = storage.Bucket() + client.lock_bucket_retention_policy(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.LockBucketRetentionPolicyRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_get_iam_policy_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest(**{"resource": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_get_iam_policy_routing_parameters_request_2_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy( + request={"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest( + **{"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_set_iam_policy_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest(**{"resource": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_set_iam_policy_routing_parameters_request_2_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy( + request={"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest( + **{"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_test_iam_permissions_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_test_iam_permissions_routing_parameters_request_2_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions( + request={"resource": "projects/sample1/buckets/sample2/objects/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "projects/sample1/buckets/sample2/objects/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_test_iam_permissions_routing_parameters_request_3_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions( + request={ + "resource": "projects/sample1/buckets/sample2/managedFolders/sample3" + } + ) + + # Establish that the underlying stub method was called. 
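+        # The routing value is the "projects/*/buckets/*" prefix matched out
+        # of the full resource name, not the resource string verbatim.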
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "projects/sample1/buckets/sample2/managedFolders/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_update_bucket_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + call.return_value = storage.Bucket() + client.update_bucket(request={"bucket": {"name": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.UpdateBucketRequest(**{"bucket": {"name": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_compose_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + call.return_value = storage.Object() + client.compose_object(request={"destination": {"bucket": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ComposeObjectRequest( + **{"destination": {"bucket": "sample1"}} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_delete_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + call.return_value = None + client.delete_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.DeleteObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_restore_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + call.return_value = storage.Object() + client.restore_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RestoreObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_cancel_resumable_write_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + call.return_value = storage.CancelResumableWriteResponse() + client.cancel_resumable_write( + request={"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CancelResumableWriteRequest( + **{"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_get_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + call.return_value = storage.Object() + client.get_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.GetObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + call.return_value = iter([storage.ReadObjectResponse()]) + client.read_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ReadObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_update_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + call.return_value = storage.Object() + client.update_object(request={"object": {"bucket": "sample1"}}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.UpdateObjectRequest(**{"object": {"bucket": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_list_objects_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + call.return_value = storage.ListObjectsResponse() + client.list_objects(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ListObjectsRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_rewrite_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + call.return_value = storage.RewriteResponse() + client.rewrite_object(request={"source_bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest(**{"source_bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"source_bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_rewrite_object_routing_parameters_request_2_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + call.return_value = storage.RewriteResponse() + client.rewrite_object(request={"destination_bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest(**{"destination_bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_start_resumable_write_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + call.return_value = storage.StartResumableWriteResponse() + client.start_resumable_write( + request={"write_object_spec": {"resource": {"bucket": "sample1"}}} + ) + + # Establish that the underlying stub method was called. 
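+        # Note the nested extraction: write_object_spec.resource.bucket is
+        # what feeds the "bucket" routing header for this RPC.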
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.StartResumableWriteRequest( + **{"write_object_spec": {"resource": {"bucket": "sample1"}}} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_query_write_status_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + call.return_value = storage.QueryWriteStatusResponse() + client.query_write_status( + request={"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.QueryWriteStatusRequest( + **{"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_move_object_routing_parameters_request_1_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + call.return_value = storage.Object() + client.move_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.MoveObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_transport_kind_grpc_asyncio(): + transport = StorageAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_bucket_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.DeleteBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_bucket_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.get_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.GetBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_bucket_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.create_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.CreateBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_buckets_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListBucketsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_buckets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ListBucketsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.lock_bucket_retention_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.LockBucketRetentionPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. 
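+ # (mock_calls[0] unpacks as (name, args, kwargs); args[0] is the request
+ # proto the client built from request=None.)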
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_bucket_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.update_bucket(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.UpdateBucketRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_compose_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.compose_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ComposeObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_object(request=None) + + # Establish that the underlying stub method was called. 
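+ # (DeleteObject has no response payload, which is why the fake call above
+ # wraps None.)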
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.DeleteObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_restore_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.restore_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.RestoreObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_cancel_resumable_write_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.CancelResumableWriteResponse() + ) + await client.cancel_resumable_write(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.CancelResumableWriteRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. 
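+ # (The fully populated storage.Object below is arbitrary canned data; the
+ # assertions only inspect the request message, not this response.)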
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.get_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.GetObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[storage.ReadObjectResponse()] + ) + await client.read_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ReadObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.update_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.UpdateObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_objects_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
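+ # (At the transport level list_objects is a plain unary call; the pager
+ # abstraction sits above it in the client, so one fake response suffices.)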
+ with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListObjectsResponse( + prefixes=["prefixes_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_objects(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.ListObjectsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_rewrite_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.RewriteResponse( + total_bytes_rewritten=2285, + object_size=1169, + done=True, + rewrite_token="rewrite_token_value", + ) + ) + await client.rewrite_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_start_resumable_write_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.StartResumableWriteResponse( + upload_id="upload_id_value", + ) + ) + await client.start_resumable_write(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.StartResumableWriteRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_query_write_status_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.QueryWriteStatusResponse() + ) + await client.query_write_status(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.QueryWriteStatusRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
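+# (move_object closes out the empty-call coverage; the tests that follow
+# assert the x-goog-request-params routing metadata instead.)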
+@pytest.mark.asyncio +async def test_move_object_empty_call_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.move_object(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = storage.MoveObjectRequest() + + assert args[0] == request_msg + + +@pytest.mark.asyncio +async def test_delete_bucket_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_bucket(request={"name": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.DeleteBucketRequest(**{"name": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_get_bucket_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.get_bucket(request={"name": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.GetBucketRequest(**{"name": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_create_bucket_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
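+ # (CreateBucket routes on "project", extracted from the request's parent
+ # field rather than from a bucket name.)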
+ with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.create_bucket(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CreateBucketRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_create_bucket_routing_parameters_request_2_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.create_bucket(request={"bucket": {"project": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CreateBucketRequest(**{"bucket": {"project": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_list_buckets_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListBucketsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_buckets(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ListBucketsRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"project": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_lock_bucket_retention_policy_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
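+ # (This RPC switches back to the "bucket" routing key, taken verbatim from
+ # the request's bucket field.)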
+ with mock.patch.object( + type(client.transport.lock_bucket_retention_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.lock_bucket_retention_policy(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.LockBucketRetentionPolicyRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest(**{"resource": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_routing_parameters_request_2_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy( + request={"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest( + **{"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
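+ # (policy_pb2 is the google.iam.v1 Policy message; the version and etag
+ # values are arbitrary canned data.)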
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest(**{"resource": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_routing_parameters_request_2_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy( + request={"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest( + **{"resource": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request={"resource": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_routing_parameters_request_2_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions( + request={"resource": "projects/sample1/buckets/sample2/objects/sample3"} + ) + + # Establish that the underlying stub method was called. 
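+ # (For object-scoped resources the routing rule keeps only the
+ # "projects/*/buckets/*" prefix as the bucket header value.)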
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "projects/sample1/buckets/sample2/objects/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_routing_parameters_request_3_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions( + request={ + "resource": "projects/sample1/buckets/sample2/managedFolders/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest( + **{"resource": "projects/sample1/buckets/sample2/managedFolders/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_update_bucket_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Bucket( + name="name_value", + bucket_id="bucket_id_value", + etag="etag_value", + project="project_value", + metageneration=1491, + location="location_value", + location_type="location_type_value", + storage_class="storage_class_value", + rpo="rpo_value", + default_event_based_hold=True, + satisfies_pzs=True, + ) + ) + await client.update_bucket(request={"bucket": {"name": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.UpdateBucketRequest(**{"bucket": {"name": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_compose_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.compose_object), "__call__") as call: + # Designate an appropriate return value for the call. 
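+ # (ComposeObject routes on the nested destination.bucket field, so the
+ # header is derived from the destination resource.)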
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.compose_object(request={"destination": {"bucket": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ComposeObjectRequest( + **{"destination": {"bucket": "sample1"}} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_delete_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.DeleteObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_restore_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.restore_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. 
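+ # (RestoreObject, which brings back a soft-deleted object, routes on the
+ # request's top-level bucket field like the simple object RPCs.)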
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RestoreObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_cancel_resumable_write_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.cancel_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.CancelResumableWriteResponse() + ) + await client.cancel_resumable_write( + request={"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.CancelResumableWriteRequest( + **{"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_get_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.get_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.GetObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[storage.ReadObjectResponse()] + ) + await client.read_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. 
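+ # (read_object is server-streaming, so the fake call exposes an async
+ # read() side effect instead of being awaited directly.)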
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ReadObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_update_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.update_object(request={"object": {"bucket": "sample1"}}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.UpdateObjectRequest(**{"object": {"bucket": "sample1"}}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_list_objects_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_objects), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.ListObjectsResponse( + prefixes=["prefixes_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_objects(request={"parent": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.ListObjectsRequest(**{"parent": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_rewrite_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.RewriteResponse( + total_bytes_rewritten=2285, + object_size=1169, + done=True, + rewrite_token="rewrite_token_value", + ) + ) + await client.rewrite_object(request={"source_bucket": "sample1"}) + + # Establish that the underlying stub method was called. 
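+ # (RewriteObject is special-cased: the source bucket travels under its own
+ # "source_bucket" routing key, while the destination bucket uses "bucket";
+ # see the next test.)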
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest(**{"source_bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"source_bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_rewrite_object_routing_parameters_request_2_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.rewrite_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.RewriteResponse( + total_bytes_rewritten=2285, + object_size=1169, + done=True, + rewrite_token="rewrite_token_value", + ) + ) + await client.rewrite_object(request={"destination_bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.RewriteObjectRequest(**{"destination_bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_start_resumable_write_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.start_resumable_write), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.StartResumableWriteResponse( + upload_id="upload_id_value", + ) + ) + await client.start_resumable_write( + request={"write_object_spec": {"resource": {"bucket": "sample1"}}} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.StartResumableWriteRequest( + **{"write_object_spec": {"resource": {"bucket": "sample1"}}} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_query_write_status_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.query_write_status), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.QueryWriteStatusResponse() + ) + await client.query_write_status( + request={"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + # Establish that the underlying stub method was called. 
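+ # (Like cancel_resumable_write, query_write_status derives the bucket
+ # header from the "projects/*/buckets/*" prefix of the upload_id.)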
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.QueryWriteStatusRequest( + **{"upload_id": "projects/sample1/buckets/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = {"bucket": "projects/sample1/buckets/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_move_object_routing_parameters_request_1_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.move_object), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + storage.Object( + name="name_value", + bucket="bucket_value", + etag="etag_value", + generation=1068, + restore_token="restore_token_value", + metageneration=1491, + storage_class="storage_class_value", + size=443, + content_encoding="content_encoding_value", + content_disposition="content_disposition_value", + cache_control="cache_control_value", + content_language="content_language_value", + content_type="content_type_value", + component_count=1627, + kms_key="kms_key_value", + temporary_hold=True, + event_based_hold=True, + ) + ) + await client.move_object(request={"bucket": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = storage.MoveObjectRequest(**{"bucket": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"bucket": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.StorageGrpcTransport, + ) + + +def test_storage_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.StorageTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_storage_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.storage_v2.services.storage.transports.StorageTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.StorageTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
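+ # (StorageTransport is the abstract base; every RPC listed below must be
+ # overridden by a concrete transport.)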
+ methods = ( + "delete_bucket", + "get_bucket", + "create_bucket", + "list_buckets", + "lock_bucket_retention_policy", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + "update_bucket", + "compose_object", + "delete_object", + "restore_object", + "cancel_resumable_write", + "get_object", + "read_object", + "bidi_read_object", + "update_object", + "write_object", + "bidi_write_object", + "list_objects", + "rewrite_object", + "start_resumable_write", + "query_write_status", + "move_object", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_storage_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.storage_v2.services.storage.transports.StorageTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.StorageTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ), + quota_project_id="octopus", + ) + + +def test_storage_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.storage_v2.services.storage.transports.StorageTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.StorageTransport() + adc.assert_called_once() + + +def test_storage_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + StorageClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.StorageGrpcTransport, + transports.StorageGrpcAsyncIOTransport, + ], +) +def test_storage_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
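+ # (ADC, Application Default Credentials, is resolved via
+ # google.auth.default().)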
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.StorageGrpcTransport, + transports.StorageGrpcAsyncIOTransport, + ], +) +def test_storage_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.StorageGrpcTransport, grpc_helpers), + (transports.StorageGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_storage_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "storage.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ), + scopes=["1", "2"], + default_host="storage.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.StorageGrpcTransport, transports.StorageGrpcAsyncIOTransport], +) +def test_storage_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_storage_host_no_port(transport_name): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="storage.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ("storage.googleapis.com:443") + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_storage_host_with_port(transport_name): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="storage.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ("storage.googleapis.com:8000") + + +def test_storage_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.StorageGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_storage_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.StorageGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.StorageGrpcTransport, transports.StorageGrpcAsyncIOTransport], +) +def test_storage_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [transports.StorageGrpcTransport, transports.StorageGrpcAsyncIOTransport], +) +def test_storage_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_bucket_path(): + project = "squid" + bucket = "clam" + expected = "projects/{project}/buckets/{bucket}".format( + project=project, + bucket=bucket, + ) + actual = StorageClient.bucket_path(project, bucket) + assert expected == actual + + +def test_parse_bucket_path(): + expected = { + "project": "whelk", + "bucket": "octopus", + } + path = StorageClient.bucket_path(**expected) + + # Check that the path construction is reversible. 
+ actual = StorageClient.parse_bucket_path(path) + assert expected == actual + + +def test_crypto_key_path(): + project = "oyster" + location = "nudibranch" + key_ring = "cuttlefish" + crypto_key = "mussel" + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + ) + actual = StorageClient.crypto_key_path(project, location, key_ring, crypto_key) + assert expected == actual + + +def test_parse_crypto_key_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "key_ring": "scallop", + "crypto_key": "abalone", + } + path = StorageClient.crypto_key_path(**expected) + + # Check that the path construction is reversible. + actual = StorageClient.parse_crypto_key_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = StorageClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = StorageClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = StorageClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = StorageClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = StorageClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = StorageClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = StorageClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = StorageClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = StorageClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format( + project=project, + ) + actual = StorageClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = StorageClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = StorageClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = StorageClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = StorageClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = StorageClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.StorageTransport, "_prep_wrapped_messages" + ) as prep: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.StorageTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = StorageClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close_grpc(): + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +@pytest.mark.asyncio +async def test_transport_close_grpc_asyncio(): + client = StorageAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = StorageClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (StorageClient, transports.StorageGrpcTransport), + (StorageAsyncClient, transports.StorageGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) From 5964009c0efae9c84a9433715d74befca7ddf98d Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 18 Jul 2025 07:48:53 -0400 Subject: [PATCH 07/12] chore: exclude autogenerated .coveragerc (#1510) --- owlbot.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/owlbot.py b/owlbot.py index 2cc8e4259..e04f47062 100644 --- a/owlbot.py +++ b/owlbot.py @@ -37,6 +37,7 @@ s.move( [library], excludes=[ + ".coveragerc", "docs/**/*", "scripts/fixup*.py", "setup.py", @@ -108,12 +109,6 @@ """) -s.replace( - ".coveragerc", - "omit =", - """omit = - .nox/*""") - python.py_samples(skip_readmes=True) # Use a python runtime which is available in the owlbot post processor here From 
61bd57509ab3966f4382656200f2600ce41cce24 Mon Sep 17 00:00:00 2001
From: Chandra Shekhar Sirimala
Date: Fri, 18 Jul 2025 18:34:52 +0530
Subject: [PATCH 08/12] chore: remove duplicates in coveragerc (#1511)

* chore: remove duplicates in coveragerc

* chore: fix formatting
---
 .coveragerc | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/.coveragerc b/.coveragerc
index 89c9bc1b9..b20914c45 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -4,11 +4,7 @@ branch = True
 [report]
 show_missing = True
 omit =
-    .nox/*
-    .nox/*
-    .nox/*
-    .nox/*
-    .nox/*
+    .nox/*
     google/cloud/storage/__init__.py
     google/cloud/storage/gapic_version.py
 exclude_lines =

From 083a505e1f43c20657570c74ec707485f4b3989d Mon Sep 17 00:00:00 2001
From: Chandra Shekhar Sirimala
Date: Tue, 29 Jul 2025 11:15:34 +0530
Subject: [PATCH 09/12] chore: improve docs for list_files_with_prefix (#1517)

If a user wants to list just the prefixes, without listing the blob
names, they can do so by following the updated documentation.
---
 samples/snippets/storage_list_files_with_prefix.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/samples/snippets/storage_list_files_with_prefix.py b/samples/snippets/storage_list_files_with_prefix.py
index be7468cba..7f877d1d6 100644
--- a/samples/snippets/storage_list_files_with_prefix.py
+++ b/samples/snippets/storage_list_files_with_prefix.py
@@ -46,12 +46,23 @@ def list_blobs_with_prefix(bucket_name, prefix, delimiter=None):
     that lists the "subfolders" under `a/`:
 
         a/b/
+
+
+    Note: If you only want to list prefixes such as a/b/, without iterating
+    over the blobs themselves, you can do:
+
+    ```
+    for page in blobs.pages:
+        print(page.prefixes)
+    ```
     """
 
     storage_client = storage.Client()
 
     # Note: Client.list_blobs requires at least package version 1.17.0.
-    blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)
+    blobs = storage_client.list_blobs(
+        bucket_name, prefix=prefix, delimiter=delimiter
+    )
 
     # Note: The call returns a response only when the iterator is consumed.
     print("Blobs:")

From 6a9923e4fc944f7a7c3906eb7800d23677bd2481 Mon Sep 17 00:00:00 2001
From: Chandra Shekhar Sirimala
Date: Wed, 30 Jul 2025 10:51:23 +0530
Subject: [PATCH 10/12] Fix: Add logs on AssertionError for issue #1512 (#1518)

Adding Response.content and headers to the assertion messages will help
debug this flaky issue, which intermittently fails with a 403; the more
verbose output should make such failures easier to diagnose.

Closes #1512
---
 tests/system/test__signing.py | 122 +++++++++++++++++++++++++++-------
 1 file changed, 99 insertions(+), 23 deletions(-)

diff --git a/tests/system/test__signing.py b/tests/system/test__signing.py
index cdf718d90..ed5a0c09a 100644
--- a/tests/system/test__signing.py
+++ b/tests/system/test__signing.py
@@ -51,10 +51,17 @@ def _create_signed_list_blobs_url_helper(
     )
 
     response = requests.get(signed_url)
-    assert response.status_code == 200
+    assert (
+        response.status_code == 200
+    ), f"Response content start: {response.content} \
+        :Response content end. \
+        Response headers start: {response.headers} \
+        :Response headers end."
-def test_create_signed_list_blobs_url_v2(storage_client, signing_bucket, no_mtls): +def test_create_signed_list_blobs_url_v2( + storage_client, signing_bucket, no_mtls +): _create_signed_list_blobs_url_helper( storage_client, signing_bucket, @@ -76,7 +83,9 @@ def test_create_signed_list_blobs_url_v2_w_expiration( ) -def test_create_signed_list_blobs_url_v4(storage_client, signing_bucket, no_mtls): +def test_create_signed_list_blobs_url_v4( + storage_client, signing_bucket, no_mtls +): _create_signed_list_blobs_url_helper( storage_client, signing_bucket, @@ -137,7 +146,12 @@ def _create_signed_read_url_helper( headers["x-goog-encryption-key-sha256"] = key_hash response = requests.get(signed_url, headers=headers) - assert response.status_code == 200 + assert ( + response.status_code == 200 + ), f"Response content start: {response.content} \ + :Response content end. \ + Response headers start: {response.headers} \ + :Response headers end." if payload is not None: assert response.content == payload @@ -215,7 +229,9 @@ def test_create_signed_read_url_v4_w_non_ascii_name( ) -def test_create_signed_read_url_v2_w_csek(storage_client, signing_bucket, no_mtls): +def test_create_signed_read_url_v2_w_csek( + storage_client, signing_bucket, no_mtls +): encryption_key = os.urandom(32) _create_signed_read_url_helper( storage_client, @@ -226,7 +242,9 @@ def test_create_signed_read_url_v2_w_csek(storage_client, signing_bucket, no_mtl ) -def test_create_signed_read_url_v4_w_csek(storage_client, signing_bucket, no_mtls): +def test_create_signed_read_url_v4_w_csek( + storage_client, signing_bucket, no_mtls +): encryption_key = os.urandom(32) _create_signed_read_url_helper( storage_client, @@ -309,7 +327,9 @@ def test_create_signed_read_url_v4_w_access_token_universe_domain( "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/iam", ] - response = universe_domain_iam_client.generate_access_token(name=name, scope=scope) + response = universe_domain_iam_client.generate_access_token( + name=name, scope=scope + ) _create_signed_read_url_helper( universe_domain_client, @@ -320,7 +340,9 @@ def test_create_signed_read_url_v4_w_access_token_universe_domain( ) -def _create_signed_delete_url_helper(client, bucket, version="v2", expiration=None): +def _create_signed_delete_url_helper( + client, bucket, version="v2", expiration=None +): expiration = _morph_expiration(version, expiration) blob = bucket.blob("DELETE_ME.txt") @@ -335,7 +357,12 @@ def _create_signed_delete_url_helper(client, bucket, version="v2", expiration=No response = requests.request("DELETE", signed_delete_url) - assert response.status_code == 204 + assert ( + response.status_code == 204 + ), f"Response content start: {response.content} \ + :Response content end. \ + Response headers start: {response.headers} \ + :Response headers end." 
assert response.content == b"" assert not blob.exists() @@ -345,7 +372,9 @@ def test_create_signed_delete_url_v2(storage_client, signing_bucket, no_mtls): def test_create_signed_delete_url_v4(storage_client, signing_bucket, no_mtls): - _create_signed_delete_url_helper(storage_client, signing_bucket, version="v4") + _create_signed_delete_url_helper( + storage_client, signing_bucket, version="v4" + ) def _create_signed_resumable_upload_url_helper( @@ -364,14 +393,26 @@ def _create_signed_resumable_upload_url_helper( ) post_headers = {"x-goog-resumable": "start"} - post_response = requests.post(signed_resumable_upload_url, headers=post_headers) - assert post_response.status_code == 201 + post_response = requests.post( + signed_resumable_upload_url, headers=post_headers + ) + assert ( + post_response.status_code == 201 + ), f"Response content start: {post_response.content} \ + :Response content end. \ + Response headers start: {post_response.headers} \ + :Response headers end." # Finish uploading the body. location = post_response.headers["Location"] put_headers = {"content-length": str(len(payload))} put_response = requests.put(location, headers=put_headers, data=payload) - assert put_response.status_code == 200 + assert ( + put_response.status_code == 200 + ), f"Response content start: {put_response.content} \ + :Response content end. \ + Response headers start: {put_response.headers} \ + :Response headers end." # Download using a signed URL and verify. signed_download_url = blob.generate_signed_url( @@ -379,7 +420,12 @@ def _create_signed_resumable_upload_url_helper( ) get_response = requests.get(signed_download_url) - assert get_response.status_code == 200 + assert ( + get_response.status_code == 200 + ), f"Response content start: {get_response.content} \ + :Response content end. \ + Response headers start: {get_response.headers} \ + :Response headers end." assert get_response.content == payload # Finally, delete the blob using a signed URL. @@ -391,10 +437,17 @@ def _create_signed_resumable_upload_url_helper( ) delete_response = requests.delete(signed_delete_url) - assert delete_response.status_code == 204 + assert ( + delete_response.status_code == 204 + ), f"Response content start: {delete_response.content} \ + :Response content end. \ + Response headers start: {delete_response.headers} \ + :Response headers end." -def test_create_signed_resumable_upload_url_v2(storage_client, signing_bucket, no_mtls): +def test_create_signed_resumable_upload_url_v2( + storage_client, signing_bucket, no_mtls +): _create_signed_resumable_upload_url_helper( storage_client, signing_bucket, @@ -402,7 +455,9 @@ def test_create_signed_resumable_upload_url_v2(storage_client, signing_bucket, n ) -def test_create_signed_resumable_upload_url_v4(storage_client, signing_bucket, no_mtls): +def test_create_signed_resumable_upload_url_v4( + storage_client, signing_bucket, no_mtls +): _create_signed_resumable_upload_url_helper( storage_client, signing_bucket, @@ -439,10 +494,17 @@ def test_generate_signed_post_policy_v4( ) with open(blob_name, "r") as f: files = {"file": (blob_name, f)} - response = requests.post(policy["url"], data=policy["fields"], files=files) + response = requests.post( + policy["url"], data=policy["fields"], files=files + ) os.remove(blob_name) - assert response.status_code == 204 + assert ( + response.status_code == 204 + ), f"Response content start: {response.content} \ + :Response content end. \ + Response headers start: {response.headers} \ + :Response headers end." 
blob = bucket.get_blob(blob_name) assert blob.download_as_bytes() == payload @@ -487,10 +549,17 @@ def test_generate_signed_post_policy_v4_access_token_sa_email( ) with open(blob_name, "r") as f: files = {"file": (blob_name, f)} - response = requests.post(policy["url"], data=policy["fields"], files=files) + response = requests.post( + policy["url"], data=policy["fields"], files=files + ) os.remove(blob_name) - assert response.status_code == 204 + assert ( + response.status_code == 204 + ), f"Response content start: {response.content} \ + :Response content end. \ + Response headers start: {response.headers} \ + :Response headers end." blob = signing_bucket.get_blob(blob_name) blobs_to_delete.append(blob) @@ -522,9 +591,16 @@ def test_generate_signed_post_policy_v4_invalid_field( ) with open(blob_name, "r") as f: files = {"file": (blob_name, f)} - response = requests.post(policy["url"], data=policy["fields"], files=files) + response = requests.post( + policy["url"], data=policy["fields"], files=files + ) os.remove(blob_name) - assert response.status_code == 400 + assert ( + response.status_code == 400 + ), f"Response content start: {response.content} \ + :Response content end. \ + Response headers start: {response.headers} \ + :Response headers end." assert list(bucket.list_blobs()) == [] From a29073cf58df9c5667305e05c6378284057cda23 Mon Sep 17 00:00:00 2001 From: Pulkit Aggarwal <54775856+Pulkit0110@users.noreply.github.com> Date: Tue, 5 Aug 2025 07:06:06 +0530 Subject: [PATCH 11/12] feat: add support for bucket IP filter (#1516) * feat: add support for bucket IP filter * minor fix * fix unit tests * change create bucket with filter system test * add more system tests * update system tests * resolving comments --- google/cloud/storage/bucket.py | 55 ++++++++++++ google/cloud/storage/ip_filter.py | 143 ++++++++++++++++++++++++++++++ tests/system/test_bucket.py | 68 ++++++++++++++ tests/unit/test_bucket.py | 48 ++++++++++ tests/unit/test_ip_filter.py | 106 ++++++++++++++++++++++ 5 files changed, 420 insertions(+) create mode 100644 google/cloud/storage/ip_filter.py create mode 100644 tests/unit/test_ip_filter.py diff --git a/google/cloud/storage/bucket.py b/google/cloud/storage/bucket.py index c8df6c600..9d8c5e431 100644 --- a/google/cloud/storage/bucket.py +++ b/google/cloud/storage/bucket.py @@ -56,6 +56,7 @@ from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS from google.cloud.storage.constants import REGION_LOCATION_TYPE from google.cloud.storage.constants import STANDARD_STORAGE_CLASS +from google.cloud.storage.ip_filter import IPFilter from google.cloud.storage.notification import BucketNotification from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT from google.cloud.storage.retry import DEFAULT_RETRY @@ -88,6 +89,7 @@ _FROM_STRING_MESSAGE = ( "Bucket.from_string() is deprecated. " "Use Bucket.from_uri() instead." ) +_IP_FILTER_PROPERTY = "ipFilter" def _blobs_page_start(iterator, page, response): @@ -3887,6 +3889,59 @@ def generate_signed_url( query_parameters=query_parameters, ) + @property + def ip_filter(self): + """Retrieve or set the IP Filter configuration for this bucket. + + See https://cloud.google.com/storage/docs/ip-filtering-overview and + https://cloud.google.com/storage/docs/json_api/v1/buckets#ipFilter + + .. note:: + The getter for this property returns an + :class:`~google.cloud.storage.ip_filter.IPFilter` object, which is a + structured representation of the bucket's IP filter configuration. 
+ Modifying the returned object has no effect. To update the bucket's + IP filter, create and assign a new ``IPFilter`` object to this + property and then call + :meth:`~google.cloud.storage.bucket.Bucket.patch`. + + .. code-block:: python + + from google.cloud.storage.ip_filter import ( + IPFilter, + PublicNetworkSource, + ) + + ip_filter = IPFilter() + ip_filter.mode = "Enabled" + ip_filter.public_network_source = PublicNetworkSource( + allowed_ip_cidr_ranges=["203.0.113.5/32"] + ) + bucket.ip_filter = ip_filter + bucket.patch() + + :setter: Set the IP Filter configuration for this bucket. + :getter: Gets the IP Filter configuration for this bucket. + + :rtype: :class:`~google.cloud.storage.ip_filter.IPFilter` or ``NoneType`` + :returns: + An ``IPFilter`` object representing the configuration, or ``None`` + if no filter is configured. + """ + resource = self._properties.get(_IP_FILTER_PROPERTY) + if resource: + return IPFilter._from_api_resource(resource) + return None + + @ip_filter.setter + def ip_filter(self, value): + if value is None: + self._patch_property(_IP_FILTER_PROPERTY, None) + elif isinstance(value, IPFilter): + self._patch_property(_IP_FILTER_PROPERTY, value._to_api_resource()) + else: + self._patch_property(_IP_FILTER_PROPERTY, value) + class SoftDeletePolicy(dict): """Map a bucket's soft delete policy. diff --git a/google/cloud/storage/ip_filter.py b/google/cloud/storage/ip_filter.py new file mode 100644 index 000000000..e5b2318bf --- /dev/null +++ b/google/cloud/storage/ip_filter.py @@ -0,0 +1,143 @@ +# Copyright 2014 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""IP Filter configuration for Google Cloud Storage Buckets.""" + +from typing import Dict, Any, Optional, List + +_MODE = "mode" +_PUBLIC_NETWORK_SOURCE = "publicNetworkSource" +_VPC_NETWORK_SOURCES = "vpcNetworkSources" +_ALLOWED_IP_CIDR_RANGES = "allowedIpCidrRanges" +_NETWORK = "network" +_ALLOW_ALL_SERVICE_AGENT_ACCESS = "allowAllServiceAgentAccess" +_ALLOW_CROSS_ORG_VPCS = "allowCrossOrgVpcs" + + +class PublicNetworkSource: + """Represents a public network source for a GCS Bucket IP Filter. + + :type allowed_ip_cidr_ranges: list(str) or None + :param allowed_ip_cidr_ranges: A list of public IPv4 or IPv6 ranges in + CIDR notation that are allowed to access + the bucket. + """ + + def __init__(self, allowed_ip_cidr_ranges: Optional[List[str]] = None): + self.allowed_ip_cidr_ranges = allowed_ip_cidr_ranges or [] + + def _to_api_resource(self) -> Dict[str, Any]: + """Serializes this object to a dictionary for API requests.""" + return {_ALLOWED_IP_CIDR_RANGES: self.allowed_ip_cidr_ranges} + + +class VpcNetworkSource: + """Represents a VPC network source for a GCS Bucket IP Filter. + + :type network: str + :param network: The resource name of the VPC network. + + :type allowed_ip_cidr_ranges: list(str) or None + :param allowed_ip_cidr_ranges: A list of IPv4 or IPv6 ranges in CIDR + notation allowed to access the bucket + from this VPC. 
+ """ + + def __init__( + self, network: str, allowed_ip_cidr_ranges: Optional[List[str]] = None + ): + self.network = network + self.allowed_ip_cidr_ranges = allowed_ip_cidr_ranges or [] + + def _to_api_resource(self) -> Dict[str, Any]: + """Serializes this object to a dictionary for API requests.""" + return { + _NETWORK: self.network, + _ALLOWED_IP_CIDR_RANGES: self.allowed_ip_cidr_ranges, + } + + +class IPFilter: + """Represents a GCS Bucket IP Filter configuration. + + This class is a helper for constructing the IP Filter dictionary to be + assigned to a bucket's ``ip_filter`` property. + """ + + """ + Attributes: + mode (str): Required. The mode of the IP filter. Can be "Enabled" or "Disabled". + allow_all_service_agent_access (bool): Required. If True, allows Google + Cloud service agents to bypass the IP filter. + public_network_source (PublicNetworkSource): (Optional) The configuration + for requests from the public internet. + vpc_network_sources (list(VpcNetworkSource)): (Optional) A list of + configurations for requests from VPC networks. + allow_cross_org_vpcs (bool): (Optional) If True, allows VPCs from + other organizations to be used in the configuration. + """ + + def __init__(self): + self.mode: Optional[str] = None + self.public_network_source: Optional[PublicNetworkSource] = None + self.vpc_network_sources: List[VpcNetworkSource] = [] + self.allow_all_service_agent_access: Optional[bool] = None + self.allow_cross_org_vpcs: Optional[bool] = None + + @classmethod + def _from_api_resource(cls, resource: Dict[str, Any]) -> "IPFilter": + """Factory: creates an IPFilter instance from a server response.""" + ip_filter = cls() + ip_filter.mode = resource.get(_MODE) + ip_filter.allow_all_service_agent_access = resource.get( + _ALLOW_ALL_SERVICE_AGENT_ACCESS, None + ) + + public_network_source_data = resource.get(_PUBLIC_NETWORK_SOURCE, None) + if public_network_source_data: + ip_filter.public_network_source = PublicNetworkSource( + allowed_ip_cidr_ranges=public_network_source_data.get( + _ALLOWED_IP_CIDR_RANGES, [] + ) + ) + + vns_res_list = resource.get(_VPC_NETWORK_SOURCES, []) + ip_filter.vpc_network_sources = [ + VpcNetworkSource( + network=vns.get(_NETWORK), + allowed_ip_cidr_ranges=vns.get(_ALLOWED_IP_CIDR_RANGES, []), + ) + for vns in vns_res_list + ] + ip_filter.allow_cross_org_vpcs = resource.get(_ALLOW_CROSS_ORG_VPCS, None) + return ip_filter + + def _to_api_resource(self) -> Dict[str, Any]: + """Serializes this object to a dictionary for API requests.""" + resource = { + _MODE: self.mode, + _ALLOW_ALL_SERVICE_AGENT_ACCESS: self.allow_all_service_agent_access, + } + + if self.public_network_source: + resource[ + _PUBLIC_NETWORK_SOURCE + ] = self.public_network_source._to_api_resource() + if self.vpc_network_sources is not None: + resource[_VPC_NETWORK_SOURCES] = [ + vns._to_api_resource() for vns in self.vpc_network_sources + ] + if self.allow_cross_org_vpcs is not None: + resource[_ALLOW_CROSS_ORG_VPCS] = self.allow_cross_org_vpcs + return resource diff --git a/tests/system/test_bucket.py b/tests/system/test_bucket.py index f06de8e8c..3b05e8483 100644 --- a/tests/system/test_bucket.py +++ b/tests/system/test_bucket.py @@ -17,6 +17,11 @@ from google.api_core import exceptions from . 
import _helpers +from google.cloud.storage.ip_filter import ( + IPFilter, + PublicNetworkSource, + VpcNetworkSource, +) def test_bucket_create_w_alt_storage_class(storage_client, buckets_to_delete): @@ -1299,3 +1304,66 @@ def test_new_bucket_with_hierarchical_namespace( bucket = storage_client.create_bucket(bucket_obj) buckets_to_delete.append(bucket) assert bucket.hierarchical_namespace_enabled is True + + +def test_bucket_ip_filter_patch(storage_client, buckets_to_delete): + """Test setting and clearing IP filter configuration without enabling enforcement.""" + bucket_name = _helpers.unique_name("ip-filter-control") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + ip_filter = IPFilter() + ip_filter.mode = "Disabled" + ip_filter.allow_all_service_agent_access = True + ip_filter.public_network_source = PublicNetworkSource( + allowed_ip_cidr_ranges=["203.0.113.10/32"] + ) + ip_filter.vpc_network_sources.append( + VpcNetworkSource( + network=f"projects/{storage_client.project}/global/networks/default", + allowed_ip_cidr_ranges=["10.0.0.0/8"], + ) + ) + bucket.ip_filter = ip_filter + bucket.patch() + + # Reload and verify the full configuration was set correctly. + bucket.reload() + reloaded_filter = bucket.ip_filter + assert reloaded_filter is not None + assert reloaded_filter.mode == "Disabled" + assert reloaded_filter.allow_all_service_agent_access is True + assert reloaded_filter.public_network_source.allowed_ip_cidr_ranges == [ + "203.0.113.10/32" + ] + assert len(reloaded_filter.vpc_network_sources) == 1 + +def test_list_buckets_with_ip_filter(storage_client, buckets_to_delete): + """Test that listing buckets returns a summarized IP filter.""" + bucket_name = _helpers.unique_name("ip-filter-list") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + ip_filter = IPFilter() + ip_filter.mode = "Disabled" + ip_filter.allow_all_service_agent_access = True + ip_filter.public_network_source = PublicNetworkSource( + allowed_ip_cidr_ranges=["203.0.113.10/32"] + ) + bucket.ip_filter = ip_filter + bucket.patch() + + buckets_list = list(storage_client.list_buckets(prefix=bucket_name)) + found_bucket = next((b for b in buckets_list if b.name == bucket_name), None) + + assert found_bucket is not None + summarized_filter = found_bucket.ip_filter + + assert summarized_filter is not None + assert summarized_filter.mode == "Disabled" + assert summarized_filter.allow_all_service_agent_access is True + + # Check that the summarized filter does not include full details. 
+ assert summarized_filter.public_network_source is None + assert summarized_filter.vpc_network_sources == [] + diff --git a/tests/unit/test_bucket.py b/tests/unit/test_bucket.py index e494cc18a..809b572e0 100644 --- a/tests/unit/test_bucket.py +++ b/tests/unit/test_bucket.py @@ -4612,6 +4612,54 @@ def test_generate_signed_url_v4_w_incompatible_params(self): virtual_hosted_style=True, bucket_bound_hostname="cdn.example.com" ) + def test_ip_filter_getter_unset(self): + """Test that ip_filter is None when not set.""" + bucket = self._make_one() + self.assertIsNone(bucket.ip_filter) + + def test_ip_filter_getter_w_value(self): + """Test getting an existing ip_filter configuration.""" + from google.cloud.storage.ip_filter import IPFilter + + ipf_property = {"mode": "Enabled"} + properties = {"ipFilter": ipf_property} + bucket = self._make_one(properties=properties) + + ip_filter = bucket.ip_filter + self.assertIsInstance(ip_filter, IPFilter) + self.assertEqual(ip_filter.mode, "Enabled") + + def test_ip_filter_setter(self): + """Test setting the ip_filter with a helper class.""" + from google.cloud.storage.ip_filter import IPFilter + from google.cloud.storage.bucket import _IP_FILTER_PROPERTY + + bucket = self._make_one() + ip_filter = IPFilter() + ip_filter.mode = "Enabled" + + bucket.ip_filter = ip_filter + + self.assertIn(_IP_FILTER_PROPERTY, bucket._changes) + self.assertEqual( + bucket._properties[_IP_FILTER_PROPERTY], + { + "mode": "Enabled", + "vpcNetworkSources": [], + "allowAllServiceAgentAccess": None, + }, + ) + + def test_ip_filter_setter_w_none(self): + """Test clearing the ip_filter by setting it to None.""" + from google.cloud.storage.bucket import _IP_FILTER_PROPERTY + + bucket = self._make_one(properties={"ipFilter": {"mode": "Enabled"}}) + bucket.ip_filter = None + + self.assertIn(_IP_FILTER_PROPERTY, bucket._changes) + self.assertIsNone(bucket._properties.get(_IP_FILTER_PROPERTY)) + class Test__item_to_notification(unittest.TestCase): def _call_fut(self, iterator, item): diff --git a/tests/unit/test_ip_filter.py b/tests/unit/test_ip_filter.py new file mode 100644 index 000000000..369462f2f --- /dev/null +++ b/tests/unit/test_ip_filter.py @@ -0,0 +1,106 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + + +class TestIPFilterHelpers(unittest.TestCase): + @staticmethod + def _get_public_network_source_class(): + from google.cloud.storage.ip_filter import PublicNetworkSource + + return PublicNetworkSource + + @staticmethod + def _get_vpc_network_source_class(): + from google.cloud.storage.ip_filter import VpcNetworkSource + + return VpcNetworkSource + + @staticmethod + def _get_ip_filter_class(): + from google.cloud.storage.ip_filter import IPFilter + + return IPFilter + + def test_public_network_source_serialization(self): + pns_class = self._get_public_network_source_class() + pns = pns_class(allowed_ip_cidr_ranges=["1.2.3.4/32"]) + resource = pns._to_api_resource() + self.assertEqual(resource, {"allowedIpCidrRanges": ["1.2.3.4/32"]}) + + def test_vpc_network_source_serialization(self): + vns_class = self._get_vpc_network_source_class() + vns = vns_class( + network="projects/p/global/networks/n", + allowed_ip_cidr_ranges=["10.0.0.0/8"], + ) + resource = vns._to_api_resource() + self.assertEqual( + resource, + { + "network": "projects/p/global/networks/n", + "allowedIpCidrRanges": ["10.0.0.0/8"], + }, + ) + + def test_ip_filter_full_serialization(self): + ip_filter_class = self._get_ip_filter_class() + pns_class = self._get_public_network_source_class() + vns_class = self._get_vpc_network_source_class() + + ip_filter = ip_filter_class() + ip_filter.mode = "Enabled" + ip_filter.public_network_source = pns_class( + allowed_ip_cidr_ranges=["1.2.3.4/32"] + ) + ip_filter.vpc_network_sources.append( + vns_class( + network="projects/p/global/networks/n", + allowed_ip_cidr_ranges=["10.0.0.0/8"], + ) + ) + ip_filter.allow_all_service_agent_access = True + + resource = ip_filter._to_api_resource() + expected = { + "mode": "Enabled", + "publicNetworkSource": {"allowedIpCidrRanges": ["1.2.3.4/32"]}, + "vpcNetworkSources": [ + { + "network": "projects/p/global/networks/n", + "allowedIpCidrRanges": ["10.0.0.0/8"], + } + ], + "allowAllServiceAgentAccess": True, + } + self.assertEqual(resource, expected) + + def test_ip_filter_deserialization(self): + ip_filter_class = self._get_ip_filter_class() + resource = { + "mode": "Enabled", + "publicNetworkSource": {"allowedIpCidrRanges": ["1.2.3.4/32"]}, + "allowAllServiceAgentAccess": False, + } + + ip_filter = ip_filter_class._from_api_resource(resource) + + self.assertEqual(ip_filter.mode, "Enabled") + self.assertIsNotNone(ip_filter.public_network_source) + self.assertEqual( + ip_filter.public_network_source.allowed_ip_cidr_ranges, ["1.2.3.4/32"] + ) + self.assertEqual(ip_filter.vpc_network_sources, []) + self.assertIs(ip_filter.allow_all_service_agent_access, False) From e84906c2252e98c2906d5d0e0da2249deb0c134a Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 14:28:57 +0530 Subject: [PATCH 12/12] chore(main): release 3.3.0 (#1508) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- CHANGELOG.md | 17 +++++++++++++++++ google/cloud/storage/version.py | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52b077ea5..92342065c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,23 @@ [1]: https://pypi.org/project/google-cloud-storage/#history +## [3.3.0](https://github.com/googleapis/python-storage/compare/v3.2.0...v3.3.0) (2025-08-05) + + +### Features + +* Add support for bucket IP filter ([#1516](https://github.com/googleapis/python-storage/issues/1516)) 
([a29073c](https://github.com/googleapis/python-storage/commit/a29073cf58df9c5667305e05c6378284057cda23)) + + +### Bug Fixes + +* Add logs on AssertionError for issue [#1512](https://github.com/googleapis/python-storage/issues/1512) ([#1518](https://github.com/googleapis/python-storage/issues/1518)) ([6a9923e](https://github.com/googleapis/python-storage/commit/6a9923e4fc944f7a7c3906eb7800d23677bd2481)) + + +### Documentation + +* Update the documentation of move_blob function ([#1507](https://github.com/googleapis/python-storage/issues/1507)) ([72252e9](https://github.com/googleapis/python-storage/commit/72252e940909ce2e3da9cfd80f5b7b43a026f45c)) + ## [3.2.0](https://github.com/googleapis/python-storage/compare/v3.1.1...v3.2.0) (2025-07-04) diff --git a/google/cloud/storage/version.py b/google/cloud/storage/version.py index c24ca23d6..2279c3674 100644 --- a/google/cloud/storage/version.py +++ b/google/cloud/storage/version.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "3.2.0" +__version__ = "3.3.0"
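
The documentation change in PATCH 09 describes how to list only the "subfolder" prefixes without printing the blob names themselves. A minimal end-to-end sketch of that pattern, assuming a bucket named `my-bucket` (a placeholder) that contains objects under `a/`:

```python
from google.cloud import storage

storage_client = storage.Client()

# With a delimiter, listing results are partitioned into blobs and
# prefixes ("subfolders" such as a/b/).
blobs = storage_client.list_blobs("my-bucket", prefix="a/", delimiter="/")

# Prefixes are populated page by page as the iterator is consumed,
# so no blob names need to be touched along the way.
for page in blobs.pages:
    for prefix in page.prefixes:
        print(prefix)
```

Once the iterator has been fully consumed, the aggregated `blobs.prefixes` set is also available; that is what the surrounding sample already relies on when a delimiter is passed.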
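The bucket IP filter support added in PATCH 11 combines into a read-modify-write workflow along the following lines. This is a sketch built only from the property and helper classes introduced above, with `my-bucket` as a placeholder; note that setting mode to "Enabled" starts rejecting requests from addresses outside the allowed ranges:

```python
from google.cloud import storage
from google.cloud.storage.ip_filter import IPFilter, PublicNetworkSource

client = storage.Client()
bucket = client.get_bucket("my-bucket")

# Build a configuration that admits a single public CIDR range while
# keeping Google Cloud service agents exempt from the filter.
ip_filter = IPFilter()
ip_filter.mode = "Enabled"
ip_filter.allow_all_service_agent_access = True
ip_filter.public_network_source = PublicNetworkSource(
    allowed_ip_cidr_ranges=["203.0.113.5/32"]
)
bucket.ip_filter = ip_filter
bucket.patch()

# The getter returns a fresh IPFilter built from the stored resource;
# mutating it has no effect until it is assigned back and patched.
bucket.reload()
print(bucket.ip_filter.mode)

# Clearing the configuration is itself a patch of the ipFilter property.
bucket.ip_filter = None
bucket.patch()
```

Per the setter in PATCH 11, assigning `None` patches `ipFilter` to null on the server rather than merely dropping it client-side.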