diff --git a/.kokoro/system.sh b/.kokoro/system.sh index e316420b2559..04ed752f04f2 100755 --- a/.kokoro/system.sh +++ b/.kokoro/system.sh @@ -97,6 +97,7 @@ packages_with_system_tests=( "google-cloud-firestore" "google-cloud-logging" "google-cloud-testutils" + "sqlalchemy-spanner" ) # A file for running system tests diff --git a/.librarian/state.yaml b/.librarian/state.yaml index 37307629cfb8..1bf381811c82 100644 --- a/.librarian/state.yaml +++ b/.librarian/state.yaml @@ -4074,3 +4074,12 @@ libraries: preserve_regex: [] remove_regex: [] tag_format: '{id}-v{version}' + - id: sqlalchemy-spanner + version: 1.17.2 + last_generated_commit: "" + apis: [] + source_roots: + - packages/sqlalchemy-spanner + preserve_regex: [] + remove_regex: [] + tag_format: '{id}-v{version}' diff --git a/packages/sqlalchemy-spanner/.repo-metadata.json b/packages/sqlalchemy-spanner/.repo-metadata.json new file mode 100644 index 000000000000..2ee9a72eda79 --- /dev/null +++ b/packages/sqlalchemy-spanner/.repo-metadata.json @@ -0,0 +1,14 @@ +{ + "name": "sqlalchemy-spanner", + "name_pretty": "Spanner dialect for SQLAlchemy", + "product_documentation": "https://cloud.google.com/spanner/docs", + "client_documentation": "https://github.com/googleapis/python-spanner-sqlalchemy", + "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open", + "release_level": "stable", + "language": "python", + "library_type": "INTEGRATION", + "repo": "googleapis/google-cloud-python", + "distribution_name": "sqlalchemy-spanner", + "requires_billing": true, + "api_shortname": "sqlalchemy-spanner" +} diff --git a/packages/sqlalchemy-spanner/BENCHMARKS.md b/packages/sqlalchemy-spanner/BENCHMARKS.md new file mode 100644 index 000000000000..14c44358d8d3 --- /dev/null +++ b/packages/sqlalchemy-spanner/BENCHMARKS.md @@ -0,0 +1,26 @@ +# Benchmarks + +The performance test suite is located in 
[test/benchmark.py](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/blob/main/test/benchmark.py) and intended to compare execution time difference between SQLAlchemy dialect for Spanner and pure Spanner client. + +The test suite requirements: +- `scipy` Python package installed +- the original dialect requirements + +Use `PROJECT`, `INSTANCE` and `DATABASE` module constants to set a project to execute tests on. + +The following measurements were made on a VM instance. + +# 25-11-2021 + +|Test|mean, sec|error|std_dev| +|----|-------|-----|--------| +|SPANNER insert_one_row_with_fetch_after| 0.16|0.0|0.03| +|ALCHEMY insert_one_row_with_fetch_after| 0.11| 0.0|0.02| +|SPANNER read_one_row| 0.04| 0.0| 0.01| +|ALCHEMY read_one_row| 0.01| 0.0| 0.0| +|SPANNER insert_many_rows| 0.33| 0.01| 0.05| +|ALCHEMY insert_many_rows| 0.32| 0.01| 0.06| +|SPANNER select_many_rows| 0.04| 0.0| 0.01| +|ALCHEMY select_many_rows| 0.03| 0.0| 0.0| +|SPANNER insert_many_rows_with_mutations| 0.07| 0.0| 0.03| +|SQLALCHEMY insert_many_rows_with_mutations| 0.31| 0.01| 0.07| diff --git a/packages/sqlalchemy-spanner/CHANGELOG.md b/packages/sqlalchemy-spanner/CHANGELOG.md new file mode 100644 index 000000000000..cd8893785e44 --- /dev/null +++ b/packages/sqlalchemy-spanner/CHANGELOG.md @@ -0,0 +1,318 @@ +# Changelog + +[PyPI History][1] + +[1]: https://pypi.org/project/sqlalchemy-spanner/#history + +## [1.17.2](https://github.com/googleapis/google-cloud-python/compare/sqlalchemy-spanner-v1.17.1...sqlalchemy-spanner-v1.17.2) (2025-12-15) + + +### Bug Fixes + +* Retrieve columns in compound indexes in correct order (#798) ([9afe49bb720356c58890931c17546650ffd61f88](https://github.com/googleapis/google-cloud-python/commit/9afe49bb720356c58890931c17546650ffd61f88)) + +## [1.17.1](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.17.0...v1.17.1) (2025-10-21) + + +### Bug Fixes + +* Return Correct Column Order in get_multi_foreign_keys 
([#783](https://github.com/googleapis/python-spanner-sqlalchemy/issues/783)) ([42027d5](https://github.com/googleapis/python-spanner-sqlalchemy/commit/42027d56abe3b3e87faece03f4ade84b9703acd6)), closes [#779](https://github.com/googleapis/python-spanner-sqlalchemy/issues/779) + +## [1.17.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.16.0...v1.17.0) (2025-10-09) + + +### Features + +* Add Support for Interleaved Indexes ([#762](https://github.com/googleapis/python-spanner-sqlalchemy/issues/762)) ([77b86f1](https://github.com/googleapis/python-spanner-sqlalchemy/commit/77b86f1ad9d31932c960497eb1fb29635b74cb92)), closes [#761](https://github.com/googleapis/python-spanner-sqlalchemy/issues/761) + +## [1.16.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.15.0...v1.16.0) (2025-09-02) + + +### Features + +* Support NULL FILTERED indexes ([#750](https://github.com/googleapis/python-spanner-sqlalchemy/issues/750)) ([4bc0589](https://github.com/googleapis/python-spanner-sqlalchemy/commit/4bc05898995a586816e116e0a3205966a52d1ef8)) + + +### Documentation + +* Add sample for parse_json ([#752](https://github.com/googleapis/python-spanner-sqlalchemy/issues/752)) ([b2f0e89](https://github.com/googleapis/python-spanner-sqlalchemy/commit/b2f0e89b8f01481fa6f29da055300eeb533591cc)), closes [#735](https://github.com/googleapis/python-spanner-sqlalchemy/issues/735) + +## [1.15.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.14.0...v1.15.0) (2025-08-19) + + +### Features + +* Add license metadata to setup.py ([#712](https://github.com/googleapis/python-spanner-sqlalchemy/issues/712)) ([8f2e97e](https://github.com/googleapis/python-spanner-sqlalchemy/commit/8f2e97e527b00bfb6db40d946a21f522177eab7b)) +* Enable SQLAlchemy 2.0's insertmany feature ([#721](https://github.com/googleapis/python-spanner-sqlalchemy/issues/721)) 
([1fe9f4b](https://github.com/googleapis/python-spanner-sqlalchemy/commit/1fe9f4b0a2f94d66c925d1d60a1fd83fc45e9c89)) +* Support informational foreign keys ([#719](https://github.com/googleapis/python-spanner-sqlalchemy/issues/719)) ([c565ae1](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c565ae12b1b429c66037e9cd0c4be427a60ab5b0)) + + +### Bug Fixes + +* Report column defaults in introspection ([#744](https://github.com/googleapis/python-spanner-sqlalchemy/issues/744)) ([309c641](https://github.com/googleapis/python-spanner-sqlalchemy/commit/309c64179d668dbe24881e6d7fb4783fb1d8bbf2)), closes [#730](https://github.com/googleapis/python-spanner-sqlalchemy/issues/730) +* Respect existing server default in alter column DDL ([#733](https://github.com/googleapis/python-spanner-sqlalchemy/issues/733)) ([1f8a25f](https://github.com/googleapis/python-spanner-sqlalchemy/commit/1f8a25f63286c1241141985d4f10f558e929a272)) + +## [1.14.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.13.1...v1.14.0) (2025-06-27) + + +### Features + +* Support commit timestamp option ([#697](https://github.com/googleapis/python-spanner-sqlalchemy/issues/697)) ([82bb8ed](https://github.com/googleapis/python-spanner-sqlalchemy/commit/82bb8ed583a5fd91c8a10fb73c85a6a5f45269f6)) + +## [1.13.1](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.13.0...v1.13.1) (2025-06-20) + + +### Bug Fixes + +* Support retrieval of cross-schema foreign keys ([ef07a1f](https://github.com/googleapis/python-spanner-sqlalchemy/commit/ef07a1f55736eae9751f85fef66599fdfa21bcd4)), closes [#638](https://github.com/googleapis/python-spanner-sqlalchemy/issues/638) + +## [1.13.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.12.0...v1.13.0) (2025-06-05) + + +### Features + +* Introduce compatibility with native namespace packages ([#375](https://github.com/googleapis/python-spanner-sqlalchemy/issues/375)) 
([052e699](https://github.com/googleapis/python-spanner-sqlalchemy/commit/052e699f82a795def518f4f0a32039e1c68174a0)) + +## [1.12.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.11.1...v1.12.0) (2025-06-02) + + +### Features + +* Document the use of statement and transaction tags ([#676](https://github.com/googleapis/python-spanner-sqlalchemy/issues/676)) ([c78ad04](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c78ad04dc7a3e1c773bde21ef927d5250f47992d)) +* Support database role in connect arguments ([#667](https://github.com/googleapis/python-spanner-sqlalchemy/issues/667)) ([47aa27c](https://github.com/googleapis/python-spanner-sqlalchemy/commit/47aa27c489cb7051cb55468ab4d6b79f8c0ce1f3)) +* Support multi-row inserts ([#671](https://github.com/googleapis/python-spanner-sqlalchemy/issues/671)) ([f5d94cd](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f5d94cd15cba43684fc584072018ab3bc826f457)), closes [#670](https://github.com/googleapis/python-spanner-sqlalchemy/issues/670) + +## [1.11.1](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.11.0...v1.11.1) (2025-05-27) + + +### Bug Fixes + +* Update README to include isolation level repeatable read ([#668](https://github.com/googleapis/python-spanner-sqlalchemy/issues/668)) ([d84daf6](https://github.com/googleapis/python-spanner-sqlalchemy/commit/d84daf65a496bdff6f5d9e835490785c69533238)) + +## [1.11.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.10.0...v1.11.0) (2025-05-07) + + +### Features + +* Add isolation level support and sample ([#652](https://github.com/googleapis/python-spanner-sqlalchemy/issues/652)) ([0aba318](https://github.com/googleapis/python-spanner-sqlalchemy/commit/0aba31835bc581a0a05e29b5878ba0a665686414)) +* Add SpannerPickleType ([#655](https://github.com/googleapis/python-spanner-sqlalchemy/issues/655)) 
([0837542](https://github.com/googleapis/python-spanner-sqlalchemy/commit/0837542e5606ab9ea7a8765bf54524ebf9b0dd71)), closes [#654](https://github.com/googleapis/python-spanner-sqlalchemy/issues/654) +* Support schemas in queries and dml statements ([#639](https://github.com/googleapis/python-spanner-sqlalchemy/issues/639)) ([81c154a](https://github.com/googleapis/python-spanner-sqlalchemy/commit/81c154a37b82315a8bb57319ba11272626addad3)) + + +### Bug Fixes + +* Column order in get_multi_pk_constraint ([#640](https://github.com/googleapis/python-spanner-sqlalchemy/issues/640)) ([16c87e4](https://github.com/googleapis/python-spanner-sqlalchemy/commit/16c87e4fbf1b9d5dbac0e3279cce078a2d09e4b4)) +* Include schema when creating indices ([#637](https://github.com/googleapis/python-spanner-sqlalchemy/issues/637)) ([41905e2](https://github.com/googleapis/python-spanner-sqlalchemy/commit/41905e21b5b6473d5dbf75d40db765ebf48235dc)) + +## [1.10.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.9.0...v1.10.0) (2025-03-17) + + +### Features + +* Support AUTO_INCREMENT and IDENTITY columns ([#610](https://github.com/googleapis/python-spanner-sqlalchemy/issues/610)) ([f67ebe8](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f67ebe888ef4da8d94ff6d1e1d7f4cd5de37616c)) + +## [1.9.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.8.0...v1.9.0) (2025-02-21) + + +### Features + +* Support request and transaction tags ([#558](https://github.com/googleapis/python-spanner-sqlalchemy/issues/558)) ([c4496fd](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c4496fd73c2afe0f519fed0264abe2abb9d022b9)) + + +### Documentation + +* Add test for using FOR UPDATE ([#575](https://github.com/googleapis/python-spanner-sqlalchemy/issues/575)) ([8419ae4](https://github.com/googleapis/python-spanner-sqlalchemy/commit/8419ae4ef07ba5b5e3134586c475cfcaeda240b5)) + +## 
[1.8.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.7.0...v1.8.0) (2024-12-09) + + +### Features + +* Add helper function for insert-or-update ([#526](https://github.com/googleapis/python-spanner-sqlalchemy/issues/526)) ([6ff12ec](https://github.com/googleapis/python-spanner-sqlalchemy/commit/6ff12ecf6f1883525a901df4b4103b45ca95abed)), closes [#391](https://github.com/googleapis/python-spanner-sqlalchemy/issues/391) +* Support dml returning ([#335](https://github.com/googleapis/python-spanner-sqlalchemy/issues/335)) ([7db3f37](https://github.com/googleapis/python-spanner-sqlalchemy/commit/7db3f374510673d6521b16ca44d21043069d6ee7)) +* Support float32 ([#531](https://github.com/googleapis/python-spanner-sqlalchemy/issues/531)) ([6c3cb42](https://github.com/googleapis/python-spanner-sqlalchemy/commit/6c3cb42919c5c8d52719d855af4fc2bb22c13fae)) +* Support Partitioned DML ([#541](https://github.com/googleapis/python-spanner-sqlalchemy/issues/541)) ([108d965](https://github.com/googleapis/python-spanner-sqlalchemy/commit/108d965c60b6ea817de7fed86dca3d20f923d975)), closes [#496](https://github.com/googleapis/python-spanner-sqlalchemy/issues/496) + + +### Bug Fixes + +* Add `existing_nullable` usage to `visit_column_type` ([#329](https://github.com/googleapis/python-spanner-sqlalchemy/issues/329)) ([273f03b](https://github.com/googleapis/python-spanner-sqlalchemy/commit/273f03bdf27c12317712a9939eedd25bd88c475a)) +* Map now() to current_timestamp ([#540](https://github.com/googleapis/python-spanner-sqlalchemy/issues/540)) ([4b24f33](https://github.com/googleapis/python-spanner-sqlalchemy/commit/4b24f335ff918c7404201db16d05ccc817626dfe)), closes [#497](https://github.com/googleapis/python-spanner-sqlalchemy/issues/497) +* Support storing columns for indices ([#485](https://github.com/googleapis/python-spanner-sqlalchemy/issues/485)) ([93579c8](https://github.com/googleapis/python-spanner-sqlalchemy/commit/93579c8d6298dd9a07b2ca2b9c451036e33d2e6f)) +* 
Support THEN RETURN for insert, update, delete ([#503](https://github.com/googleapis/python-spanner-sqlalchemy/issues/503)) ([ac64472](https://github.com/googleapis/python-spanner-sqlalchemy/commit/ac644726665213f234ce8ec4dea715c820a670e9)) + + +### Dependencies + +* Add nh3 ([#481](https://github.com/googleapis/python-spanner-sqlalchemy/issues/481)) ([3c2bcf9](https://github.com/googleapis/python-spanner-sqlalchemy/commit/3c2bcf9901ce132a6d5d5d3b1ad3608526a378b5)) +* Add proto plus ([#482](https://github.com/googleapis/python-spanner-sqlalchemy/issues/482)) ([8663453](https://github.com/googleapis/python-spanner-sqlalchemy/commit/86634531793cf01b46cefe87f74375ee59060638)) +* Update all deps ([#413](https://github.com/googleapis/python-spanner-sqlalchemy/issues/413)) ([25d9d2c](https://github.com/googleapis/python-spanner-sqlalchemy/commit/25d9d2c32638eb3e551921eecea435452c548bcb)) + + +### Documentation + +* Add sample for read-only transactions ([#533](https://github.com/googleapis/python-spanner-sqlalchemy/issues/533)) ([d2d72b6](https://github.com/googleapis/python-spanner-sqlalchemy/commit/d2d72b6fad4ea457114a50a2869d053798fed452)) +* Add sample for stale reads ([#539](https://github.com/googleapis/python-spanner-sqlalchemy/issues/539)) ([e9df810](https://github.com/googleapis/python-spanner-sqlalchemy/commit/e9df8105b18e03dbf3b746fed85ffe9da286b953)) +* Add samples for Spanner-specific features ([#492](https://github.com/googleapis/python-spanner-sqlalchemy/issues/492)) ([a6ed382](https://github.com/googleapis/python-spanner-sqlalchemy/commit/a6ed382be2a7105f9e8b2f855df3919e8c6750c9)) +* Cleanup the transaction section of README a bit ([#545](https://github.com/googleapis/python-spanner-sqlalchemy/issues/545)) ([c3b5df5](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c3b5df52c2fc62b11aa684c2d02dac95dd06ab59)) +* Fix readme typo ([#487](https://github.com/googleapis/python-spanner-sqlalchemy/issues/487)) 
([b452b4f](https://github.com/googleapis/python-spanner-sqlalchemy/commit/b452b4f73d200b99fd800862c88304b67aa035c5)) + +## [1.7.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.6.2...v1.7.0) (2024-02-07) + + +### Features + +* Support sequences ([#336](https://github.com/googleapis/python-spanner-sqlalchemy/issues/336)) ([e35a8a0](https://github.com/googleapis/python-spanner-sqlalchemy/commit/e35a8a01fadce8b5a4b0208f9e6146a4241fa827)) + + +### Bug Fixes + +* Db.params OpenTelemetry integration issue ([#346](https://github.com/googleapis/python-spanner-sqlalchemy/issues/346)) ([0a69031](https://github.com/googleapis/python-spanner-sqlalchemy/commit/0a69031c9145945e5c438df48977329a67f94a78)) +* Fixing test for literals due to change in sqlalchemy core tests ([#384](https://github.com/googleapis/python-spanner-sqlalchemy/issues/384)) ([62cccc3](https://github.com/googleapis/python-spanner-sqlalchemy/commit/62cccc33cba504f8a4c67bd215341a3e747ec9bf)) +* Table name should be quoted by back quotes (`) on DROP TABLE ([#385](https://github.com/googleapis/python-spanner-sqlalchemy/issues/385)) ([628d26c](https://github.com/googleapis/python-spanner-sqlalchemy/commit/628d26c416cbe44871d8114251989d9f581bebf0)) + +## [1.6.2](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.6.1...v1.6.2) (2023-05-31) + + +### Bug Fixes + +* Disables sequence support ([#326](https://github.com/googleapis/python-spanner-sqlalchemy/issues/326)) ([7b441ff](https://github.com/googleapis/python-spanner-sqlalchemy/commit/7b441ff867160a102ebe88dfa27b3e21b9149007)) + +## [1.6.1](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.6.0...v1.6.1) (2023-05-23) + + +### Bug Fixes + +* Add opentelemetry version ([#322](https://github.com/googleapis/python-spanner-sqlalchemy/issues/322)) ([b80d24d](https://github.com/googleapis/python-spanner-sqlalchemy/commit/b80d24d251f07d4c000aa214955cf9729cd49545)) +* Fix check so it's all lowercase. 
([#321](https://github.com/googleapis/python-spanner-sqlalchemy/issues/321)) ([8fae358](https://github.com/googleapis/python-spanner-sqlalchemy/commit/8fae3587d5c963539b255c976136b18041147e5b)) + +## [1.6.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.5.0...v1.6.0) (2023-04-26) + + +### Features + +* Enable instance-level connection ([#316](https://github.com/googleapis/python-spanner-sqlalchemy/issues/316)) ([9af8e86](https://github.com/googleapis/python-spanner-sqlalchemy/commit/9af8e863f7fb0fa8bea050ca022bbe4e05315d6d)) + +## [1.5.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.4.0...v1.5.0) (2023-04-19) + + +### Features + +* Feat: SQLAlchemy 2.0 support ([#314](https://github.com/googleapis/python-spanner-sqlalchemy/issues/314)) ([61d836b](https://github.com/googleapis/python-spanner-sqlalchemy/commit/61d836bade2a89d04b5c61e4ca9c56e7163f6cc6)) + +## [1.4.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.3.0...v1.4.0) (2023-04-06) + + +### Features + +* User provided client ([#311](https://github.com/googleapis/python-spanner-sqlalchemy/issues/311)) ([5b07111](https://github.com/googleapis/python-spanner-sqlalchemy/commit/5b0711102bb45f5775addbda61cb4da5231c96d7)) + +## [1.3.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.2.2...v1.3.0) (2023-03-20) + + +### Features + +* Implement get_view_names() method ([#306](https://github.com/googleapis/python-spanner-sqlalchemy/issues/306)) ([63461e6](https://github.com/googleapis/python-spanner-sqlalchemy/commit/63461e67364b5214e7ea8a2d89c0fda4d9ced72d)), closes [#303](https://github.com/googleapis/python-spanner-sqlalchemy/issues/303) +* Support request priorities ([#286](https://github.com/googleapis/python-spanner-sqlalchemy/issues/286)) ([3aecf2d](https://github.com/googleapis/python-spanner-sqlalchemy/commit/3aecf2d651e6eb9f3af72a3ed3599aa51b4158a9)) + + +### Bug Fixes + +* Alembic incompatibility with sqlalchemy < 1.3.11 
([#290](https://github.com/googleapis/python-spanner-sqlalchemy/issues/290)) ([f99f3a7](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f99f3a78477aecc71af70deba41b861e12d51c28)) +* Introspect constraints, keeping their order ([#289](https://github.com/googleapis/python-spanner-sqlalchemy/issues/289)) ([7f65972](https://github.com/googleapis/python-spanner-sqlalchemy/commit/7f659729e15848c1493cb271e832b6968d7ab031)) +* Test fix ([#310](https://github.com/googleapis/python-spanner-sqlalchemy/issues/310)) ([c376d42](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c376d422ab455ee88bb94e2cd136aa9ef865e375)) + +## [1.2.2](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.2.1...v1.2.2) (2022-10-04) + + +### Bug Fixes + +* Adding requirements ([#250](https://github.com/googleapis/python-spanner-sqlalchemy/issues/250)) ([61a13d4](https://github.com/googleapis/python-spanner-sqlalchemy/commit/61a13d4ba152a24d5fa6083594aa86f46d5395de)) +* Don't introspect internal UNIQUE constraints ([#244](https://github.com/googleapis/python-spanner-sqlalchemy/issues/244)) ([51cdc53](https://github.com/googleapis/python-spanner-sqlalchemy/commit/51cdc534856b5ab933213803257679faa33be41c)) +* Spanner auto managed indexes should not be introspected ([#241](https://github.com/googleapis/python-spanner-sqlalchemy/issues/241)) ([c3b5907](https://github.com/googleapis/python-spanner-sqlalchemy/commit/c3b59077ff8d6d8916007bf204f90e1d1ed41c00)) +* Update dialect name for ALTER operation overrides ([#234](https://github.com/googleapis/python-spanner-sqlalchemy/issues/234)) ([f9e8ebe](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f9e8ebedc863b2b84b2decffc1831125001785c8)) + + +### Documentation + +* Add auto retry mechanism explanation ([#243](https://github.com/googleapis/python-spanner-sqlalchemy/issues/243)) ([68b9bc8](https://github.com/googleapis/python-spanner-sqlalchemy/commit/68b9bc8b389c29451317cf78989578e0a7369dad)) +* Mention 
autocommit_block as a solution for Aborted transaction … ([#239](https://github.com/googleapis/python-spanner-sqlalchemy/issues/239)) ([f23e599](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f23e599ef6a9d8f198c41f32a586e42af840280d)), closes [#229](https://github.com/googleapis/python-spanner-sqlalchemy/issues/229) +* Mention package install with pip ([#245](https://github.com/googleapis/python-spanner-sqlalchemy/issues/245)) ([528a9b0](https://github.com/googleapis/python-spanner-sqlalchemy/commit/528a9b0ba1bb9f0b96e35c809faa923f292684a0)) + +## [1.2.1](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.2.0...v1.2.1) (2022-08-09) + + +### Bug Fixes + +* alembic migration fails in case of a sequential upgrade ([#200](https://github.com/googleapis/python-spanner-sqlalchemy/issues/200)) ([f62f664](https://github.com/googleapis/python-spanner-sqlalchemy/commit/f62f664f31ec052068e241729344aec5f605c4f8)) +* don't reset attributes of non-Spanner connections ([#222](https://github.com/googleapis/python-spanner-sqlalchemy/issues/222)) ([072415e](https://github.com/googleapis/python-spanner-sqlalchemy/commit/072415eb9ea0bf701be2a35c4cc3dc80854ca831)) +* incorrect DDL generated when using server_default ([#209](https://github.com/googleapis/python-spanner-sqlalchemy/issues/209)) ([#220](https://github.com/googleapis/python-spanner-sqlalchemy/issues/220)) ([7ab1742](https://github.com/googleapis/python-spanner-sqlalchemy/commit/7ab174233dc75fd34d4127cb06dd49c216d92abc)) + + +### Documentation + +* add a note about connection URL prefixes ([#219](https://github.com/googleapis/python-spanner-sqlalchemy/issues/219)) ([a986949](https://github.com/googleapis/python-spanner-sqlalchemy/commit/a9869498f220a529a1dcc51c89d53af54311074c)) + +## [1.2.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.1.0...v1.2.0) (2022-06-03) + + +### Features + +* allow SQLAlchemy 1.4 use 
([#198](https://github.com/googleapis/python-spanner-sqlalchemy/issues/198)) ([7793b7d](https://github.com/googleapis/python-spanner-sqlalchemy/commit/7793b7ddfcbd99e966e3ef6f7ec13d7dc04d39fb)) + +## [1.1.0](https://github.com/googleapis/python-spanner-sqlalchemy/compare/v1.0.0...v1.1.0) (2022-01-28) + + +### Features + +* drop read_only on a connection returned back to a pool ([#189](https://github.com/googleapis/python-spanner-sqlalchemy/issues/189)) ([16388c1](https://github.com/googleapis/python-spanner-sqlalchemy/commit/16388c1c9ba7798c0c0df786f2e4a8c86b7767c2)) +* rollback a connection returned back to pool ([#193](https://github.com/googleapis/python-spanner-sqlalchemy/issues/193)) ([13ff9cb](https://github.com/googleapis/python-spanner-sqlalchemy/commit/13ff9cb73049d989bacb97fd8be3ad3bdce7023c)) +* support SQLAlchemy 1.4 ([#191](https://github.com/googleapis/python-spanner-sqlalchemy/issues/191)) ([029b181](https://github.com/googleapis/python-spanner-sqlalchemy/commit/029b18109c1ff21318c3820da5aa0945b6d6325d)) + + +### Bug Fixes + +* bump up google-cloud-spanner required version ([#171](https://github.com/googleapis/python-spanner-sqlalchemy/issues/171)) ([33c86e8](https://github.com/googleapis/python-spanner-sqlalchemy/commit/33c86e8fdeac4fd65569c438e8613dcb86e15edc)) +* connection reset fails when an additional dialect is used ([#188](https://github.com/googleapis/python-spanner-sqlalchemy/issues/188)) ([417b8b8](https://github.com/googleapis/python-spanner-sqlalchemy/commit/417b8b81911417ee3a1f826c37a9e490641944ac)) +* delete stale instance with delay of 5 seconds ([#194](https://github.com/googleapis/python-spanner-sqlalchemy/issues/194)) ([2932a02](https://github.com/googleapis/python-spanner-sqlalchemy/commit/2932a02bb58c4e2800da1e18452babcfc74617d6)) +* NOT NULL computed column creation failure ([#173](https://github.com/googleapis/python-spanner-sqlalchemy/issues/173)) 
([e336735](https://github.com/googleapis/python-spanner-sqlalchemy/commit/e3367354d3b24328d7162fd2ccc778f23c630cd2)) + + +### Documentation + +* add a README section for the autoincremented ids ([#180](https://github.com/googleapis/python-spanner-sqlalchemy/issues/180)) ([4c610ea](https://github.com/googleapis/python-spanner-sqlalchemy/commit/4c610eaecd32679f23cae2f70d299d3c3d33d024)) +* explicitly recommend uuid to generate PKs ([#182](https://github.com/googleapis/python-spanner-sqlalchemy/issues/182)) ([b10f2ca](https://github.com/googleapis/python-spanner-sqlalchemy/commit/b10f2cae0eb13eb5496d08cbeae77a626b4ad6f1)), closes [#181](https://github.com/googleapis/python-spanner-sqlalchemy/issues/181) + +## [1.0.0](https://www.github.com/googleapis/python-spanner-sqlalchemy/compare/v0.1.0...v1.0.0) (2021-12-08) + + +### Features + +* add code samples ([#55](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/55)) ([406c34b](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/406c34bdb21e01a1317c074fab34d87bb3d61020)) +* set user-agent string to distinguish SQLAlchemy requests ([#116](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/116)) ([b5e1a21](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/b5e1a211a0475690feed36fd222a41c216d8fb82)) +* support computed columns ([#139](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/139)) ([046ca97](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/046ca975778f4793e2c37d70d2a602546f9d4699)), closes [#137](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/137) +* support JSON data type ([#135](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/135)) ([184a7d5](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/184a7d576a790bbbd049fe80d589af78831379b4)) +* support read_only connections ([#125](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/125)) 
([352c47d](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/352c47de7bb4ea1c30b50a7fe5aee0c4d102e80e)) +* support stale reads ([#146](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/146)) ([d80cb27](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/d80cb2792437731c24905c7a6919468c37779c67)) + + +### Bug Fixes + +* ALTER COLUMN NOT NULL directive fails because of inappropriate syntax ([#124](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/124)) ([c433cda](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/c433cda99fd8544810c878328a272a3a9430630f)) +* array columns reflection ([#119](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/119)) ([af3b97b](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/af3b97bfa4b3ed4b223384c9ed3fa0643204d8c9)), closes [#118](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/118) +* calculate limit value correctly for offset only queries ([#160](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/160)) ([6844336](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/684433682ed29d9cde8c9898796024cefeb38493)) +* correct typo in spanner_interleave_on_delete_cascade keyword ([#99](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/99)) ([a0ebf75](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/a0ebf758eda351c0a20103f9e8c2243f002b2e6e)) +* raise Unimplemented error when creating temporary tables ([#159](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/159)) ([646d6ac](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/646d6ac24ccd0643b67abff9da28118e0a6f6e55)) +* rollback failed exception log ([#106](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/106)) ([809e6ab](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/809e6abb29f82a7fbe6587d606e8d75283f2a2fe)) + + +### 
Documentation + +* add query hints example ([#153](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/153)) ([9c23804](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/9c23804746bc8c638b6c22f2cb6ea57778f7fd19)) +* reformatted README titles ([#141](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/141)) ([a3ccbac](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/a3ccbac476679fe8048ed2109e5489b873278c9c)) +* update benchmarks ([#155](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/155)) ([3500653](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/35006536e4de31dbcba022b73f0aadf39bc89e39)) + + +### Miscellaneous Chores + +* setup release 1.0.0 ([#165](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/165)) ([37a415d](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/37a415d071d39e99f233a1c15c1c4b89bd436570)) diff --git a/packages/sqlalchemy-spanner/CONTRIBUTING.md b/packages/sqlalchemy-spanner/CONTRIBUTING.md new file mode 100644 index 000000000000..6294c4529e49 --- /dev/null +++ b/packages/sqlalchemy-spanner/CONTRIBUTING.md @@ -0,0 +1,54 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code Reviews + +All submissions, including submissions by project members, require review. 
We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google/conduct/). + +## Running tests + +SQLAlchemy Spanner dialect includes a test suite, which can be executed both on a live service and Spanner emulator. + +**Using pytest** +To execute the test suite with standard `pytest` package you only need to checkout to the package folder and run: +``` +pytest -v +``` + +**Using nox** +The package includes a configuration file for `nox` package, which allows to execute the dialect test suite in an isolated virtual environment. To execute all the `nox` sessions checkout to the dialect folder and then run command: +``` +nox +``` +To execute only the dialect compliance test suite execute command: +``` +nox -s compliance_test +``` + +**Live service** +To run the test suite on a live service use [setup.cfg](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/blob/main/setup.cfg) `db.default` attribute to set URI of the project, instance and database, where the tests should be executed. + +**Emulator** +As the dialect is built on top of the Spanner DB API, it also supports running on Spanner emulator. To make it happen you need to set an environment variable, pointing to the emulator service, for example `SPANNER_EMULATOR_HOST=localhost:9010` \ No newline at end of file diff --git a/packages/sqlalchemy-spanner/LICENSE b/packages/sqlalchemy-spanner/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/packages/sqlalchemy-spanner/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/packages/sqlalchemy-spanner/README.rst b/packages/sqlalchemy-spanner/README.rst new file mode 100644 index 000000000000..fc9e5b6c0f75 --- /dev/null +++ b/packages/sqlalchemy-spanner/README.rst @@ -0,0 +1,564 @@ +Spanner dialect for SQLAlchemy +============================== + +Spanner dialect for SQLAlchemy represents an interface API designed to +make it possible to control Cloud Spanner databases with SQLAlchemy API. +The dialect is built on top of `the Spanner DB +API `__, +which is designed in accordance with +`PEP-249 `__. + +Known limitations are listed `here <#features-and-limitations>`__. All +supported features have been tested and verified to work with the test +configurations. 
There may be configurations and/or data model variations +that have not yet been covered by the tests and that show unexpected +behavior. Please report any problems that you might encounter by +`creating a new +issue `__. + +- `Cloud Spanner product + documentation `__ +- `SQLAlchemy product documentation `__ + +Quick Start +----------- + +In order to use this package, you first need to go through the following +steps: + +1. `Select or create a Cloud Platform + project. `__ +2. `Enable billing for your + project. `__ +3. `Enable the Google Cloud Spanner + API. `__ +4. `Setup + Authentication. `__ + +Installation +------------ +Stable released version of the package is available on PyPi: + +:: + + pip install sqlalchemy-spanner + +To install an in-development version of the package, clone its +Git-repository: + +:: + + git clone https://github.com/googleapis/google-cloud-python.git + +Next install the package from the package ``setup.py`` file: + +:: + + python setup.py install + +During setup the dialect will be registered with entry points. + +Samples +------------- + +The `samples directory `__ +contains multiple examples for how to configure and use common Spanner features. + + +A Minimal App +------------- + +Database URL +~~~~~~~~~~~~ + +In order to connect to a database one have to use its URL on connection +creation step. SQLAlchemy 1.3 and 1.4 versions have a bit of difference +on this step in a dialect prefix part: + +.. code:: python + + # for SQLAlchemy 1.3: + spanner:///projects/project-id/instances/instance-id/databases/database-id + + # for SQLAlchemy 1.4 and 2.0: + spanner+spanner:///projects/project-id/instances/instance-id/databases/database-id + +To pass your custom client object directly to be be used, create engine as following: + +.. 
code:: python + + engine = create_engine( + "spanner+spanner:///projects/project-id/instances/instance-id/databases/database-id", + connect_args={'client': spanner.Client(project="project-id")}, + isolation_level="SERIALIZABLE" + ) + +Create a table +~~~~~~~~~~~~~~ + +.. code:: python + + from sqlalchemy import ( + Column, + Integer, + MetaData, + String, + Table, + create_engine, + ) + + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + metadata = MetaData(bind=engine) + + user = Table( + "users", + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + + metadata.create_all(engine) + +Insert a row +~~~~~~~~~~~~ + +.. code:: python + + import uuid + + from sqlalchemy import ( + MetaData, + Table, + create_engine, + ) + + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + user = Table("users", MetaData(bind=engine), autoload=True) + user_id = uuid.uuid4().hex[:6].lower() + + with engine.begin() as connection: + connection.execute(user.insert(), {"user_id": user_id, "user_name": "Full Name"}) + +Read +~~~~ + +.. code:: python + + from sqlalchemy import MetaData, Table, create_engine, select + + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + table = Table("users", MetaData(bind=engine), autoload=True) + + with engine.begin() as connection: + for row in connection.execute(select(["*"], from_obj=table)).fetchall(): + print(row) + +Migration +--------- + +SQLAlchemy uses `Alembic `__ +tool to organize database migrations. + +Spanner dialect doesn't provide a default migration environment, it's up +to user to write it. One thing to be noted here - one should explicitly +set ``alembic_version`` table not to use migration revision id as a +primary key: + +.. 
code:: python + + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=target_metadata, + version_table_pk=False, # don't use primary key in the versions table + ) + +As Spanner restricts changing a primary key value, not setting the ``version_table_pk`` flag +to ``False`` can cause migration problems. If ``alembic_versions`` table was already created with a primary key, setting the flag to ``False`` will not work, because the flag is only applied on table creation. + +Notice that DDL statements in Spanner are not transactional. They will not be automatically reverted in case of a migration fail. Also Spanner encourage use of the `autocommit_block() `__ for migrations in order to prevent DDLs from aborting migration transactions with schema modifications. + +| **Warning!** +| A migration script can produce a lot of DDL statements. If each of the + statements is executed separately, performance issues can occur. To + avoid it, it's highly recommended to use the `Alembic batch + context `__ + feature to pack DDL statements into groups of statements. + +Features and limitations +------------------------ + +Interleaved tables +~~~~~~~~~~~~~~~~~~ + +| Cloud Spanner dialect includes two dialect-specific arguments for + ``Table`` constructor, which help to define interleave relations: + ``spanner_interleave_in`` - a parent table name + ``spanner_inverleave_on_delete_cascade`` - a flag specifying if + ``ON DELETE CASCADE`` statement must be used for the interleave + relation +| An example of interleave relations definition: + +.. 
code:: python + + team = Table( + "team", + metadata, + Column("team_id", Integer, primary_key=True), + Column("team_name", String(16), nullable=False), + ) + team.create(engine) + + client = Table( + "client", + metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + spanner_interleave_on_delete_cascade=True, + ) + client.add_is_dependent_on(team) + + client.create(engine) + +**Note**: Interleaved tables have a dependency between them, so the +parent table must be created before the child table. When creating +tables with this feature, make sure to call ``add_is_dependent_on()`` on +the child table to request SQLAlchemy to create the parent table before +the child table. + +Commit timestamps +~~~~~~~~~~~~~~~~~~ + +The dialect offers the ``spanner_allow_commit_timestamp`` option to +column constructors for creating commit timestamp columns. + +.. code:: python + + Table( + "table", + metadata, + Column("last_update_time", DateTime, spanner_allow_commit_timestamp=True), + ) + +`See this documentation page for more details `__. + +Unique constraints +~~~~~~~~~~~~~~~~~~ + +Cloud Spanner doesn't support direct UNIQUE constraints creation. In +order to achieve column values uniqueness, UNIQUE indexes should be used. + +Instead of direct UNIQUE constraint creation: + +.. code:: python + + Table( + 'table', + metadata, + Column('col1', Integer), + UniqueConstraint('col1', name='uix_1') + ) + +Create a UNIQUE index: + +.. code:: python + + Table( + 'table', + metadata, + Column('col1', Integer), + Index("uix_1", "col1", unique=True), + ) + +Autocommit mode +~~~~~~~~~~~~~~~ + +Spanner dialect supports ``SERIALIZABLE``, ``REPEATABLE_READ``, and +``AUTOCOMMIT`` isolation levels. ``SERIALIZABLE`` is the default +isolation level. 
+ +``AUTOCOMMIT`` mode corresponds to automatically committing each +insert/update/delete statement right after is has been executed. +Queries that are executed in ``AUTOCOMMIT`` mode use a single-use +read-only transaction. These do not take any locks and do not need +to be committed. + +Workloads that only read data, should use either ``AUTOCOMMIT`` or +a read-only transaction. + +Isolation level change example: + +.. code:: python + + from sqlalchemy import create_engine + + eng = create_engine("spanner:///projects/project-id/instances/instance-id/databases/database-id") + autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT") + +Automatic transaction retry +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In ``SERIALIZABLE`` isolation mode, transactions may fail with an ``Aborted`` exception. +This happens if there are conflicts between different transactions, for example if one +transaction tries to read data that another transaction has modified. Aborted transactions +should be retried by the client. The Spanner SQLAlchemy provider automatically retries +aborted transactions. + +Isolation level ``SERIALIZABLE`` takes lock for both **reads and writes**. + +Use isolation level ``REPEATABLE READ`` to reduce the amount of locks that +are taken by read/write transactions. ``REPEATABLE READ`` only takes locks +for **writes** and for queries that use a ``FOR UPDATE`` clause. + +Auto-increment primary keys +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Spanner uses IDENTITY columns for auto-increment primary key values. +IDENTITY columns use a backing bit-reversed sequence to generate unique +values that are safe to use as primary values in Spanner. These values +work the same as standard auto-increment values, except that they are +not monotonically increasing. This prevents hot-spotting for tables that +receive a large number of writes. + +`See this documentation page for more details `__. 
+ +Auto-generated primary keys must be returned by Spanner after each insert +statement using a ``THEN RETURN`` clause. ``THEN RETURN`` clauses are not +supported with `Batch DML `__. +It is therefore recommended to use for example client-side generated UUIDs +as primary key values instead. + +Query hints +~~~~~~~~~~~ + +Spanner dialect supports `query +hints `__, +which give the ability to set additional query execution parameters. +Usage example: + +.. code:: python + + session = Session(engine) + + Base = declarative_base() + + class User(Base): + """Data model.""" + + __tablename__ = "users" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + + query = session.query(User) + query = query.with_hint( + selectable=User, text="@{FORCE_INDEX=index_name}" + ) + query = query.filter(User.name.in_(["val1", "val2"])) + query.statement.compile(session.bind) + +Read-only transactions +~~~~~~~~~~~~~~~~~~~~~~ + +By default, transactions produced by a Spanner connection are in +ReadWrite mode. However, workloads that only read data perform better +if they use read-only transactions, as Spanner does not need to take +locks for the data that is read; for these cases, the Spanner dialect +supports the ``read_only`` execution option, which switches a connection +into ReadOnly mode: + +.. code:: python + + with engine.connect().execution_options(read_only=True) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +See the `Read-only transaction sample +`__ +for a concrete example. + +ReadOnly/ReadWrite mode of a connection can't be changed while a +transaction is in progress - you must commit or rollback the current +transaction before changing the mode. + +Stale reads +~~~~~~~~~~~ + +To use the Spanner `Stale +Reads `__ +with SQLAlchemy you can tweak the connection execution options with a +wanted staleness value. For example: + +.. 
code:: python + + # maximum staleness + with engine.connect().execution_options( + read_only=True, + staleness={"max_staleness": datetime.timedelta(seconds=5)} + ) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +.. code:: python + + # exact staleness + with engine.connect().execution_options( + read_only=True, + staleness={"exact_staleness": datetime.timedelta(seconds=5)} + ) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +.. code:: python + + # min read timestamp + with engine.connect().execution_options( + read_only=True, + staleness={"min_read_timestamp": datetime.datetime(2021, 11, 17, 12, 55, 30)} + ) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +.. code:: python + + # read timestamp + with engine.connect().execution_options( + read_only=True, + staleness={"read_timestamp": datetime.datetime(2021, 11, 17, 12, 55, 30)} + ) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +Note that the set option will be dropped when the connection is returned +back to the pool. + +Request priority +~~~~~~~~~~~~~~~~~~~~~ +In order to use Request Priorities feature in Cloud Spanner, SQLAlchemy provides an ``execution_options`` parameter: + +.. code:: python + + from google.cloud.spanner_v1 import RequestOptions + + with engine.connect().execution_options( + request_priority=RequestOptions.Priority.PRIORITY_MEDIUM + ) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() + +DDL and transactions +~~~~~~~~~~~~~~~~~~~~ + +DDL statements are executed outside the regular transactions mechanism, +which means DDL statements will not be rolled back on normal transaction +rollback. + +Dropping a table +~~~~~~~~~~~~~~~~ + +Cloud Spanner, by default, doesn't drop tables, which have secondary +indexes and/or foreign key constraints. 
In Spanner dialect for +SQLAlchemy, however, this restriction is omitted - if a table you are +trying to delete has indexes/foreign keys, they will be dropped +automatically right before dropping the table. + +Data types +~~~~~~~~~~ + +Data types table mapping SQLAlchemy types to Cloud Spanner types: + +========== ========= +SQLAlchemy Spanner +========== ========= +INTEGER INT64 +BIGINT INT64 +DECIMAL NUMERIC +FLOAT FLOAT64 +TEXT STRING +ARRAY ARRAY +BINARY BYTES +VARCHAR STRING +CHAR STRING +BOOLEAN BOOL +DATETIME TIMESTAMP +NUMERIC NUMERIC +========== ========= + +Other limitations +~~~~~~~~~~~~~~~~~ + +- WITH RECURSIVE statement is not supported. +- Temporary tables are not supported. +- Numeric type dimensions (scale and precision) are constant. See the + `docs `__. + +Best practices +-------------- + +When a SQLAlchemy function is called, a new connection to a database is +established and a Spanner session object is fetched. In case of +connectionless execution these fetches are done for every ``execute()`` +call, which can cause a significant latency. To avoid initiating a +Spanner session on every ``execute()`` call it's recommended to write +code in connection-bounded fashion. Once a ``Connection()`` object is +explicitly initiated, it fetches a Spanner session object and uses it +for all the following calls made on this ``Connection()`` object. + +Non-optimal connectionless use: + +.. code:: python + + # execute() is called on object, which is not a Connection() object + insert(user).values(user_id=1, user_name="Full Name").execute() + +Optimal connection-bounded use: + +.. code:: python + + with engine.begin() as connection: + # execute() is called on a Connection() object + connection.execute(user.insert(), {"user_id": 1, "user_name": "Full Name"}) + +Connectionless way of use is also deprecated since SQLAlchemy 2.0 and +soon will be removed (see in `SQLAlchemy +docs `__). 
+ +Running tests +------------- + +Spanner dialect includes a compliance, migration and unit test suite. To +run the tests the ``nox`` package commands can be used: + +:: + + # Run the whole suite + $ nox + + # Run a particular test session + $ nox -s migration_test + +Running tests on Spanner emulator +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The dialect test suite can be run on `Spanner +emulator `__. Several +tests, relating to ``NULL`` values of data types, are skipped when +executed on emulator. + +Contributing +------------ + +Contributions to this library are welcome and encouraged. Please report +issues, file feature requests, and send pull requests. See +`CONTRIBUTING `__ +for more information on how to get started. + +**Note that this project is not officially supported by Google as part +of the Cloud Spanner product.** + +Please note that this project is released with a Contributor Code of +Conduct. By participating in this project you agree to abide by its +terms. See the `Code of +Conduct `__ +for more information. diff --git a/packages/sqlalchemy-spanner/create_test_config.py b/packages/sqlalchemy-spanner/create_test_config.py new file mode 100644 index 000000000000..3ca1ab9bce88 --- /dev/null +++ b/packages/sqlalchemy-spanner/create_test_config.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import configparser
import sys


def set_test_config(
    project,
    instance,
    database,
    user=None,
    password=None,
    host=None,
    port=None,
    filename="test.cfg",
):
    """Write the SQLAlchemy Spanner test database URL into a config file.

    Args:
        project: Google Cloud project id.
        instance: Spanner instance id.
        database: Spanner database id.
        user: Optional user name for a host-qualified URL.
        password: Optional password for a host-qualified URL.
        host: Optional host for a host-qualified URL.
        port: Optional port for a host-qualified URL.
        filename: Destination config file. Defaults to "test.cfg",
            preserving the original behavior for existing callers.

    A host-qualified URL (``user:password@host:port``) is only built when
    ALL four of user/password/host/port are provided; otherwise a plain
    project-path URL is written.
    """
    config = configparser.ConfigParser()
    if all(v is not None for v in (user, password, host, port)):
        url = (
            f"spanner+spanner://{user}:{password}@{host}:{port}"
            f"/projects/{project}/instances/{instance}/"
            f"databases/{database}"
        )
    else:
        url = (
            f"spanner+spanner:///projects/{project}/instances/{instance}/"
            f"databases/{database}"
        )
    config.add_section("db")
    config["db"]["default"] = url

    with open(filename, "w") as configfile:
        config.write(configfile)


def main(argv):
    """CLI entry point.

    Expects ``argv`` to be ``[project, instance, database]``, optionally
    followed by ``[user, password, host, port]``.
    """
    if len(argv) != 3 and len(argv) != 7:
        # Fail loudly instead of raising IndexError (argv < 3) or silently
        # dropping extra arguments (argv in 4..6).
        sys.exit(
            "Usage: create_test_config.py PROJECT INSTANCE DATABASE "
            "[USER PASSWORD HOST PORT]"
        )
    project, instance, database = argv[0], argv[1], argv[2]
    if len(argv) == 7:
        user, password, host, port = argv[3], argv[4], argv[5], argv[6]
    else:
        user = password = host = port = None
    set_test_config(project, instance, database, user, password, host, port)


if __name__ == "__main__":
    main(sys.argv[1:])
import os
import time

from google.api_core import datetime_helpers
from google.api_core.exceptions import AlreadyExists, ResourceExhausted
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1.database import Database
from google.cloud.spanner_v1.instance import Instance

from create_test_config import set_test_config

# A non-empty SPANNER_EMULATOR_HOST means all traffic goes to the emulator.
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None

PROJECT = os.getenv(
    "GOOGLE_CLOUD_PROJECT",
    os.getenv("PROJECT_ID", "emulator-test-project"),
)

# The emulator accepts anonymous credentials; a live backend uses ADC.
if USE_EMULATOR:
    from google.auth.credentials import AnonymousCredentials

    CLIENT = Client(project=PROJECT, credentials=AnonymousCredentials())
else:
    CLIENT = Client(project=PROJECT)


def delete_stale_test_instances():
    """Delete test instances that are older than four hours."""
    stale_before = int(time.time()) - 4 * 60 * 60
    listed = CLIENT.list_instances(
        "labels.python-spanner-sqlalchemy-systest:true"
    )
    for pb in listed:
        candidate = Instance.from_pb(pb, CLIENT)
        if "created" not in candidate.labels:
            continue
        if int(candidate.labels["created"]) > stale_before:
            # Still younger than the cutoff; keep it.
            continue
        # Backups are not used in sqlalchemy dialect test,
        # therefore instance can just be deleted.
        try:
            candidate.delete()
            time.sleep(5)  # Sleep for 5 seconds to give time for cooldown.
        except ResourceExhausted:
            print(
                "Unable to drop stale instance '{}'. May need manual delete.".format(
                    candidate.instance_id
                )
            )


def delete_stale_test_databases():
    """Delete test databases that are older than four hours."""
    stale_before_ms = (int(time.time()) - 4 * 60 * 60) * 1000
    parent = CLIENT.instance("sqlalchemy-dialect-test")
    if not parent.exists():
        return
    for db_pb in parent.list_databases():
        db = Database.from_pb(db_pb, parent)
        # The emulator does not return a create_time for databases.
        if db.create_time is None:
            continue
        created_ms = datetime_helpers.to_milliseconds(db_pb.create_time)
        if created_ms > stale_before_ms:
            continue
        try:
            db.drop()
        except ResourceExhausted:
            print(
                "Unable to drop stale database '{}'. May need manual delete.".format(
                    db.database_id
                )
            )


def create_test_instance():
    """Ensure the shared test instance exists, then create a fresh test
    database in it and record its URL via ``set_test_config``."""
    instance_id = "sqlalchemy-dialect-test"
    instance = CLIENT.instance(instance_id)
    if not instance.exists():
        if USE_EMULATOR:
            # The emulator exposes its own instance configs; take the first.
            instance_config = list(CLIENT.list_instance_configs())[0].name
        else:
            instance_config = (
                f"projects/{PROJECT}/instanceConfigs/regional-us-east1"
            )
        labels = {
            "python-spanner-sqlalchemy-systest": "true",
            "created": str(int(time.time())),
        }

        instance = CLIENT.instance(instance_id, instance_config, labels=labels)

        try:
            instance.create().result(1800)  # block until completion
        except AlreadyExists:
            pass  # instance was already created

    # Millisecond timestamp suffix keeps database names unique per run.
    database_id = "sqlalchemy-test" + "%s%d" % ("-", 1000 * time.time())

    try:
        instance.database(database_id).create().result(1800)
    except AlreadyExists:
        pass  # database was already created

    set_test_config(PROJECT, instance_id, database_id)


delete_stale_test_databases()
create_test_instance()
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import configparser +import os +import re +import time + +from google.api_core import datetime_helpers +from google.api_core.exceptions import AlreadyExists, ResourceExhausted +from google.cloud.spanner_v1 import Client +from google.cloud.spanner_v1.database import Database +from google.cloud.spanner_v1.instance import Instance + +from create_test_config import set_test_config + +USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None + +PROJECT = os.getenv( + "GOOGLE_CLOUD_PROJECT", + os.getenv("PROJECT_ID", "emulator-test-project"), +) +CLIENT = None + +if USE_EMULATOR: + from google.auth.credentials import AnonymousCredentials + + CLIENT = Client(project=PROJECT, credentials=AnonymousCredentials()) +else: + CLIENT = Client(project=PROJECT) + + +def delete_test_database(): + """Delete the currently configured test database.""" + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + db_url = config.get("db", "default") + + instance_id = re.findall(r"instances(.*?)databases", db_url) + database_id = re.findall(r"databases(.*?)$", db_url) + + instance = CLIENT.instance(instance_id="".join(instance_id).replace("/", "")) + database = instance.database("".join(database_id).replace("/", "")) + database.drop() + + +delete_test_database() diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/__init__.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/__init__.py new file mode 100644 index 000000000000..a5a387d1271f --- /dev/null +++ 
b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .sqlalchemy_spanner import SpannerDialect + +from .version import __version__ + + +__all__ = (SpannerDialect, __version__) diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py new file mode 100644 index 000000000000..960442c57d09 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py @@ -0,0 +1,74 @@ +# Copyright 2021 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Manages OpenTelemetry trace creation and handling""" + +import collections +import os + +from contextlib import contextmanager + +from google.api_core.exceptions import GoogleAPICallError +from google.cloud.spanner_v1 import SpannerClient + +try: + from opentelemetry import trace + from opentelemetry.trace.status import Status, StatusCode + + HAS_OPENTELEMETRY_INSTALLED = True + tracer = trace.get_tracer(__name__) +except ImportError: + HAS_OPENTELEMETRY_INSTALLED = False + tracer = None + + +@contextmanager +def trace_call(name, extra_attributes=None): + if not HAS_OPENTELEMETRY_INSTALLED: + # Empty context manager. Users will have to check if the generated value + # is None or a span + yield None + return + + # Set base attributes that we know for every trace created + attributes = { + "db.type": "spanner", + "db.engine": "sqlalchemy_spanner", + "db.url": SpannerClient.DEFAULT_ENDPOINT, + "net.host.name": SpannerClient.DEFAULT_ENDPOINT, + } + + if extra_attributes: + if os.environ.get("SQLALCHEMY_SPANNER_TRACE_HIDE_QUERY_PARAMETERS"): + extra_attributes.pop("db.params", None) + + # Stringify "db.params" sequence values before sending to OpenTelemetry, + # otherwise OpenTelemetry may log a Warning if types differ. 
+ if isinstance(extra_attributes, dict): + for k, v in extra_attributes.items(): + if k == "db.params" and isinstance(v, collections.abc.Sequence): + extra_attributes[k] = [str(e) for e in v] + + attributes.update(extra_attributes) + + with tracer.start_as_current_span( + name, kind=trace.SpanKind.CLIENT, attributes=attributes + ) as span: + try: + yield span + except GoogleAPICallError as error: + span.set_status(Status(StatusCode.ERROR)) + span.record_exception(error) + raise + span.set_status(Status(StatusCode.OK)) diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/dml.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/dml.py new file mode 100644 index 000000000000..311aee0db487 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/dml.py @@ -0,0 +1,26 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import Insert, insert +from sqlalchemy.sql._typing import _DMLTableArgument + + +def insert_or_update(table: _DMLTableArgument) -> Insert: + """Construct a Spanner-specific insert-or-update statement.""" + return insert(table).prefix_with("OR UPDATE") + + +def insert_or_ignore(table: _DMLTableArgument) -> Insert: + """Construct a Spanner-specific insert-or-ignore statement.""" + return insert(table).prefix_with("OR IGNORE") diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/provision.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/provision.py new file mode 100644 index 000000000000..f56aaccf70b5 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/provision.py @@ -0,0 +1,20 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.testing.provision import temp_table_keyword_args + + +@temp_table_keyword_args.for_db("spanner") # pragma: no cover +def _spanner_temp_table_keyword_args(cfg, eng): + return {"prefixes": ["TEMPORARY"]} diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/requirements.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/requirements.py new file mode 100644 index 000000000000..04d21dc8d095 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/requirements.py @@ -0,0 +1,119 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.testing import exclusions +from sqlalchemy.testing.requirements import SuiteRequirements +from sqlalchemy.testing.exclusions import against, only_on + + +class Requirements(SuiteRequirements): # pragma: no cover + @property + def json_type(self): + return exclusions.open() + + @property + def computed_columns(self): + return exclusions.open() + + @property + def computed_columns_stored(self): + return exclusions.open() + + @property + def sane_rowcount(self): + return exclusions.closed() + + @property + def sane_multi_rowcount(self): + return exclusions.closed() + + @property + def foreign_key_constraint_name_reflection(self): + return exclusions.open() + + @property + def schema_reflection(self): + return exclusions.open() + + @property + def array_type(self): + return only_on([lambda config: against(config, "postgresql")]) + + @property + def uuid_data_type(self): + """Return databases that support the UUID datatype.""" + return only_on(("postgresql >= 8.3", "mariadb >= 10.7.0")) + + @property + def implicitly_named_constraints(self): + return exclusions.open() + + @property + def autocommit(self): + return exclusions.open() + + @property + def order_by_collation(self): + return exclusions.open() + + @property + def implements_get_lastrowid(self): + return exclusions.closed() + + @property + def ctes(self): + return exclusions.open() + + @property + def isolation_level(self): + return exclusions.open() + + @property + def sequences(self): + return exclusions.open() + + @property + def temporary_tables(self): + """Target database supports temporary tables.""" + return exclusions.closed() + + def get_order_by_collation(self, _): + """Get the default collation name. + + Returns: + str: Collation name. + """ + return '"unicode"' + + def get_isolation_levels(self, _): + """Get isolation levels supported by the dialect. + + Returns: + dict: isolation levels description. 
+ """ + return { + "default": "SERIALIZABLE", + "supported": ["SERIALIZABLE", "REPEATABLE READ", "AUTOCOMMIT"], + } + + @property + def precision_numerics_enotation_large(self): + """target backend supports Decimal() objects using E notation + to represent very large values.""" + return exclusions.open() + + @property + def views(self): + """Target database must support VIEWs.""" + return exclusions.open() diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py new file mode 100644 index 000000000000..a8ca59bd0712 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py @@ -0,0 +1,1926 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import base64 + +import re + +from alembic.ddl.base import ( + ColumnNullable, + ColumnType, + alter_column, + alter_table, + format_server_default, + format_type, +) +from google.api_core.client_options import ClientOptions +from google.auth.credentials import AnonymousCredentials +from google.cloud.spanner_v1 import Client, TransactionOptions +from sqlalchemy.exc import NoSuchTableError +from sqlalchemy.sql import elements +from sqlalchemy import ForeignKeyConstraint, types, TypeDecorator, PickleType +from sqlalchemy.engine.base import Engine +from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext +from sqlalchemy.event import listens_for +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.pool import Pool +from sqlalchemy.sql.compiler import ( + selectable, + DDLCompiler, + GenericTypeCompiler, + IdentifierPreparer, + SQLCompiler, + OPERATORS, + RESERVED_WORDS, +) +from sqlalchemy.sql.default_comparator import operator_lookup +from sqlalchemy.sql.operators import json_getitem_op +from sqlalchemy.sql import expression + +from google.cloud.spanner_v1.data_types import JsonObject +from google.cloud import spanner_dbapi +from google.cloud.sqlalchemy_spanner._opentelemetry_tracing import trace_call +from google.cloud.sqlalchemy_spanner import version as sqlalchemy_spanner_version +import sqlalchemy + +USING_SQLACLCHEMY_20 = False +if sqlalchemy.__version__.split(".")[0] == "2": + USING_SQLACLCHEMY_20 = True + +if USING_SQLACLCHEMY_20: + from sqlalchemy.engine.reflection import ObjectKind + + +@listens_for(Pool, "reset") +def reset_connection(dbapi_conn, connection_record, reset_state=None): + """An event of returning a connection back to a pool.""" + if hasattr(dbapi_conn, "connection"): + dbapi_conn = dbapi_conn.connection + if isinstance(dbapi_conn, spanner_dbapi.Connection): + if dbapi_conn.inside_transaction: + dbapi_conn.rollback() + + dbapi_conn.staleness = None + dbapi_conn.read_only = False + + +# register a method to get a 
single value of a JSON object +OPERATORS[json_getitem_op] = operator_lookup["json_getitem_op"] + + +# PickleType that can be used with Spanner. +# Binary values are automatically encoded/decoded to/from base64. +# Usage: +# class User(Base): +# __tablename__ = 'users' +# +# user_id = Column(Integer, primary_key=True) +# username = Column(String(50), nullable=False) +# preferences = Column(PickleType(impl=SpannerPickleType)) +class SpannerPickleType(TypeDecorator): + impl = PickleType + + def bind_processor(self, dialect): + def process(value): + if value is None: + return None + return base64.standard_b64encode(value) + + return process + + def result_processor(self, dialect, coltype): + def process(value): + if value is None: + return None + return base64.standard_b64decode(value) + + return process + + +# Spanner-to-SQLAlchemy types map +_type_map = { + "BOOL": types.Boolean, + "BYTES": types.LargeBinary, + "DATE": types.DATE, + "DATETIME": types.DATETIME, + "FLOAT32": types.REAL, + "FLOAT64": types.Float, + "INT64": types.BIGINT, + "NUMERIC": types.NUMERIC(precision=38, scale=9), + "STRING": types.String, + "TIME": types.TIME, + "TIMESTAMP": types.TIMESTAMP, + "ARRAY": types.ARRAY, + "JSON": types.JSON, +} + + +_type_map_inv = { + types.Boolean: "BOOL", + types.BINARY: "BYTES(MAX)", + types.LargeBinary: "BYTES(MAX)", + types.DATE: "DATE", + types.DATETIME: "DATETIME", + types.REAL: "FLOAT32", + types.Float: "FLOAT64", + types.BIGINT: "INT64", + types.DECIMAL: "NUMERIC", + types.String: "STRING", + types.TIME: "TIME", + types.TIMESTAMP: "TIMESTAMP", + types.Integer: "INT64", + types.NullType: "INT64", +} + +_compound_keywords = { + selectable.CompoundSelect.UNION: "UNION DISTINCT", + selectable.CompoundSelect.UNION_ALL: "UNION ALL", + selectable.CompoundSelect.EXCEPT: "EXCEPT DISTINCT", + selectable.CompoundSelect.EXCEPT_ALL: "EXCEPT ALL", + selectable.CompoundSelect.INTERSECT: "INTERSECT DISTINCT", + selectable.CompoundSelect.INTERSECT_ALL: "INTERSECT ALL", +} + 
+_max_size = 2621440 + + +def int_from_size(size_str): + """Convert a string column length to an integer value. + + Args: + size_str (str): The column length or the 'MAX' keyword. + + Returns: + int: The column length value. + """ + return _max_size if size_str == "MAX" else int(size_str) + + +def engine_to_connection(function): + """ + Decorator to initiate a connection to a + database in case of an engine-related use. + """ + + def wrapper(self, connection, *args, **kwargs): + """ + Args: + connection (Union[ + sqlalchemy.engine.base.Connection, + sqlalchemy.engine.Engine + ]): + SQLAlchemy connection or engine object. + """ + if isinstance(connection, Engine): + connection = connection.connect() + + return function(self, connection, *args, **kwargs) + + return wrapper + + +class SpannerExecutionContext(DefaultExecutionContext): + def pre_exec(self): + """ + Apply execution options to the DB API connection before + executing the next SQL operation. + """ + super(SpannerExecutionContext, self).pre_exec() + + read_only = self.execution_options.get("read_only") + if read_only is not None: + self._dbapi_connection.connection.read_only = read_only + + staleness = self.execution_options.get("staleness") + if staleness is not None: + self._dbapi_connection.connection.staleness = staleness + + priority = self.execution_options.get("request_priority") + if priority is not None: + self._dbapi_connection.connection.request_priority = priority + + transaction_tag = self.execution_options.get("transaction_tag") + if transaction_tag: + self._dbapi_connection.connection.transaction_tag = transaction_tag + + request_tag = self.execution_options.get("request_tag") + if request_tag: + self.cursor.request_tag = request_tag + + ignore_transaction_warnings = self.execution_options.get( + "ignore_transaction_warnings" + ) + if ignore_transaction_warnings is not None: + conn = self._dbapi_connection.connection + if conn is not None and hasattr(conn, "_connection_variables"): + 
conn._connection_variables[ + "ignore_transaction_warnings" + ] = ignore_transaction_warnings + + def fire_sequence(self, seq, type_): + """Builds a statement for fetching next value of the sequence.""" + return self._execute_scalar( + ( + "SELECT GET_NEXT_SEQUENCE_VALUE(SEQUENCE %s)" + % self.identifier_preparer.format_sequence(seq) + ), + type_, + ) + + +class SpannerIdentifierPreparer(IdentifierPreparer): + """Identifiers compiler. + + In Cloud Spanner backticks "`" are used for keywords escaping. + """ + + reserved_words = RESERVED_WORDS.copy() + reserved_words.update(spanner_dbapi.parse_utils.SPANNER_RESERVED_KEYWORDS) + reserved_words_lc = set(map(str.lower, reserved_words)) + + def __init__(self, dialect): + super(SpannerIdentifierPreparer, self).__init__( + dialect, initial_quote="`", final_quote="`" + ) + + def _requires_quotes(self, value): + """Return True if the given identifier requires quoting.""" + lc_value = value.lower() + if lc_value == '"unicode"': # don't escape default Spanner colation + return False + + return ( + lc_value in self.reserved_words_lc + or value[0] in self.illegal_initial_characters + or not self.legal_characters.match(str(value)) + or (lc_value != value) + ) + + +class SpannerSQLCompiler(SQLCompiler): + """Spanner SQL statements compiler.""" + + compound_keywords = _compound_keywords + + def __init__(self, *args, **kwargs): + self.tablealiases = {} + super().__init__(*args, **kwargs) + + def get_from_hint_text(self, _, text): + """Return a hint text. + + Overriden to avoid adding square brackets to the hint text. + + Args: + text (str): The hint text. + """ + return text + + def visit_now_func(self, func, **kwargs): + return "current_timestamp" + + def visit_empty_set_expr(self, type_, **kw): + """Return an empty set expression of the given type. + + Args: + type_ (sqlalchemy.sql.sqltypes.SchemaType): + A SQLAlchemy data type. + + Returns: + str: A query to select an empty set of the given type. 
+ """ + return "SELECT CAST(1 AS {}) FROM (SELECT 1) WHERE 1 != 1".format( + _type_map_inv[type(type_[0])] + ) + + def visit_like_op_binary(self, binary, operator, **kw): + """Build a LIKE clause.""" + if binary.modifiers.get("escape", None): + raise NotImplementedError("ESCAPE keyword is not supported by Spanner") + + # TODO: use ternary here, not "and"/ "or" + return "%s LIKE %s" % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw), + ) + + def _generate_generic_binary(self, binary, opstring, eager_grouping=False, **kw): + """The method is overriden to process JSON data type cases.""" + _in_binary = kw.get("_in_binary", False) + + kw["_in_binary"] = True + + if isinstance(opstring, str): + text = ( + binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + opstring + + binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + ) + if _in_binary and eager_grouping: + text = "(%s)" % text + else: + # got JSON data + right_value = getattr( + binary.right, "value", None + ) or binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + text = ( + binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + """, "$.""" + + str(right_value) + + '"' + ) + text = "JSON_VALUE(%s)" % text + + return text + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + """Build a JSON_VALUE() function call.""" + expr = """JSON_VALUE(%s, "$.%s")""" + + return expr % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + + def render_literal_value(self, value, type_): + """Render the value of a bind parameter as a quoted literal. + + This is used for statement sections that do not accept bind parameters + on the target driver/database. + + This should be implemented by subclasses using the quoting services + of the DBAPI. 
+ + Cloud spanner supports prefixed backslash to escape non-alphanumeric characters + in string. Override the method to add additional escape before using it to + generate a SQL statement. + """ + if value is None and not type_.should_evaluate_none: + # issue #10535 - handle NULL in the compiler without placing + # this onto each type, except for "evaluate None" types + # (e.g. JSON) + return self.process(elements.Null._instance()) + + raw = ["\\", "'", '"', "\n", "\t", "\r"] + if isinstance(value, str) and any(single in value for single in raw): + value = 'r"""{}"""'.format(value) + return value + else: + processor = type_._cached_literal_processor(self.dialect) + if processor: + return processor(value) + else: + raise NotImplementedError( + "Don't know how to literal-quote value %r" % value + ) + + def limit_clause(self, select, **kw): + """Build LIMIT-OFFSET clause. + + Spanner doesn't support using OFFSET without a LIMIT + clause. It also doesn't support negative LIMITs, while + SQLAlchemy support both. + + The method builds LIMIT-OFFSET clauses as usual, with + only difference: when OFFSET is used without an explicit + LIMIT, the dialect compiles a statement with a LIMIT + set to the biggest integer value. + + Args: + (sqlalchemy.sql.selectable.Select): Select clause object. + + Returns: + str: LIMIT-OFFSET clause. + """ + text = "" + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += f"\n LIMIT {9223372036854775807-select._offset}" + text += " OFFSET " + self.process(select._offset_clause, **kw) + return text + + def returning_clause(self, stmt, returning_cols, **kw): + # Set the spanner_is_returning flag which is passed to visit_column. 
+ columns = [ + self._label_select_column( + None, c, True, False, {"spanner_is_returning": True} + ) + for c in expression._select_iterables( + filter( + lambda col: not col.dialect_options.get("spanner", {}).get( + "exclude_from_returning", False + ), + returning_cols, + ) + ) + ] + + return "THEN RETURN " + ", ".join(columns) + + def visit_sequence(self, seq, **kw): + """Builds a statement for fetching next value of the sequence.""" + return " GET_NEXT_SEQUENCE_VALUE(SEQUENCE %s)" % self.preparer.format_sequence( + seq + ) + + def visit_table(self, table, spanner_aliased=False, iscrud=False, **kwargs): + """Produces the table name. + + Schema names are not allowed in Spanner SELECT statements. We + need to avoid generating SQL like + + SELECT schema.tbl.id + FROM schema.tbl + + To do so, we alias the table in order to produce SQL like: + + SELECT tbl_1.id, tbl_1.col + FROM schema.tbl AS tbl_1 + + And do similar for UPDATE and DELETE statements. + + This closely mirrors the mssql dialect which also avoids + schema-qualified columns in SELECTs, although the behaviour is + currently behind a deprecated 'legacy_schema_aliasing' flag. + """ + if spanner_aliased is table or self.isinsert: + return super().visit_table(table, **kwargs) + + # Add an alias for schema-qualified tables. + # Tables in the default schema are not aliased and follow the + # standard SQLAlchemy code path. + alias = self._schema_aliased_table(table) + if alias is not None: + return self.process(alias, spanner_aliased=table, **kwargs) + else: + return super().visit_table(table, **kwargs) + + def visit_alias(self, alias, **kw): + """Produces alias statements.""" + # translate for schema-qualified table aliases + kw["spanner_aliased"] = alias.element + return super().visit_alias(alias, **kw) + + def visit_column( + self, column, add_to_result_map=None, spanner_is_returning=False, **kw + ): + """Produces column expressions. 
+ + In tandem with visit_table, replaces schema-qualified column + names with column names qualified against an alias. + """ + if column.table is not None and not self.isinsert or self.is_subquery(): + # translate for schema-qualified table aliases + t = self._schema_aliased_table(column.table) + if t is not None: + converted = elements._corresponding_column_or_error(t, column) + if add_to_result_map is not None: + add_to_result_map( + column.name, + column.name, + (column, column.name, column.key), + column.type, + ) + + return super().visit_column(converted, **kw) + if spanner_is_returning: + # Set include_table=False because although table names are + # allowed in RETURNING clauses, schema names are not. We + # can't use the same aliasing trick above that we use with + # other statements, because INSERT statements don't result + # in visit_table calls and INSERT table names can't be + # aliased. Statements like: + # + # INSERT INTO table (id, name) + # SELECT id, name FROM another_table + # THEN RETURN another_table.id + # + # aren't legal, so the columns remain unambiguous when not + # qualified by table name. + kw["include_table"] = False + + return super().visit_column(column, add_to_result_map=add_to_result_map, **kw) + + def _schema_aliased_table(self, table): + """Creates an alias for the table if it is schema-qualified. + + If the table is schema-qualified, returns an alias for the + table and caches the alias for future references to the + table. If the table is not schema-qualified, returns None. + """ + if getattr(table, "schema", None) is not None: + if table not in self.tablealiases: + self.tablealiases[table] = table.alias() + return self.tablealiases[table] + else: + return None + + +class SpannerDDLCompiler(DDLCompiler): + """Spanner DDL statements compiler.""" + + def get_column_specification(self, column, **kwargs): + """Build new column specifications. 
+ + Overridden to move the NOT NULL statement to front + of a computed column expression definitions. + """ + colspec = ( + self.preparer.format_column(column) + + " " + + self.dialect.type_compiler.process(column.type, type_expression=column) + ) + if not column.nullable: + colspec += " NOT NULL" + + has_identity = ( + hasattr(column, "identity") + and column.identity is not None + and self.dialect.supports_identity_columns + ) + default = self.get_column_default_string(column) + + if ( + column.primary_key + and column is column.table._autoincrement_column + and not has_identity + and default is None + ): + if ( + hasattr(self.dialect, "use_auto_increment") + and self.dialect.use_auto_increment + ): + colspec += " AUTO_INCREMENT" + else: + sequence_kind = getattr( + self.dialect, "default_sequence_kind", "BIT_REVERSED_POSITIVE" + ) + colspec += " GENERATED BY DEFAULT AS IDENTITY (%s)" % sequence_kind + elif has_identity: + colspec += " " + self.process(column.identity) + elif default is not None: + colspec += f" DEFAULT {default}" + elif hasattr(column, "computed") and column.computed is not None: + colspec += " " + self.process(column.computed) + + if column.dialect_options.get("spanner", {}).get( + "allow_commit_timestamp", False + ): + colspec += " OPTIONS (allow_commit_timestamp=true)" + + return colspec + + def get_column_default_string(self, column): + default = super().get_column_default_string(column) + if default is not None: + return f"({default})" + + return default + + def visit_computed_column(self, generated, **kw): + """Computed column operator.""" + text = "AS (%s) STORED" % self.sql_compiler.process( + generated.sqltext, include_table=False, literal_binds=True + ) + return text + + def visit_drop_table(self, drop_table, **kw): + """ + Cloud Spanner doesn't drop tables which have indexes + or foreign key constraints. 
This method builds several DDL + statements separated by semicolons to drop the indexes and + foreign keys constraints of the table before the DROP TABLE + statement. + + Args: + (sqlalchemy.schema.DropTable): DROP TABLE statement object. + + Returns: + str: + DDL statements separated by semicolons, which will + sequentially drop indexes, foreign keys constraints + and then the table itself. + """ + constrs = "" + for cons in drop_table.element.constraints: + if isinstance(cons, ForeignKeyConstraint) and cons.name: + effective_schema = self.preparer.schema_for_object(drop_table.element) + if effective_schema: + table = ( + f"{self.preparer.quote_schema(effective_schema)}" + "." + f"{self.preparer.quote(drop_table.element.name)}" + ) + else: + table = self.preparer.quote(drop_table.element.name) + constrs += "ALTER TABLE {table} DROP CONSTRAINT {constr};".format( + table=table, + constr=self.preparer.quote(cons.name), + ) + + indexes = "" + for index in drop_table.element.indexes: + indexes += "DROP INDEX {};".format(self.preparer.quote(index.name)) + + return indexes + constrs + super().visit_drop_table(drop_table) + + def visit_primary_key_constraint(self, constraint, **kw): + """Build primary key definition. + + Primary key in Spanner is defined outside of a table columns definition, see: + https://cloud.google.com/spanner/docs/getting-started/python#create_a_database + + The method returns None to omit primary key in a table columns definition. + """ + return None + + def visit_unique_constraint(self, constraint, **kw): + """Unique constraints in Spanner are defined with indexes: + https://cloud.google.com/spanner/docs/secondary-indexes#unique-indexes + + The method throws an exception to notify user that in + Spanner unique constraints are done with unique indexes. + """ + raise spanner_dbapi.exceptions.ProgrammingError( + "Spanner doesn't support direct UNIQUE constraints creation. " + "Create UNIQUE indexes instead." 
+        )
+
+    def visit_foreign_key_constraint(self, constraint, **kw):
+        text = super().visit_foreign_key_constraint(constraint, **kw)
+        if constraint.dialect_options.get("spanner", {}).get("not_enforced", False):
+            text += " NOT ENFORCED"
+        return text
+
+    def post_create_table(self, table):
+        """Build statements to be executed after CREATE TABLE.
+
+        Includes "primary key" and "interleaved table" statements generation.
+
+        Args:
+            table (sqlalchemy.schema.Table): Table to create.
+
+        Returns:
+            str: primary key definition to add to the table CREATE request.
+        """
+        cols = [col.name for col in table.primary_key.columns]
+        post_cmds = " PRIMARY KEY ({})".format(", ".join(cols))
+
+        if "TEMPORARY" in table._prefixes:
+            raise NotImplementedError("Temporary tables are not supported.")
+
+        if table.kwargs.get("spanner_interleave_in"):
+            post_cmds += ",\nINTERLEAVE IN PARENT {}".format(
+                table.kwargs["spanner_interleave_in"]
+            )
+
+            if table.kwargs.get("spanner_interleave_on_delete_cascade"):
+                post_cmds += " ON DELETE CASCADE"
+
+        return post_cmds
+
+    def visit_create_index(
+        self, create, include_schema=True, include_table_schema=True, **kw
+    ):
+        text = super().visit_create_index(
+            create, include_schema, include_table_schema, **kw
+        )
+        index = create.element
+        if "spanner" in index.dialect_options:
+            options = index.dialect_options["spanner"]
+            if "storing" in options:
+                storing = options["storing"]
+                storing_columns = [
+                    index.table.c[col] if isinstance(col, str) else col
+                    for col in storing
+                ]
+                text += " STORING (%s)" % ", ".join(
+                    [self.preparer.quote(c.name) for c in storing_columns]
+                )
+
+            interleave_in = options.get("interleave_in")
+            if interleave_in is not None:
+                text += f", INTERLEAVE IN {self.preparer.quote(interleave_in)}"
+
+            if options.get("null_filtered", False):
+                text = re.sub(
+                    r"(^\s*CREATE\s+(?:UNIQUE\s+)?)INDEX",
+                    r"\1NULL_FILTERED INDEX",
+                    text,
+                    flags=re.IGNORECASE,
+                )
+
+        return text
+
+    def get_identity_options(self, 
                             identity_options):
        """Render OPTIONS for an identity column; Spanner identity columns
        always use the bit-reversed positive sequence kind."""
        text = ["bit_reversed_positive"]
        if identity_options.start is not None:
            text.append("start counter with %d" % identity_options.start)
        return " ".join(text)

    def get_sequence_options(self, identity_options):
        """Render OPTIONS for a sequence; only the bit-reversed positive
        kind is supported by Spanner."""
        text = ["sequence_kind = 'bit_reversed_positive'"]
        if identity_options.start is not None:
            text.append("start_with_counter = %d" % identity_options.start)
        return ", ".join(text)

    def visit_create_sequence(self, create, prefix=None, **kw):
        """Builds a ``CREATE SEQUENCE`` statement for the sequence."""
        # NOTE(review): the ``prefix`` argument is accepted for interface
        # compatibility but not used here.
        text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
        options = self.get_sequence_options(create.element)
        if options:
            text += " OPTIONS (" + options + ")"
        return text

    def visit_drop_sequence(self, drop, **kw):
        """Builds a ``DROP SEQUENCE`` statement for the sequence."""
        return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)


class SpannerTypeCompiler(GenericTypeCompiler):
    """Spanner types compiler.

    Maps SQLAlchemy types to Spanner data types.
    """

    def visit_INTEGER(self, type_, **kw):
        return "INT64"

    def visit_DOUBLE(self, type_, **kw):
        return "FLOAT64"

    def visit_FLOAT(self, type_, **kw):
        # Note: This was added before Spanner supported FLOAT32.
        # Changing this now to generate a FLOAT32 would be a breaking change.
        # Users therefore have to use REAL to generate a FLOAT32 column.
        return "FLOAT64"

    def visit_REAL(self, type_, **kw):
        return "FLOAT32"

    def visit_TEXT(self, type_, **kw):
        # Unbounded text maps to STRING(MAX).
        return "STRING({})".format(type_.length or "MAX")

    def visit_ARRAY(self, type_, **kw):
        return "ARRAY<{}>".format(self.process(type_.item_type, **kw))

    def visit_BINARY(self, type_, **kw):  # pragma: no cover
        """
        The BINARY type is superseded by large_binary in
        newer versions of SQLAlchemy (>1.4).
        """
        return "BYTES({})".format(type_.length or "MAX")

    def visit_large_binary(self, type_, **kw):  # pragma: no cover
        return "BYTES({})".format(type_.length or "MAX")

    def visit_DECIMAL(self, type_, **kw):
        return "NUMERIC"

    def visit_VARCHAR(self, type_, **kw):
        return "STRING({})".format(type_.length or "MAX")

    def visit_CHAR(self, type_, **kw):
        return "STRING({})".format(type_.length or "MAX")

    def visit_BOOLEAN(self, type_, **kw):
        return "BOOL"

    def visit_DATETIME(self, type_, **kw):
        return "TIMESTAMP"

    def visit_NUMERIC(self, type_, **kw):
        return "NUMERIC"

    def visit_BIGINT(self, type_, **kw):
        return "INT64"

    def visit_JSON(self, type_, **kw):
        return "JSON"


class SpannerDialect(DefaultDialect):
    """Cloud Spanner dialect.

    Represents an API layer to control Cloud Spanner database with SQLAlchemy API.
    """

    name = "spanner+spanner"
    driver = "spanner"
    positional = False
    paramstyle = "format"
    encoding = "utf-8"
    max_identifier_length = 256
    _legacy_binary_type_literal_encoding = "utf-8"
    _default_isolation_level = "SERIALIZABLE"

    execute_sequence_format = list

    supports_alter = True
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    supports_default_values = False
    supports_sequences = True
    sequences_optional = False
    supports_identity_columns = True
    supports_native_boolean = True
    supports_native_decimal = True
    supports_statement_cache = True
    # Spanner uses protos for enums. Creating a column like
    # Column("an_enum", Enum("A", "B", "C")) will result in a String
    # column. Setting supports_native_enum to False allows SQLAlchemy
    # to generate check constraints to enforce the enum values if the
    # create_constraint=True flag is passed to the Enum constructor.
    supports_native_enum = False

    postfetch_lastrowid = False
    insert_returning = True
    update_returning = True
    delete_returning = True
    supports_multivalues_insert = True
    use_insertmanyvalues = True

    ddl_compiler = SpannerDDLCompiler
    preparer = SpannerIdentifierPreparer
    statement_compiler = SpannerSQLCompiler
    type_compiler = SpannerTypeCompiler
    execution_ctx_cls = SpannerExecutionContext
    _json_serializer = JsonObject
    _json_deserializer = JsonObject

    @classmethod
    def dbapi(cls):
        """A pointer to the Cloud Spanner DB API package.

        Used to initiate connections to the Cloud Spanner databases.
        """
        return spanner_dbapi

    @classmethod
    def import_dbapi(cls):
        """A pointer to the Cloud Spanner DB API package.

        Used to initiate connections to the Cloud Spanner databases.
        """
        return spanner_dbapi

    @property
    def default_isolation_level(self):
        """Default isolation level name.

        Returns:
            str: default isolation level.
        """
        return self._default_isolation_level

    @default_isolation_level.setter
    def default_isolation_level(self, value):
        # Allows overriding the default isolation level per engine.
        self._default_isolation_level = value

    def _check_unicode_returns(self, connection, additional_tests=None):
        """Ensure requests are returning Unicode responses."""
        return True

    def _get_default_schema_name(self, _):
        """Get default Cloud Spanner schema name.

        Returns:
            str: Schema name.
        """
        return ""

    def _get_table_type_query(self, kind, append_query):
        """
        Generates WHERE condition for Kind of Object.
        Spanner supports Table and View.
        """
        # SQLAlchemy < 2.0 has no ObjectKind; no filter is generated.
        if not USING_SQLACLCHEMY_20:
            return ""

        kind = ObjectKind.TABLE if kind is None else kind
        if kind == ObjectKind.MATERIALIZED_VIEW:
            raise NotImplementedError("Spanner does not support MATERIALIZED VIEWS")
        switch_case = {
            ObjectKind.ANY: ["BASE TABLE", "VIEW"],
            ObjectKind.TABLE: ["BASE TABLE"],
            ObjectKind.VIEW: ["VIEW"],
            ObjectKind.ANY_VIEW: ["VIEW"],
        }

        # OR together one predicate per accepted table_type.
        table_type_query = ""
        for table_type in switch_case[kind]:
            query = f"t.table_type = '{table_type}'"
            if table_type_query != "":
                table_type_query = table_type_query + " OR " + query
            else:
                table_type_query = query

        table_type_query = "(" + table_type_query + ") "
        if append_query:
            table_type_query = table_type_query + " AND "
        return table_type_query

    def _get_table_filter_query(
        self, filter_names, info_schema_table, append_query=False
    ):
        """
        Generates WHERE query for tables or views for which
        information is reflected.
        """
        # NOTE(review): names are interpolated into the SQL text literally;
        # these come from reflection calls — confirm inputs are trusted.
        table_filter_query = ""
        if filter_names is not None:
            for table_name in filter_names:
                query = f"{info_schema_table}.table_name = '{table_name}'"
                if table_filter_query != "":
                    table_filter_query = table_filter_query + " OR " + query
                else:
                    table_filter_query = query
            table_filter_query = "(" + table_filter_query + ") "
            if append_query:
                table_filter_query = table_filter_query + " AND "

        return table_filter_query

    def create_connect_args(self, url):
        """Parse connection args from the given URL.

        The method prepares args suitable to send to the DB API `connect()` function.

        The given URL follows the style:
        `spanner:///projects/{project-id}/instances/{instance-id}/databases/{database-id}`
        or `spanner:///projects/{project-id}/instances/{instance-id}`. For the latter,
        database operations will be not be possible and if required a new engine with
        database-id set will need to be created.
+ """ + match = re.match( + ( + r"^projects/(?P.+?)/instances/" + "(?P.+?)(/databases/(?P.+)|$)" + ), + url.database, + ) + dist_version = sqlalchemy_spanner_version.__version__ + options = {"user_agent": f"gl-sqlalchemy-spanner/{dist_version}"} + connect_opts = url.translate_connect_args() + if ( + "host" in connect_opts + and "port" in connect_opts + and "password" in connect_opts + ): + # Create a test client that connects to a local Spanner (mock) server. + if ( + connect_opts["host"] == "localhost" + and connect_opts["password"] == "AnonymousCredentials" + ): + client = Client( + project=match.group("project"), + credentials=AnonymousCredentials(), + client_options=ClientOptions( + api_endpoint=f"{connect_opts['host']}:{connect_opts['port']}", + ), + ) + options["client"] = client + return ( + [match.group("instance"), match.group("database"), match.group("project")], + options, + ) + + @engine_to_connection + def get_view_names(self, connection, schema=None, **kw): + """ + Gets a list of view names. + + The method is used by SQLAlchemy introspection systems. + + Args: + connection (sqlalchemy.engine.base.Connection): + SQLAlchemy connection or engine object. + schema (str): Optional. Schema name + + Returns: + list: List of view names. + """ + sql = """ + SELECT table_name + FROM information_schema.views + WHERE TABLE_SCHEMA='{}' + """.format( + schema or "" + ) + + all_views = [] + with connection.connection.database.snapshot() as snap: + rows = list(snap.execute_sql(sql)) + for view in rows: + all_views.append(view[0]) + + return all_views + + @engine_to_connection + def get_sequence_names(self, connection, schema=None, **kw): + """ + Return a list of all sequence names available in the database. + + The method is used by SQLAlchemy introspection systems. + + Args: + connection (sqlalchemy.engine.base.Connection): + SQLAlchemy connection or engine object. + schema (str): Optional. Schema name + + Returns: + list: List of sequence names. 
+ """ + sql = """ + SELECT name + FROM information_schema.sequences + WHERE SCHEMA='{}' + """.format( + schema or "" + ) + all_sequences = [] + with connection.connection.database.snapshot() as snap: + rows = list(snap.execute_sql(sql)) + for seq in rows: + all_sequences.append(seq[0]) + + return all_sequences + + @engine_to_connection + def get_view_definition(self, connection, view_name, schema=None, **kw): + """ + Gets definition of a particular view. + + The method is used by SQLAlchemy introspection systems. + + Args: + connection (sqlalchemy.engine.base.Connection): + SQLAlchemy connection or engine object. + view_name (str): Name of the view. + schema (str): Optional. Schema name + + Returns: + str: Definition of view. + """ + sql = """ + SELECT view_definition + FROM information_schema.views + WHERE TABLE_SCHEMA='{schema_name}' AND TABLE_NAME='{view_name}' + """.format( + schema_name=schema or "", view_name=view_name + ) + + with connection.connection.database.snapshot() as snap: + rows = list(snap.execute_sql(sql)) + if rows == []: + raise NoSuchTableError(f"{schema if schema else ''}.{view_name}") + result = rows[0][0] + + return result + + @engine_to_connection + def get_multi_columns( + self, connection, schema=None, filter_names=None, scope=None, kind=None, **kw + ): + """ + Return information about columns in all objects in the given + schema. + + The method is used by SQLAlchemy introspection systems. + + Args: + connection (sqlalchemy.engine.base.Connection): + SQLAlchemy connection or engine object. + schema (str): Optional. Schema name + filter_names (Sequence[str]): Optional. Optionally return information + only for the objects listed here. + scope (sqlalchemy.engine.reflection.ObjectScope): Optional. Specifies + if columns of default, temporary or any tables + should be reflected. Spanner does not support temporary. + kind (sqlalchemy.engine.reflection.ObjectKind): Optional. Specifies the + type of objects to reflect. 

        Returns:
            dictionary: a dictionary where the keys are two-tuple schema,table-name
                and the values are list of dictionaries, each representing the
                definition of a database column.
                The schema is ``None`` if no schema is provided.
        """
        table_filter_query = self._get_table_filter_query(filter_names, "col", True)
        schema_filter_query = " col.table_schema = '{schema}' AND ".format(
            schema=schema or ""
        )
        table_type_query = self._get_table_type_query(kind, True)

        sql = """
            SELECT col.table_schema, col.table_name, col.column_name,
            col.spanner_type, col.is_nullable, col.generation_expression,
            col.column_default
            FROM information_schema.columns as col
            JOIN information_schema.tables AS t
                USING (TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME)
            WHERE
                {table_filter_query}
                {table_type_query}
                {schema_filter_query}
                col.table_catalog = ''
            ORDER BY
                col.table_catalog,
                col.table_schema,
                col.table_name,
                col.ordinal_position
        """.format(
            table_filter_query=table_filter_query,
            table_type_query=table_type_query,
            schema_filter_query=schema_filter_query,
        )
        with connection.connection.database.snapshot() as snap:
            columns = list(snap.execute_sql(sql))
            result_dict = {}

            for col in columns:
                # Row layout: schema, table, column, spanner_type, is_nullable,
                # generation_expression, column_default.
                column_info = {
                    "name": col[2],
                    "type": self._designate_type(col[3]),
                    "nullable": col[4] == "YES",
                    "default": col[6] if col[6] is not None else None,
                }

                if col[5] is not None:
                    # Generated (computed) column; Spanner stores these persisted.
                    column_info["computed"] = {
                        "persisted": True,
                        "sqltext": col[5],
                    }
                # Empty schema string is normalized to None for the result key.
                col[0] = col[0] or None
                table_info = result_dict.get((col[0], col[1]), [])
                table_info.append(column_info)
                result_dict[(col[0], col[1])] = table_info

        return result_dict

    @engine_to_connection
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Get the table columns description.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name

        Returns:
            list: The table every column dict-like description.
        """
        kind = None if not USING_SQLACLCHEMY_20 else ObjectKind.ANY
        # Delegates to the multi-object variant, filtered to one table.
        dict = self.get_multi_columns(
            connection, schema=schema, filter_names=[table_name], kind=kind
        )
        schema = schema or None
        return dict.get((schema, table_name), [])

    def _designate_type(self, str_repr):
        """
        Designate an SQLAlchemy data type from a Spanner
        string representation.

        Args:
            str_repr (str): String representation of a type.

        Returns:
            An SQLAlchemy data type.
        """
        if str_repr.startswith("STRING"):
            # e.g. "STRING(120)" or "STRING(MAX)" -> length after "STRING(".
            end = str_repr.index(")")
            size = int_from_size(str_repr[7:end])
            return _type_map["STRING"](length=size)
        # add test creating a table with bytes
        elif str_repr.startswith("BYTES"):
            end = str_repr.index(")")
            size = int_from_size(str_repr[6:end])
            return _type_map["BYTES"](length=size)
        elif str_repr.startswith("ARRAY"):
            # Recursively resolve the element type inside "ARRAY<...>".
            inner_type_str = str_repr[6:-1]
            inner_type = self._designate_type(inner_type_str)
            return _type_map["ARRAY"](inner_type)
        else:
            return _type_map[str_repr]

    @engine_to_connection
    def get_multi_indexes(
        self, connection, schema=None, filter_names=None, scope=None, kind=None, **kw
    ):
        """
        Return information about indexes in all objects
        in the given schema.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            schema (str): Optional. Schema name.
            filter_names (Sequence[str]): Optional. Optionally return information
                only for the objects listed here.
            scope (sqlalchemy.engine.reflection.ObjectScope): Optional. Specifies
                if columns of default, temporary or any tables
                should be reflected. Spanner does not support temporary.
            kind (sqlalchemy.engine.reflection.ObjectKind): Optional. Specifies the
                type of objects to reflect.

        Returns:
            dictionary: a dictionary where the keys are two-tuple schema,table-name
                and the values are list of dictionaries, each representing the
                definition of an index.
                The schema is ``None`` if no schema is provided.
        """
        table_filter_query = self._get_table_filter_query(filter_names, "i", True)
        schema_filter_query = " i.table_schema = '{schema}' AND ".format(
            schema=schema or ""
        )
        table_type_query = self._get_table_type_query(kind, True)

        # Key columns, their orderings and STORING columns are fetched as
        # correlated ARRAY subqueries ordered by ordinal_position, so the
        # three arrays stay positionally aligned per index.
        sql = """
            SELECT
                i.table_schema,
                i.table_name,
                i.index_name,
                ARRAY(
                    SELECT ic.column_name
                    FROM information_schema.index_columns ic
                    WHERE ic.index_name = i.index_name
                        AND ic.table_catalog = i.table_catalog
                        AND ic.table_schema = i.table_schema
                        AND ic.table_name = i.table_name
                        AND ic.column_ordering is not null
                    ORDER BY ic.ordinal_position
                ) as columns,
                i.is_unique,
                ARRAY(
                    SELECT ic.column_ordering
                    FROM information_schema.index_columns ic
                    WHERE ic.index_name = i.index_name
                        AND ic.table_catalog = i.table_catalog
                        AND ic.table_schema = i.table_schema
                        AND ic.table_name = i.table_name
                        AND ic.column_ordering is not null
                    ORDER BY ic.ordinal_position
                ) as column_orderings,
                ARRAY(
                    SELECT storing.column_name
                    FROM information_schema.index_columns storing
                    WHERE storing.index_name = i.index_name
                        AND storing.table_catalog = i.table_catalog
                        AND storing.table_schema = i.table_schema
                        AND storing.table_name = i.table_name
                        AND storing.column_ordering is null
                    ORDER BY storing.ordinal_position
                ) as storing_columns,
            FROM information_schema.indexes as i
            JOIN information_schema.tables AS t
                ON i.table_catalog = t.table_catalog
                    AND i.table_schema = t.table_schema
                    AND i.table_name = t.table_name
            WHERE
                {table_filter_query}
                {table_type_query}
                {schema_filter_query}
                i.index_type != 'PRIMARY_KEY'
                AND i.spanner_is_managed = FALSE
            GROUP BY i.table_catalog, i.table_schema, i.table_name,
                i.index_name, i.is_unique
            ORDER BY i.index_name

        """.format(
            table_filter_query=table_filter_query,
            table_type_query=table_type_query,
            schema_filter_query=schema_filter_query,
        )

        with connection.connection.database.snapshot() as snap:
            rows = list(snap.execute_sql(sql))
            result_dict = {}

            for row in rows:
                # Row layout: schema, table, index_name, key columns,
                # is_unique, column orderings, STORING columns.
                dialect_options = {}
                include_columns = row[6]
                if include_columns:
                    dialect_options["spanner_storing"] = include_columns
                index_info = {
                    "name": row[2],
                    "column_names": row[3],
                    "unique": row[4],
                    "column_sorting": {
                        col: order.lower() for col, order in zip(row[3], row[5])
                    },
                    "include_columns": include_columns if include_columns else [],
                    "dialect_options": dialect_options,
                }
                row[0] = row[0] or None
                table_info = result_dict.get((row[0], row[1]), [])
                table_info.append(index_info)
                result_dict[(row[0], row[1])] = table_info

        return result_dict

    @engine_to_connection
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Get the table indexes.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name

        Returns:
            list: List with indexes description.
        """
        kind = None if not USING_SQLACLCHEMY_20 else ObjectKind.ANY
        dict = self.get_multi_indexes(
            connection, schema=schema, filter_names=[table_name], kind=kind
        )
        schema = schema or None
        return dict.get((schema, table_name), [])

    @engine_to_connection
    def get_multi_pk_constraint(
        self, connection, schema=None, filter_names=None, scope=None, kind=None, **kw
    ):
        """
        Return information about primary key constraints in
        all tables in the given schema.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            schema (str): Optional. Schema name
            filter_names (Sequence[str]): Optional.
                Optionally return information
                only for the objects listed here.
            scope (sqlalchemy.engine.reflection.ObjectScope): Optional. Specifies
                if columns of default, temporary or any tables
                should be reflected. Spanner does not support temporary.
            kind (sqlalchemy.engine.reflection.ObjectKind): Optional. Specifies the
                type of objects to reflect.

        Returns:
            dictionary: a dictionary where the keys are two-tuple schema,table-name
                and the values are list of dictionaries, each representing the
                definition of a primary key constraint.
                The schema is ``None`` if no schema is provided.
        """
        table_filter_query = self._get_table_filter_query(filter_names, "tc", True)
        schema_filter_query = " tc.table_schema = '{schema}' AND ".format(
            schema=schema or ""
        )
        table_type_query = self._get_table_type_query(kind, True)

        sql = """
            SELECT tc.table_schema, tc.table_name, kcu.column_name
            FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
            JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
                USING (TABLE_CATALOG, TABLE_SCHEMA, CONSTRAINT_NAME)
            JOIN information_schema.tables AS t
                ON tc.TABLE_CATALOG = t.TABLE_CATALOG
                    AND tc.TABLE_SCHEMA = t.TABLE_SCHEMA
                    AND tc.TABLE_NAME = t.TABLE_NAME
            WHERE {table_filter_query} {table_type_query}
                {schema_filter_query} tc.CONSTRAINT_TYPE = "PRIMARY KEY"
            ORDER BY tc.table_catalog ASC, tc.table_schema ASC,
                tc.table_name ASC, kcu.ordinal_position ASC
        """.format(
            table_filter_query=table_filter_query,
            table_type_query=table_type_query,
            schema_filter_query=schema_filter_query,
        )

        with connection.connection.database.snapshot() as snap:
            rows = list(snap.execute_sql(sql))
            result_dict = {}

            for row in rows:
                # Rows arrive ordered by ordinal_position, so key columns are
                # appended in primary-key order.
                row[0] = row[0] or None
                table_info = result_dict.get(
                    (row[0], row[1]), {"constrained_columns": []}
                )
                table_info["constrained_columns"].append(row[2])
                result_dict[(row[0], row[1])] = table_info

        return result_dict

    @engine_to_connection
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Get the table primary key constraint.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name

        Returns:
            dict: Dict with the primary key constraint description.
        """
        kind = None if not USING_SQLACLCHEMY_20 else ObjectKind.ANY
        dict = self.get_multi_pk_constraint(
            connection, schema=schema, filter_names=[table_name], kind=kind
        )
        schema = schema or None
        # NOTE(review): the fallback is a list while found entries are dicts
        # ({"constrained_columns": [...]}) — confirm callers tolerate this.
        return dict.get((schema, table_name), [])

    @engine_to_connection
    def get_schema_names(self, connection, **kw):
        """Get all the schemas in the database.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.

        Returns:
            list: Schema names.
        """
        schemas = []
        with connection.connection.database.snapshot() as snap:
            rows = snap.execute_sql(
                "SELECT schema_name FROM information_schema.schemata"
            )

            for row in rows:
                schemas.append(row[0])

        return schemas

    @engine_to_connection
    def get_multi_foreign_keys(
        self, connection, schema=None, filter_names=None, scope=None, kind=None, **kw
    ):
        """
        Return information about foreign_keys in all tables
        in the given schema.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            schema (str): Optional. Schema name
            filter_names (Sequence[str]): Optional. Optionally return information
                only for the objects listed here.
            scope (sqlalchemy.engine.reflection.ObjectScope): Optional. Specifies
                if columns of default, temporary or any tables
                should be reflected. Spanner does not support temporary.
            kind (sqlalchemy.engine.reflection.ObjectKind): Optional. Specifies the
                type of objects to reflect.

        Returns:
            dictionary: a dictionary where the keys are two-tuple schema,table-name
                and the values are list of dictionaries, each representing
                a foreign key definition.
                The schema is ``None`` if no schema is provided.
        """
        table_filter_query = self._get_table_filter_query(filter_names, "tc", True)
        schema_filter_query = " tc.table_schema = '{schema}' AND".format(
            schema=schema or ""
        )
        table_type_query = self._get_table_type_query(kind, True)

        sql = """
            SELECT
                tc.table_schema,
                tc.table_name,
                tc.constraint_name,
                tc_uq.table_name,
                tc_uq.table_schema,
                -- Find the corresponding pairs of columns for the foreign key constraint
                -- and its related unique constraint.
                ARRAY(
                    SELECT (kcu.column_name, kcu_uq.column_name)
                    FROM information_schema.key_column_usage AS kcu
                    JOIN information_schema.key_column_usage AS kcu_uq
                        ON kcu_uq.constraint_catalog = rc.unique_constraint_catalog
                            AND kcu_uq.constraint_schema = rc.unique_constraint_schema
                            AND kcu_uq.constraint_name = rc.unique_constraint_name
                            AND kcu_uq.ordinal_position = kcu.ordinal_position
                    WHERE
                        kcu.constraint_catalog = tc.constraint_catalog
                        AND kcu.constraint_schema = tc.constraint_schema
                        AND kcu.constraint_name = tc.constraint_name
                    ORDER BY kcu.ordinal_position
                )
            FROM information_schema.table_constraints AS tc
            -- Join the foreign key constraint for the referring table.
            JOIN information_schema.referential_constraints AS rc
                ON rc.constraint_catalog = tc.constraint_catalog
                    AND rc.constraint_schema = tc.constraint_schema
                    AND rc.constraint_name = tc.constraint_name
            -- Join the corresponding unique constraint on the referenced table.
            JOIN information_schema.table_constraints AS tc_uq
                ON tc_uq.constraint_catalog = rc.unique_constraint_catalog
                    AND tc_uq.constraint_schema = rc.unique_constraint_schema
                    AND tc_uq.constraint_name = rc.unique_constraint_name
            -- Join in the tables view so WHERE filters can reference fields in it.
            JOIN information_schema.tables AS t
                ON t.table_catalog = tc.table_catalog
                    AND t.table_schema = tc.table_schema
                    AND t.table_name = tc.table_name
            WHERE
                {table_filter_query}
                {table_type_query}
                {schema_filter_query}
                tc.constraint_type = "FOREIGN KEY"
        """.format(
            table_filter_query=table_filter_query,
            table_type_query=table_type_query,
            schema_filter_query=schema_filter_query,
        )

        with connection.connection.database.snapshot() as snap:
            rows = list(snap.execute_sql(sql))
            result_dict = {}

            for row in rows:
                # Row layout: schema, table, constraint name, referred table,
                # referred schema, array of (constrained, referred) column pairs.
                row[0] = row[0] or None
                table_info = result_dict.get((row[0], row[1]), [])

                # Unzip the aligned column pairs into the two lists.
                constrained_columns, referred_columns = zip(*row[5])
                fk_info = {
                    "name": row[2],
                    "referred_table": row[3],
                    "referred_schema": row[4] or None,
                    "referred_columns": list(referred_columns),
                    "constrained_columns": list(constrained_columns),
                }

                table_info.append(fk_info)
                result_dict[(row[0], row[1])] = table_info

        return result_dict

    @engine_to_connection
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Get the table foreign key constraints.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name

        Returns:
            list: Dicts, each of which describes a foreign key constraint.
        """
        kind = None if not USING_SQLACLCHEMY_20 else ObjectKind.ANY
        dict = self.get_multi_foreign_keys(
            connection, schema=schema, filter_names=[table_name], kind=kind
        )
        schema = schema or None
        return dict.get((schema, table_name), [])

    @engine_to_connection
    def get_table_names(self, connection, schema=None, **kw):
        """Get all the tables from the given schema.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            schema (str): Optional. Schema name.

        Returns:
            list: Names of the tables within the given schema.
        """
        sql = """
SELECT table_name
FROM information_schema.tables
WHERE table_type = 'BASE TABLE' AND table_schema = '{schema}'
""".format(
            schema=schema or ""
        )

        table_names = []
        with connection.connection.database.snapshot() as snap:
            rows = snap.execute_sql(sql)

            for row in rows:
                table_names.append(row[0])

        return table_names

    @engine_to_connection
    def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        """Get the table unique constraints.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name

        Returns:
            dict: Dict with the unique constraints' descriptions.
        """
        sql = """
SELECT ccu.CONSTRAINT_NAME, ccu.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu
    USING (TABLE_CATALOG, TABLE_SCHEMA, CONSTRAINT_NAME)
WHERE
    tc.TABLE_NAME="{table_name}"
    AND tc.TABLE_SCHEMA="{table_schema}"
    AND tc.CONSTRAINT_TYPE = "UNIQUE"
    AND tc.CONSTRAINT_NAME IS NOT NULL
""".format(
            table_schema=schema or "", table_name=table_name
        )

        cols = []
        with connection.connection.database.snapshot() as snap:
            rows = snap.execute_sql(sql)

            for row in rows:
                # One entry per (constraint, column) row returned.
                cols.append({"name": row[0], "column_names": [row[1]]})

        return cols

    @engine_to_connection
    def has_table(self, connection, table_name, schema=None, **kw):
        """Check if the given table exists.

        The method is used by SQLAlchemy introspection systems.

        Args:
            connection (sqlalchemy.engine.base.Connection):
                SQLAlchemy connection or engine object.
            table_name (str): Name of the table to introspect.
            schema (str): Optional. Schema name.

        Returns:
            bool: True, if the given table exists, False otherwise.
        """
        with connection.connection.database.snapshot() as snap:
            rows = snap.execute_sql(
                """
SELECT true
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA="{table_schema}" AND TABLE_NAME="{table_name}"
LIMIT 1
""".format(
                    table_schema=schema or "", table_name=table_name
                )
            )

            # Any returned row means the table exists.
            for _ in rows:
                return True

        return False

    @engine_to_connection
    def has_sequence(self, connection, sequence_name, schema=None, **kw):
        """Check the existence of a particular sequence in the database.

        Given a :class:`_engine.Connection` object and a string
        `sequence_name`, return True if the given sequence exists in
        the database, False otherwise.
        """

        with connection.connection.database.snapshot() as snap:
            rows = snap.execute_sql(
                """
                SELECT true
                FROM INFORMATION_SCHEMA.SEQUENCES
                WHERE NAME="{sequence_name}"
                AND SCHEMA="{schema}"
                LIMIT 1
                """.format(
                    sequence_name=sequence_name, schema=schema or ""
                )
            )

            for _ in rows:
                return True

        return False

    def set_isolation_level(self, conn_proxy, level):
        """Set the connection isolation level.

        Args:
            conn_proxy (
                Union[
                    sqlalchemy.pool._ConnectionFairy,
                    spanner_dbapi.connection.Connection,
                ]
            ):
                Database connection proxy object or the connection itself.
            level (string): Isolation level.
        """
        # Accept either the raw DB API connection or a pool proxy around it.
        if isinstance(conn_proxy, spanner_dbapi.Connection):
            conn = conn_proxy
        else:
            conn = conn_proxy.connection

        if level == "AUTOCOMMIT":
            conn.autocommit = True
        else:
            if isinstance(level, str):
                level = self._string_to_isolation_level(level)
            conn.isolation_level = level
            conn.autocommit = False

    def get_isolation_level(self, conn_proxy):
        """Get the connection isolation level.

        Args:
            conn_proxy (
                Union[
                    sqlalchemy.pool._ConnectionFairy,
                    spanner_dbapi.connection.Connection,
                ]
            ):
                Database connection proxy object or the connection itself.

        Returns:
            str: the connection isolation level.
        """
        if isinstance(conn_proxy, spanner_dbapi.Connection):
            conn = conn_proxy
        else:
            conn = conn_proxy.connection

        if conn.autocommit:
            return "AUTOCOMMIT"

        level = conn.isolation_level
        # UNSPECIFIED is reported as the effective default, SERIALIZABLE.
        if level == TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED:
            level = TransactionOptions.IsolationLevel.SERIALIZABLE
        if isinstance(level, TransactionOptions.IsolationLevel):
            level = self._isolation_level_to_string(level)

        return level

    def _string_to_isolation_level(self, name):
        """Map an SQLAlchemy isolation level name to the proto enum value."""
        try:
            # SQLAlchemy guarantees that the isolation level string will:
            # 1. Be all upper case.
            # 2. Contain spaces instead of underscores.
            # We change the spaces into underscores to get the enum value.
            return TransactionOptions.IsolationLevel[name.replace(" ", "_")]
        except KeyError:
            raise ValueError("Invalid isolation level name '%s'" % name)

    def _isolation_level_to_string(self, level):
        """Map a proto enum isolation level back to SQLAlchemy's spelling."""
        # SQLAlchemy expects isolation level names to contain spaces,
        # and not underscores, so we remove those before returning.
        return level.name.replace("_", " ")

    def do_rollback(self, dbapi_connection):
        """
        To prevent rollback exception, don't rollback
        committed/rolled back transactions.
        """
        if not isinstance(dbapi_connection, spanner_dbapi.Connection):
            dbapi_connection = dbapi_connection.connection

        # Skip the rollback when the current transaction is already finished.
        if dbapi_connection._transaction and (
            dbapi_connection._transaction.rolled_back
            or dbapi_connection._transaction.committed
        ):
            pass
        else:
            trace_attributes = {
                "db.instance": dbapi_connection.database.name
                if dbapi_connection.database
                else ""
            }
            with trace_call("SpannerSqlAlchemy.Rollback", trace_attributes):
                dbapi_connection.rollback()

    def do_commit(self, dbapi_connection):
        """Commit the current transaction, wrapped in a trace span."""
        trace_attributes = {
            "db.instance": dbapi_connection.database.name
            if dbapi_connection.database
            else ""
        }
        with trace_call("SpannerSqlAlchemy.Commit", trace_attributes):
            dbapi_connection.commit()

    def do_close(self, dbapi_connection):
        """Close the DB API connection, wrapped in a trace span."""
        trace_attributes = {
            "db.instance": dbapi_connection.database.name
            if dbapi_connection.database
            else ""
        }
        with trace_call("SpannerSqlAlchemy.Close", trace_attributes):
            dbapi_connection.close()

    def do_executemany(self, cursor, statement, parameters, context=None):
        """Execute a statement for each parameter set, wrapped in a trace span."""
        trace_attributes = {
            "db.statement": statement,
            "db.params": parameters,
            "db.instance": cursor.connection.database.name,
        }
        with trace_call("SpannerSqlAlchemy.ExecuteMany", trace_attributes):
            cursor.executemany(statement, parameters)

    def do_execute(self, cursor, statement, parameters, context=None):
        """Execute a single parameterized statement, wrapped in a trace span."""
        trace_attributes = {
            "db.statement": statement,
            "db.params": parameters,
            "db.instance": cursor.connection.database.name,
        }
        with trace_call("SpannerSqlAlchemy.Execute", trace_attributes):
            cursor.execute(statement, parameters)

    def do_execute_no_params(self, cursor, statement, context=None):
        """Execute a statement without parameters, wrapped in a trace span."""
        trace_attributes = {
            "db.statement": statement,
            "db.instance": cursor.connection.database.name,
        }
        with trace_call("SpannerSqlAlchemy.ExecuteNoParams", trace_attributes):
            cursor.execute(statement)


# Alembic ALTER operation override
@compiles(ColumnNullable, "spanner+spanner")
def
visit_column_nullable( + element: "ColumnNullable", compiler: "SpannerDDLCompiler", **kw +) -> str: + return _format_alter_column( + compiler, + element.table_name, + element.schema, + element.column_name, + element.existing_type, + element.nullable, + element.existing_server_default, + ) + + +# Alembic ALTER operation override +@compiles(ColumnType, "spanner+spanner") +def visit_column_type( + element: "ColumnType", compiler: "SpannerDDLCompiler", **kw +) -> str: + return _format_alter_column( + compiler, + element.table_name, + element.schema, + element.column_name, + element.type_, + element.existing_nullable, + element.existing_server_default, + ) + + +def _format_alter_column( + compiler, table_name, schema, column_name, type_, nullable, server_default +): + # Older versions of SQLAlchemy pass in a boolean to indicate whether there + # is an existing DEFAULT constraint, instead of the actual DEFAULT constraint + # expression. In those cases, we do not want to explicitly include the DEFAULT + # constraint in the expression that is generated here. 
+ if isinstance(server_default, bool): + server_default = None + return "%s %s %s%s%s" % ( + alter_table(compiler, table_name, schema), + alter_column(compiler, column_name), + format_type(compiler, type_), + "" if nullable else " NOT NULL", + ( + "" + if server_default is None + else f" DEFAULT {format_server_default(compiler, server_default)}" + ), + ) diff --git a/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/version.py b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/version.py new file mode 100644 index 000000000000..6f57db5e94f0 --- /dev/null +++ b/packages/sqlalchemy-spanner/google/cloud/sqlalchemy_spanner/version.py @@ -0,0 +1,7 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +__version__ = "1.17.2" diff --git a/packages/sqlalchemy-spanner/migration_test_cleanup.py b/packages/sqlalchemy-spanner/migration_test_cleanup.py new file mode 100644 index 000000000000..b6efb15910e0 --- /dev/null +++ b/packages/sqlalchemy-spanner/migration_test_cleanup.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re +import sys + +from google.cloud import spanner + + +def main(argv): + db_url = argv[0] + + project = re.findall(r"projects(.*?)instances", db_url) + instance_id = re.findall(r"instances(.*?)databases", db_url) + database_id = re.findall(r"databases(.*?)$", db_url) + + client = spanner.Client(project="".join(project).replace("/", "")) + instance = client.instance(instance_id="".join(instance_id).replace("/", "")) + database = instance.database("".join(database_id).replace("/", "")) + + database.update_ddl( + ["DROP TABLE IF EXISTS account", "DROP TABLE IF EXISTS alembic_version"] + ).result(120) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/packages/sqlalchemy-spanner/noxfile.py b/packages/sqlalchemy-spanner/noxfile.py new file mode 100644 index 000000000000..d1bf39a4afab --- /dev/null +++ b/packages/sqlalchemy-spanner/noxfile.py @@ -0,0 +1,512 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import configparser +import os +import shutil + +import nox + +ALEMBIC_CONF = """ +[alembic] +script_location = test_migration +prepend_sys_path = . 
+sqlalchemy.url = {} +[post_write_hooks] +[loggers] +keys = root,sqlalchemy,alembic +[handlers] +keys = console +[formatters] +keys = generic +[logger_root] +level = WARN +handlers = console +qualname = +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine +[logger_alembic] +level = INFO +handlers = +qualname = alembic +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S +""" + +UPGRADE_CODE = """def upgrade(): + op.create_table( + 'account', + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('name', sa.String(50), nullable=False), + sa.Column('description', sa.Unicode(200)), + ) + op.alter_column( + 'account', + 'name', + existing_type=sa.String(70), + ) + op.alter_column( + 'account', + 'description', + existing_type=sa.Unicode(200), + nullable=False, + ) + """ + + +BLACK_VERSION = "black==23.7.0" +ISORT_VERSION = "isort==5.11.0" +BLACK_PATHS = ["google", "test", "noxfile.py", "setup.py", "samples"] +UNIT_TEST_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"] +ALL_PYTHON = list(UNIT_TEST_PYTHON_VERSIONS) +ALL_PYTHON.extend(["3.7"]) +SYSTEM_TEST_PYTHON_VERSIONS = ["3.12"] +SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS = ["3.8", "3.12", "3.14"] +DEFAULT_PYTHON_VERSION = "3.14" +DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20 = "3.14" + + +@nox.session(python=DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run( + "flake8", + "google", + "test", + "--max-line-length=88", + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20) +def blacken(session): + """Run black. + + Format code to uniform standard. 
+ + This currently uses Python 3.6 due to the automated Kokoro run of synthtool. + That run uses an image that doesn't have 3.6 installed. Before updating this + check the state of the `gcp_ubuntu_config` we use for that Kokoro run. + """ + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments", "setuptools") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") + + +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS[0]) +def compliance_test_14(session): + """Run SQLAlchemy dialect compliance test suite.""" + + # Check the value of `RUN_COMPLIANCE_TESTS` env var. It defaults to true. + if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false": + session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping") + # Sanity check: Only run tests if the environment variable is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip( + "Credentials or emulator host must be set via environment variable" + ) + + session.install( + "pytest", + "pytest-cov", + "pytest-asyncio", + ) + + session.install("mock") + session.install(".[tracing]") + session.run("pip", "install", "sqlalchemy>=1.4,<2.0", "--force-reinstall") + session.run("python", "create_test_database.py") + session.run( + "py.test", + "--cov=google.cloud.sqlalchemy_spanner", + "--cov=test", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + "--asyncio-mode=auto", + "test/test_suite_14.py", + *session.posargs, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20) +def compliance_test_20(session): + """Run SQLAlchemy dialect compliance test suite.""" + + # Check the value of `RUN_COMPLIANCE_TESTS` env var. It defaults to true. 
+ if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false": + session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping") + + # Sanity check: Only run tests if the environment variable is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip( + "Credentials or emulator host must be set via environment variable" + ) + + session.install( + "pytest", + "pytest-cov", + "pytest-asyncio", + ) + + session.install("mock") + session.install("-e", ".", "--force-reinstall") + session.run("python", "create_test_database.py") + + session.install("sqlalchemy>=2.0") + + session.run( + "py.test", + "--cov=google.cloud.sqlalchemy_spanner", + "--cov=test", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + "--asyncio-mode=auto", + "test/test_suite_20.py", + *session.posargs, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20) +def mockserver(session): + """Run mockserver tests.""" + # Run SQLAlchemy dialect tests using an in-mem mocked Spanner server. 
+ session.install("setuptools") + session.install("pytest") + session.install("mock") + session.install(".") + session.install("sqlalchemy>=2.0") + session.run( + "python", + "create_test_config.py", + "my-project", + "my-instance", + "my-database", + "none", + "AnonymousCredentials", + "localhost", + "9999", + ) + session.run( + "py.test", "--quiet", os.path.join("test", "mockserver_tests"), *session.posargs + ) + + +@nox.session(python=SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS[0]) +def migration_test(session): + """Test migrations with SQLAlchemy v1.4 and Alembic""" + session.run("pip", "install", "sqlalchemy>=1.4,<2.0", "--force-reinstall") + _migration_test(session) + + +@nox.session(python=SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS[-1]) +def _migration_test(session): + """Migrate with SQLAlchemy and Alembic and check the result.""" + import glob + import os + import shutil + + session.install("pytest") + session.install(".") + session.install("alembic") + + session.run("python", "create_test_database.py") + + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + db_url = config.get("db", "default") + + session.run("alembic", "init", "test_migration") + + # setting testing configurations + os.remove("alembic.ini") + with open("alembic.ini", "w") as f: + f.write(ALEMBIC_CONF.format(db_url)) + + session.run("alembic", "revision", "-m", "migration_for_test") + files = glob.glob("test_migration/versions/*.py") + + # updating the upgrade-script code + with open(files[0], "rb") as f: + script_code = f.read().decode() + + script_code = script_code.replace( + """def upgrade() -> None:\n pass""", UPGRADE_CODE + ) + with open(files[0], "wb") as f: + f.write(script_code.encode()) + + os.remove("test_migration/env.py") + shutil.copyfile("test_migration_env.py", "test_migration/env.py") + + # running the test migration + session.run("alembic", "upgrade", "head") + + # clearing the 
migration data + os.remove("alembic.ini") + shutil.rmtree("test_migration") + session.run("python", "migration_test_cleanup.py", db_url) + if os.path.exists("test.cfg"): + os.remove("test.cfg") + + +@nox.session(python=ALL_PYTHON) +@nox.parametrize("test_type", ["unit", "mockserver"]) +def unit(session, test_type): + """Run unit tests.""" + if session.python in ("3.7",): + session.skip("Python 3.7 is no longer supported") + + if ( + test_type == "mockserver" + and session.python != DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20 + ): + session.skip("mockserver tests only run on python 3.14") + + if test_type == "mockserver": + mockserver(session) + return + + if test_type == "unit": + # Run SQLAlchemy dialect compliance test suite with OpenTelemetry. + session.install("setuptools") + session.install("pytest") + session.install("mock") + session.install(".") + session.install("opentelemetry-api") + session.install("opentelemetry-sdk") + session.install("opentelemetry-instrumentation") + session.run("py.test", "--quiet", os.path.join("test/unit"), *session.posargs) + return + + +@nox.session(python=SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS) +@nox.parametrize( + "test_type", + ["system", "compliance_14", "compliance_20", "migration_14", "migration_20"], +) +def system(session, test_type): + """Run SQLAlchemy dialect system test suite.""" + + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip( + "Credentials or emulator host must be set via environment variable" + ) + + if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false" and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping") + + if test_type == "system" and session.python not in SYSTEM_TEST_PYTHON_VERSIONS: + session.skip("Standard system tests configured to run exclusively on 3.12") + if ( + test_type in ["compliance_14", "migration_14"] + and session.python 
!= SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS[0] + ): + session.skip( + f"SQLAlchemy 1.4-based tests configured to run exclusively on {SYSTEM_COMPLIANCE_MIGRATION_TEST_PYTHON_VERSIONS[0]}" + ) + if ( + test_type in ["compliance_20", "migration_20"] + and session.python != DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20 + ): + session.skip( + f"SQLAlchemy 2.0-based tests configured to run exclusively on {DEFAULT_PYTHON_VERSION_FOR_SQLALCHEMY_20}" + ) + + if test_type == "system": + session.install("pytest", "pytest-cov", "pytest-asyncio") + session.install("mock") + session.install(".[tracing]") + session.install("opentelemetry-api") + session.install("opentelemetry-sdk") + session.install("opentelemetry-instrumentation") + session.run("python", "create_test_database.py") + session.install("sqlalchemy>=2.0") + session.run( + "py.test", "--quiet", os.path.join("test", "system"), *session.posargs + ) + session.run("python", "drop_test_database.py") + elif test_type == "compliance_14": + compliance_test_14(session) + elif test_type == "compliance_20": + compliance_test_20(session) + elif test_type == "migration_14": + migration_test(session) + elif test_type == "migration_20": + _migration_test(session) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def mypy(session): + """Run the type checker.""" + session.skip("mypy tests are not yet supported") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def core_deps_from_source(session): + """Run all tests with core dependencies installed from source""" + session.skip("Core deps from source tests are not yet supported") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def prerelease_deps(session): + """Run all tests with prerelease versions of dependencies installed.""" + session.skip("prerelease deps tests are not yet supported") + + +@nox.session(python="3.10") +def docs(session): + """Build the docs for this library.""" + session.skip("docs builds are not yet supported") + + +@nox.session(python="3.10") +def 
docfx(session): + """Build the docfx yaml files for this library.""" + session.skip("docfx builds are not yet supported") + + +# @nox.session(python="3.10") +# def docs(session): +# """Build the docs for this library.""" + +# session.install("-e", ".") +# session.install( +# # We need to pin to specific versions of the `sphinxcontrib-*` packages +# # which still support sphinx 4.x. +# # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 +# # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. +# "sphinxcontrib-applehelp==1.0.4", +# "sphinxcontrib-devhelp==1.0.2", +# "sphinxcontrib-htmlhelp==2.0.1", +# "sphinxcontrib-qthelp==1.0.3", +# "sphinxcontrib-serializinghtml==1.1.5", +# "sphinx==4.5.0", +# "alabaster", +# "recommonmark", +# ) + +# shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) +# session.run( +# "sphinx-build", +# "-W", # warnings as errors +# "-T", # show full traceback on exception +# "-N", # no colors +# "-b", +# "html", +# "-d", +# os.path.join("docs", "_build", "doctrees", ""), +# os.path.join("docs", ""), +# os.path.join("docs", "_build", "html", ""), +# ) + + +# @nox.session(python="3.10") +# def docfx(session): +# """Build the docfx yaml files for this library.""" + +# session.install("-e", ".") +# session.install( +# # We need to pin to specific versions of the `sphinxcontrib-*` packages +# # which still support sphinx 4.x. +# # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 +# # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. 
+# "sphinxcontrib-applehelp==1.0.4", +# "sphinxcontrib-devhelp==1.0.2", +# "sphinxcontrib-htmlhelp==2.0.1", +# "sphinxcontrib-qthelp==1.0.3", +# "sphinxcontrib-serializinghtml==1.1.5", +# "gcp-sphinx-docfx-yaml", +# "alabaster", +# "recommonmark", +# ) + +# shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) +# session.run( +# "sphinx-build", +# "-T", # show full traceback on exception +# "-N", # no colors +# "-D", +# ( +# "extensions=sphinx.ext.autodoc," +# "sphinx.ext.autosummary," +# "docfx_yaml.extension," +# "sphinx.ext.intersphinx," +# "sphinx.ext.coverage," +# "sphinx.ext.napoleon," +# "sphinx.ext.todo," +# "sphinx.ext.viewcode," +# "recommonmark" +# ), +# "-b", +# "html", +# "-d", +# os.path.join("docs", "_build", "doctrees", ""), +# os.path.join("docs", ""), +# os.path.join("docs", "_build", "html", ""), +# ) + + +@nox.session +def format(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION, ISORT_VERSION) + import os + + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) diff --git a/packages/sqlalchemy-spanner/requirements.in b/packages/sqlalchemy-spanner/requirements.in new file mode 100644 index 000000000000..fad90d0391be --- /dev/null +++ b/packages/sqlalchemy-spanner/requirements.in @@ -0,0 +1,13 @@ +build +click +packaging +pep517 +pip-tools +pyparsing +tomli +sqlalchemy +google-cloud-spanner +alembic +opentelemetry-api +opentelemetry-sdk +opentelemetry-instrumentation diff --git a/packages/sqlalchemy-spanner/requirements.txt b/packages/sqlalchemy-spanner/requirements.txt new file mode 100644 index 000000000000..1378b63a8fd7 --- /dev/null +++ b/packages/sqlalchemy-spanner/requirements.txt @@ -0,0 +1,730 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --generate-hashes +# +alembic==1.17.0 \ + 
--hash=sha256:4652a0b3e19616b57d652b82bfa5e38bf5dbea0813eed971612671cb9e90c0fe \ + --hash=sha256:80523bc437d41b35c5db7e525ad9d908f79de65c27d6a5a5eab6df348a352d99 + # via -r requirements.in +build==1.4.0 \ + --hash=sha256:6a07c1b8eb6f2b311b96fcbdbce5dab5fe637ffda0fd83c9cac622e927501596 \ + --hash=sha256:f1b91b925aa322be454f8330c6fb48b465da993d1e7e7e6fa35027ec49f3c936 + # via + # -r requirements.in + # pip-tools +cachetools==7.0.2 \ + --hash=sha256:7e7f09a4ca8b791d8bb4864afc71e9c17e607a28e6839ca1a644253c97dbeae0 \ + --hash=sha256:938dcad184827c5e94928c4fd5526e2b46692b7fb1ae94472da9131d0299343c + # via google-auth +certifi==2026.2.25 \ + --hash=sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa \ + --hash=sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7 + # via requests +charset-normalizer==3.4.4 \ + --hash=sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad \ + --hash=sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93 \ + --hash=sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394 \ + --hash=sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89 \ + --hash=sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc \ + --hash=sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86 \ + --hash=sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63 \ + --hash=sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d \ + --hash=sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f \ + --hash=sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8 \ + --hash=sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0 \ + --hash=sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505 \ + --hash=sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161 \ + 
--hash=sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af \ + --hash=sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152 \ + --hash=sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318 \ + --hash=sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72 \ + --hash=sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4 \ + --hash=sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e \ + --hash=sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3 \ + --hash=sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576 \ + --hash=sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c \ + --hash=sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1 \ + --hash=sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8 \ + --hash=sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1 \ + --hash=sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2 \ + --hash=sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44 \ + --hash=sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26 \ + --hash=sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88 \ + --hash=sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016 \ + --hash=sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede \ + --hash=sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf \ + --hash=sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a \ + --hash=sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc \ + --hash=sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0 \ + --hash=sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84 \ + --hash=sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db \ + 
--hash=sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1 \ + --hash=sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7 \ + --hash=sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed \ + --hash=sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8 \ + --hash=sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133 \ + --hash=sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e \ + --hash=sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef \ + --hash=sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14 \ + --hash=sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2 \ + --hash=sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0 \ + --hash=sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d \ + --hash=sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828 \ + --hash=sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f \ + --hash=sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf \ + --hash=sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6 \ + --hash=sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328 \ + --hash=sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090 \ + --hash=sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa \ + --hash=sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381 \ + --hash=sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c \ + --hash=sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb \ + --hash=sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc \ + --hash=sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a \ + --hash=sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec \ + 
--hash=sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc \ + --hash=sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac \ + --hash=sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e \ + --hash=sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313 \ + --hash=sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569 \ + --hash=sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3 \ + --hash=sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d \ + --hash=sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525 \ + --hash=sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894 \ + --hash=sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3 \ + --hash=sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9 \ + --hash=sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a \ + --hash=sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9 \ + --hash=sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14 \ + --hash=sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25 \ + --hash=sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50 \ + --hash=sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf \ + --hash=sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1 \ + --hash=sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3 \ + --hash=sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac \ + --hash=sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e \ + --hash=sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815 \ + --hash=sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c \ + --hash=sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6 \ + 
--hash=sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6 \ + --hash=sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e \ + --hash=sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4 \ + --hash=sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84 \ + --hash=sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69 \ + --hash=sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15 \ + --hash=sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191 \ + --hash=sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0 \ + --hash=sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897 \ + --hash=sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd \ + --hash=sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2 \ + --hash=sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794 \ + --hash=sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d \ + --hash=sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074 \ + --hash=sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3 \ + --hash=sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224 \ + --hash=sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838 \ + --hash=sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a \ + --hash=sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d \ + --hash=sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d \ + --hash=sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f \ + --hash=sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8 \ + --hash=sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490 \ + --hash=sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966 \ + 
--hash=sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9 \ + --hash=sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3 \ + --hash=sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e \ + --hash=sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608 + # via requests +click==8.3.1 \ + --hash=sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a \ + --hash=sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6 + # via + # -r requirements.in + # pip-tools +deprecated==1.2.18 \ + --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ + --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +google-api-core[grpc]==2.26.0 \ + --hash=sha256:2b204bd0da2c81f918e3582c48458e24c11771f987f6258e6e227212af78f3ed \ + --hash=sha256:e6e6d78bd6cf757f4aee41dcc85b07f485fbb069d5daa3afb126defba1e91a62 + # via + # google-cloud-core + # google-cloud-spanner +google-auth==2.41.1 \ + --hash=sha256:754843be95575b9a19c604a848a41be03f7f2afd8c019f716dc1f51ee41c639d \ + --hash=sha256:b76b7b1f9e61f0cb7e88870d14f6a94aeef248959ef6992670efee37709cbfd2 + # via + # google-api-core + # google-cloud-core +google-cloud-core==2.4.3 \ + --hash=sha256:1fab62d7102844b278fe6dead3af32408b1df3eb06f5c7e8634cbd40edc4da53 \ + --hash=sha256:5130f9f4c14b4fafdff75c79448f9495cfade0d8775facf1b09c3bf67e027f6e + # via google-cloud-spanner +google-cloud-spanner==3.58.0 \ + --hash=sha256:00d9a809155d9a92e891a0a2b2568b920016652549864024da30940ac780cc2c \ + --hash=sha256:db1c632ac5d0a1188cfe45b31db416120d3e0b07e885d0443a398c99e9fec542 + # via -r requirements.in +googleapis-common-protos[grpc]==1.71.0 \ + --hash=sha256:1aec01e574e29da63c80ba9f7bbf1ccfaacf1da877f23609fe236ca7c72a2e2e \ + --hash=sha256:59034a1d849dc4d18971997a72ac56246570afdd17f9369a0ff68218d50ab78c + # via + # google-api-core + # 
grpc-google-iam-v1 + # grpcio-status +greenlet==3.3.2 \ + --hash=sha256:02b0a8682aecd4d3c6c18edf52bc8e51eacdd75c8eac52a790a210b06aa295fd \ + --hash=sha256:18cb1b7337bca281915b3c5d5ae19f4e76d35e1df80f4ad3c1a7be91fadf1082 \ + --hash=sha256:1a9172f5bf6bd88e6ba5a84e0a68afeac9dc7b6b412b245dd64f52d83c81e55b \ + --hash=sha256:1e692b2dae4cc7077cbb11b47d258533b48c8fde69a33d0d8a82e2fe8d8531d5 \ + --hash=sha256:1ebd458fa8285960f382841da585e02201b53a5ec2bac6b156fc623b5ce4499f \ + --hash=sha256:1fb39a11ee2e4d94be9a76671482be9398560955c9e568550de0224e41104727 \ + --hash=sha256:20154044d9085151bc309e7689d6f7ba10027f8f5a8c0676ad398b951913d89e \ + --hash=sha256:2eaf067fc6d886931c7962e8c6bede15d2f01965560f3359b27c80bde2d151f2 \ + --hash=sha256:34308836d8370bddadb41f5a7ce96879b72e2fdfb4e87729330c6ab52376409f \ + --hash=sha256:394ead29063ee3515b4e775216cb756b2e3b4a7e55ae8fd884f17fa579e6b327 \ + --hash=sha256:3ceec72030dae6ac0c8ed7591b96b70410a8be370b6a477b1dbc072856ad02bd \ + --hash=sha256:4375a58e49522698d3e70cc0b801c19433021b5c37686f7ce9c65b0d5c8677d2 \ + --hash=sha256:43e99d1749147ac21dde49b99c9abffcbc1e2d55c67501465ef0930d6e78e070 \ + --hash=sha256:442b6057453c8cb29b4fb36a2ac689382fc71112273726e2423f7f17dc73bf99 \ + --hash=sha256:45abe8eb6339518180d5a7fa47fa01945414d7cca5ecb745346fc6a87d2750be \ + --hash=sha256:4c956a19350e2c37f2c48b336a3afb4bff120b36076d9d7fb68cb44e05d95b79 \ + --hash=sha256:508c7f01f1791fbc8e011bd508f6794cb95397fdb198a46cb6635eb5b78d85a7 \ + --hash=sha256:527fec58dc9f90efd594b9b700662ed3fb2493c2122067ac9c740d98080a620e \ + --hash=sha256:59b3e2c40f6706b05a9cd299c836c6aa2378cabe25d021acd80f13abf81181cf \ + --hash=sha256:5d0e35379f93a6d0222de929a25ab47b5eb35b5ef4721c2b9cbcc4036129ff1f \ + --hash=sha256:63d10328839d1973e5ba35e98cccbca71b232b14051fd957b6f8b6e8e80d0506 \ + --hash=sha256:64970c33a50551c7c50491671265d8954046cb6e8e2999aacdd60e439b70418a \ + --hash=sha256:6c6f8ba97d17a1e7d664151284cb3315fc5f8353e75221ed4324f84eb162b395 \ + 
--hash=sha256:8b466dff7a4ffda6ca975979bab80bdadde979e29fc947ac3be4451428d8b0e4 \ + --hash=sha256:8c1fdd7d1b309ff0da81d60a9688a8bd044ac4e18b250320a96fc68d31c209ca \ + --hash=sha256:8c4dd0f3997cf2512f7601563cc90dfb8957c0cff1e3a1b23991d4ea1776c492 \ + --hash=sha256:8d1658d7291f9859beed69a776c10822a0a799bc4bfe1bd4272bb60e62507dab \ + --hash=sha256:8e2cd90d413acbf5e77ae41e5d3c9b3ac1d011a756d7284d7f3f2b806bbd6358 \ + --hash=sha256:8e4ab3cfb02993c8cc248ea73d7dae6cec0253e9afa311c9b37e603ca9fad2ce \ + --hash=sha256:94ad81f0fd3c0c0681a018a976e5c2bd2ca2d9d94895f23e7bb1af4e8af4e2d5 \ + --hash=sha256:97245cc10e5515dbc8c3104b2928f7f02b6813002770cfaffaf9a6e0fc2b94ef \ + --hash=sha256:9bc885b89709d901859cf95179ec9f6bb67a3d2bb1f0e88456461bd4b7f8fd0d \ + --hash=sha256:a2a5be83a45ce6188c045bcc44b0ee037d6a518978de9a5d97438548b953a1ac \ + --hash=sha256:a443358b33c4ec7b05b79a7c8b466f5d275025e750298be7340f8fc63dff2a55 \ + --hash=sha256:a7945dd0eab63ded0a48e4dcade82939783c172290a7903ebde9e184333ca124 \ + --hash=sha256:aa6ac98bdfd716a749b84d4034486863fd81c3abde9aa3cf8eff9127981a4ae4 \ + --hash=sha256:ab0c7e7901a00bc0a7284907273dc165b32e0d109a6713babd04471327ff7986 \ + --hash=sha256:ac8d61d4343b799d1e526db579833d72f23759c71e07181c2d2944e429eb09cd \ + --hash=sha256:ad0c8917dd42a819fe77e6bdfcb84e3379c0de956469301d9fd36427a1ca501f \ + --hash=sha256:ae9e21c84035c490506c17002f5c8ab25f980205c3e61ddb3a2a2a2e6c411fcb \ + --hash=sha256:b26b0f4428b871a751968285a1ac9648944cea09807177ac639b030bddebcea4 \ + --hash=sha256:b568183cf65b94919be4438dc28416b234b678c608cafac8874dfeeb2a9bbe13 \ + --hash=sha256:b6997d360a4e6a4e936c0f9625b1c20416b8a0ea18a8e19cabbefc712e7397ab \ + --hash=sha256:b8bddc5b73c9720bea487b3bffdb1840fe4e3656fba3bd40aa1489e9f37877ff \ + --hash=sha256:c04c5e06ec3e022cbfe2cd4a846e1d4e50087444f875ff6d2c2ad8445495cf1a \ + --hash=sha256:c2e47408e8ce1c6f1ceea0dffcdf6ebb85cc09e55c7af407c99f1112016e45e9 \ + --hash=sha256:c56692189a7d1c7606cb794be0a8381470d95c57ce5be03fb3d0ef57c7853b86 \ + 
--hash=sha256:ccd21bb86944ca9be6d967cf7691e658e43417782bce90b5d2faeda0ff78a7dd \ + --hash=sha256:cd6f9e2bbd46321ba3bbb4c8a15794d32960e3b0ae2cc4d49a1a53d314805d71 \ + --hash=sha256:d248d8c23c67d2291ffd47af766e2a3aa9fa1c6703155c099feb11f526c63a92 \ + --hash=sha256:d3a62fa76a32b462a97198e4c9e99afb9ab375115e74e9a83ce180e7a496f643 \ + --hash=sha256:e26e72bec7ab387ac80caa7496e0f908ff954f31065b0ffc1f8ecb1338b11b54 \ + --hash=sha256:e3cb43ce200f59483eb82949bf1835a99cf43d7571e900d7c8d5c62cdf25d2f9 + # via sqlalchemy +grpc-google-iam-v1==0.14.3 \ + --hash=sha256:7a7f697e017a067206a3dfef44e4c634a34d3dee135fe7d7a4613fe3e59217e6 \ + --hash=sha256:879ac4ef33136c5491a6300e27575a9ec760f6cdf9a2518798c1b8977a5dc389 + # via google-cloud-spanner +grpc-interceptor==0.15.4 \ + --hash=sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d \ + --hash=sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926 + # via google-cloud-spanner +grpcio==1.75.1 \ + --hash=sha256:0049a7bf547dafaeeb1db17079ce79596c298bfe308fc084d023c8907a845b9a \ + --hash=sha256:030a6164bc2ca726052778c0cf8e3249617a34e368354f9e6107c27ad4af8c28 \ + --hash=sha256:06373a94fd16ec287116a825161dca179a0402d0c60674ceeec8c9fba344fe66 \ + --hash=sha256:07a554fa31c668cf0e7a188678ceeca3cb8fead29bbe455352e712ec33ca701c \ + --hash=sha256:0ee119f4f88d9f75414217823d21d75bfe0e6ed40135b0cbbfc6376bc9f7757d \ + --hash=sha256:1712b5890b22547dd29f3215c5788d8fc759ce6dd0b85a6ba6e2731f2d04c088 \ + --hash=sha256:259526a7159d39e2db40d566fe3e8f8e034d0fb2db5bf9c00e09aace655a4c2b \ + --hash=sha256:2720c239c1180eee69f7883c1d4c83fc1a495a2535b5fa322887c70bf02b16e8 \ + --hash=sha256:3652516048bf4c314ce12be37423c79829f46efffb390ad64149a10c6071e8de \ + --hash=sha256:36990d629c3c9fb41e546414e5af52d0a7af37ce7113d9682c46d7e2919e4cca \ + --hash=sha256:3bed22e750d91d53d9e31e0af35a7b0b51367e974e14a4ff229db5b207647884 \ + --hash=sha256:3d86880ecaeb5b2f0a8afa63824de93adb8ebe4e49d0e51442532f4e08add7d6 \ + 
--hash=sha256:3e71a2105210366bfc398eef7f57a664df99194f3520edb88b9c3a7e46ee0d64 \ + --hash=sha256:3e81d89ece99b9ace23a6916880baca613c03a799925afb2857887efa8b1b3d2 \ + --hash=sha256:4484f4b7287bdaa7a5b3980f3c7224c3c622669405d20f69549f5fb956ad0421 \ + --hash=sha256:44b62345d8403975513af88da2f3d5cc76f73ca538ba46596f92a127c2aea945 \ + --hash=sha256:491444c081a54dcd5e6ada57314321ae526377f498d4aa09d975c3241c5b9e1c \ + --hash=sha256:4b4c678e7ed50f8ae8b8dbad15a865ee73ce12668b6aaf411bf3258b5bc3f970 \ + --hash=sha256:4b7177a1cdb3c51b02b0c0a256b0a72fdab719600a693e0e9037949efffb200b \ + --hash=sha256:4e1c28f51c1cf67eccdfc1065e8e866c9ed622f09773ca60947089c117f848a1 \ + --hash=sha256:52015cf73eb5d76f6404e0ce0505a69b51fd1f35810b3a01233b34b10baafb41 \ + --hash=sha256:5573f51e3f296a1bcf71e7a690c092845fb223072120f4bdb7a5b48e111def66 \ + --hash=sha256:573855ca2e58e35032aff30bfbd1ee103fbcf4472e4b28d4010757700918e326 \ + --hash=sha256:5a2acda37fc926ccc4547977ac3e56b1df48fe200de968e8c8421f6e3093df6c \ + --hash=sha256:5b8ea230c7f77c0a1a3208a04a1eda164633fb0767b4cefd65a01079b65e5b1f \ + --hash=sha256:5b8f381eadcd6ecaa143a21e9e80a26424c76a0a9b3d546febe6648f3a36a5ac \ + --hash=sha256:5bf4001d3293e3414d0cf99ff9b1139106e57c3a66dfff0c5f60b2a6286ec133 \ + --hash=sha256:5cebe13088b9254f6e615bcf1da9131d46cfa4e88039454aca9cb65f639bd3bc \ + --hash=sha256:61c692fb05956b17dd6d1ab480f7f10ad0536dba3bc8fd4e3c7263dc244ed772 \ + --hash=sha256:62ce42d9994446b307649cb2a23335fa8e927f7ab2cbf5fcb844d6acb4d85f9c \ + --hash=sha256:664eecc3abe6d916fa6cf8dd6b778e62fb264a70f3430a3180995bf2da935446 \ + --hash=sha256:67697efef5a98d46d5db7b1720fa4043536f8b8e5072a5d61cfca762f287e939 \ + --hash=sha256:683cfc70be0c1383449097cba637317e4737a357cfc185d887fd984206380403 \ + --hash=sha256:6a4996a2c8accc37976dc142d5991adf60733e223e5c9a2219e157dc6a8fd3a2 \ + --hash=sha256:73577a93e692b3474b1bfe84285d098de36705dbd838bb4d6a056d326e4dc880 \ + --hash=sha256:745c5fe6bf05df6a04bf2d11552c7d867a2690759e7ab6b05c318a772739bd75 \ + 
--hash=sha256:7b888b33cd14085d86176b1628ad2fcbff94cfbbe7809465097aa0132e58b018 \ + --hash=sha256:7d4fa6ccc3ec2e68a04f7b883d354d7fea22a34c44ce535a2f0c0049cf626ddf \ + --hash=sha256:7e21400b037be29545704889e72e586c238e346dcb2d08d8a7288d16c883a9ec \ + --hash=sha256:8679aa8a5b67976776d3c6b0521e99d1c34db8a312a12bcfd78a7085cb9b604e \ + --hash=sha256:8775036efe4ad2085975531d221535329f5dac99b6c2a854a995456098f99546 \ + --hash=sha256:8d04e101bba4b55cea9954e4aa71c24153ba6182481b487ff376da28d4ba46cf \ + --hash=sha256:9f82ff474103e26351dacfe8d50214e7c9322960d8d07ba7fa1d05ff981c8b2d \ + --hash=sha256:9fe51e4a1f896ea84ac750900eae34d9e9b896b5b1e4a30b02dc31ad29f36383 \ + --hash=sha256:a8041d2f9e8a742aeae96f4b047ee44e73619f4f9d24565e84d5446c623673b6 \ + --hash=sha256:aad1c774f4ebf0696a7f148a56d39a3432550612597331792528895258966dc0 \ + --hash=sha256:b10ad908118d38c2453ade7ff790e5bce36580c3742919007a2a78e3a1e521ca \ + --hash=sha256:b1e191c5c465fa777d4cafbaacf0c01e0d5278022082c0abbd2ee1d6454ed94d \ + --hash=sha256:b1ea1bbe77ecbc1be00af2769f4ae4a88ce93be57a4f3eebd91087898ed749f9 \ + --hash=sha256:bb658f703468d7fbb5dcc4037c65391b7dc34f808ac46ed9136c24fc5eeb041d \ + --hash=sha256:c05da79068dd96723793bffc8d0e64c45f316248417515f28d22204d9dae51c7 \ + --hash=sha256:c09fba33327c3ac11b5c33dbdd8218eef8990d78f83b1656d628831812a8c0fb \ + --hash=sha256:c12121e509b9f8b0914d10054d24120237d19e870b1cd82acbb8a9b9ddd198a3 \ + --hash=sha256:c32193fa08b2fbebf08fe08e84f8a0aad32d87c3ad42999c65e9449871b1c66e \ + --hash=sha256:ce08d4e112d0d38487c2b631ec8723deac9bc404e9c7b1011426af50a79999e4 \ + --hash=sha256:cf2e760978dcce7ff7d465cbc7e276c3157eedc4c27aa6de7b594c7a295d3d61 \ + --hash=sha256:d6be2b5ee7bea656c954dcf6aa8093c6f0e6a3ef9945c99d99fcbfc88c5c0bfe \ + --hash=sha256:e19e7dfa0d7ca7dea22be464339e18ac608fd75d88c56770c646cdabe54bc724 \ + --hash=sha256:e5b425aee54cc5e3e3c58f00731e8a33f5567965d478d516d35ef99fd648ab68 \ + --hash=sha256:f4b29b9aabe33fed5df0a85e5f13b09ff25e2c05bd5946d25270a8bd5682dac9 \ + 
--hash=sha256:f86e92275710bea3000cb79feca1762dc0ad3b27830dd1a74e82ab321d4ee464 + # via + # google-api-core + # googleapis-common-protos + # grpc-google-iam-v1 + # grpc-interceptor + # grpcio-status +grpcio-status==1.75.1 \ + --hash=sha256:8162afa21833a2085c91089cc395ad880fac1378a1d60233d976649ed724cbf8 \ + --hash=sha256:f681b301be26dcf7abf5c765d4a22e4098765e1a65cbdfa3efca384edf8e4e3c + # via google-api-core +idna==3.11 \ + --hash=sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea \ + --hash=sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902 + # via requests +importlib-metadata==8.7.1 \ + --hash=sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb \ + --hash=sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151 + # via opentelemetry-api +mako==1.3.10 \ + --hash=sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28 \ + --hash=sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59 + # via alembic +MarkupSafe==3.0.3 \ + --hash=sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f \ + --hash=sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a \ + --hash=sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf \ + --hash=sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19 \ + --hash=sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf \ + --hash=sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c \ + --hash=sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175 \ + --hash=sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219 \ + --hash=sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb \ + --hash=sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6 \ + --hash=sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab \ + 
--hash=sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26 \ + --hash=sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1 \ + --hash=sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce \ + --hash=sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218 \ + --hash=sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634 \ + --hash=sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695 \ + --hash=sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad \ + --hash=sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73 \ + --hash=sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c \ + --hash=sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe \ + --hash=sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa \ + --hash=sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559 \ + --hash=sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa \ + --hash=sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37 \ + --hash=sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758 \ + --hash=sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f \ + --hash=sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8 \ + --hash=sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d \ + --hash=sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c \ + --hash=sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97 \ + --hash=sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a \ + --hash=sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19 \ + --hash=sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9 \ + --hash=sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9 \ + 
--hash=sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc \ + --hash=sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2 \ + --hash=sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4 \ + --hash=sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354 \ + --hash=sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50 \ + --hash=sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698 \ + --hash=sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9 \ + --hash=sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b \ + --hash=sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc \ + --hash=sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115 \ + --hash=sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e \ + --hash=sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485 \ + --hash=sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f \ + --hash=sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12 \ + --hash=sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025 \ + --hash=sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009 \ + --hash=sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d \ + --hash=sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b \ + --hash=sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a \ + --hash=sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5 \ + --hash=sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f \ + --hash=sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d \ + --hash=sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1 \ + --hash=sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287 \ + 
--hash=sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6 \ + --hash=sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f \ + --hash=sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581 \ + --hash=sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed \ + --hash=sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b \ + --hash=sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c \ + --hash=sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026 \ + --hash=sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8 \ + --hash=sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676 \ + --hash=sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6 \ + --hash=sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e \ + --hash=sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d \ + --hash=sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d \ + --hash=sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01 \ + --hash=sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7 \ + --hash=sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419 \ + --hash=sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795 \ + --hash=sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1 \ + --hash=sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5 \ + --hash=sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d \ + --hash=sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42 \ + --hash=sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe \ + --hash=sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda \ + --hash=sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e \ + 
--hash=sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737 \ + --hash=sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523 \ + --hash=sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591 \ + --hash=sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc \ + --hash=sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a \ + --hash=sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50 + # via mako +opentelemetry-api==1.40.0 \ + --hash=sha256:159be641c0b04d11e9ecd576906462773eb97ae1b657730f0ecf64d32071569f \ + --hash=sha256:82dd69331ae74b06f6a874704be0cfaa49a1650e1537d4a813b86ecef7d0ecf9 + # via + # -r requirements.in + # opentelemetry-instrumentation + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-instrumentation==0.48b0 \ + --hash=sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35 \ + --hash=sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44 + # via -r requirements.in +opentelemetry-sdk==1.40.0 \ + --hash=sha256:18e9f5ec20d859d268c7cb3c5198c8d105d073714db3de50b593b8c1345a48f2 \ + --hash=sha256:787d2154a71f4b3d81f20524a8ce061b7db667d24e46753f32a7bc48f1c1f3f1 + # via -r requirements.in +opentelemetry-semantic-conventions==0.55b1 \ + --hash=sha256:5da81dfdf7d52e3d37f8fe88d5e771e191de924cfff5f550ab0b8f7b2409baed \ + --hash=sha256:ef95b1f009159c28d7a7849f5cbc71c4c34c845bb514d66adfdf1b3fff3598b3 + # via opentelemetry-sdk +packaging==26.0 \ + --hash=sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4 \ + --hash=sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529 + # via + # -r requirements.in + # build +pep517==0.13.1 \ + --hash=sha256:1b2fa2ffd3938bb4beffe5d6146cbcb2bda996a5a4da9f31abffd8b24e07b317 \ + --hash=sha256:31b206f67165b3536dd577c5c3f1518e8fbaf38cbc57efff8369a392feff1721 + # via -r requirements.in +pip-tools==7.5.3 \ + 
--hash=sha256:3aac0c473240ae90db7213c033401f345b05197293ccbdd2704e52e7a783785e \ + --hash=sha256:8fa364779ebc010cbfe17cb9de404457ac733e100840423f28f6955de7742d41 + # via -r requirements.in +proto-plus==1.27.1 \ + --hash=sha256:912a7460446625b792f6448bade9e55cd4e41e6ac10e27009ef71a7f317fa147 \ + --hash=sha256:e4643061f3a4d0de092d62aa4ad09fa4756b2cbb89d4627f3985018216f9fefc + # via + # google-api-core + # google-cloud-spanner +protobuf==7.34.0 \ + --hash=sha256:3871a3df67c710aaf7bb8d214cc997342e63ceebd940c8c7fc65c9b3d697591a \ + --hash=sha256:4a72a8ec94e7a9f7ef7fe818ed26d073305f347f8b3b5ba31e22f81fd85fca02 \ + --hash=sha256:8e329966799f2c271d5e05e236459fe1cbfdb8755aaa3b0914fa60947ddea408 \ + --hash=sha256:964cf977e07f479c0697964e83deda72bcbc75c3badab506fb061b352d991b01 \ + --hash=sha256:9d7a5005fb96f3c1e64f397f91500b0eb371b28da81296ae73a6b08a5b76cdd6 \ + --hash=sha256:9f9079f1dde4e32342ecbd1c118d76367090d4aaa19da78230c38101c5b3dd40 \ + --hash=sha256:e3b914dd77fa33fa06ab2baa97937746ab25695f389869afdf03e81f34e45dc7 \ + --hash=sha256:f791ec509707a1d91bd02e07df157e75e4fb9fbdad12a81b7396201ec244e2e3 + # via + # google-api-core + # google-cloud-spanner + # googleapis-common-protos + # grpc-google-iam-v1 + # grpcio-status + # proto-plus +pyasn1==0.6.1 \ + --hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \ + --hash=sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.2 \ + --hash=sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a \ + --hash=sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6 + # via google-auth +pyparsing==3.3.2 \ + --hash=sha256:850ba148bd908d7e2411587e247a1e4f0327839c40e2e5e6d05a007ecc69911d \ + --hash=sha256:c777f4d763f140633dcb6d8a3eda953bf7a214dc4eff598413c070bcdc117cbc + # via -r requirements.in +pyproject-hooks==1.2.0 \ + --hash=sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8 \ + 
--hash=sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913 + # via + # build + # pip-tools +requests==2.32.5 \ + --hash=sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 \ + --hash=sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf + # via google-api-core +rsa==4.9.1 \ + --hash=sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762 \ + --hash=sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75 + # via google-auth +SQLAlchemy==2.0.48 \ + --hash=sha256:01f6bbd4308b23240cf7d3ef117557c8fd097ec9549d5d8a52977544e35b40ad \ + --hash=sha256:07edba08061bc277bfdc772dd2a1a43978f5a45994dd3ede26391b405c15221e \ + --hash=sha256:10853a53a4a00417a00913d270dddda75815fcb80675874285f41051c094d7dd \ + --hash=sha256:1182437cb2d97988cfea04cf6cdc0b0bb9c74f4d56ec3d08b81e23d621a28cc6 \ + --hash=sha256:144921da96c08feb9e2b052c5c5c1d0d151a292c6135623c6b2c041f2a45f9e0 \ + --hash=sha256:1a89ce07ad2d4b8cfc30bd5889ec40613e028ed80ef47da7d9dd2ce969ad30e0 \ + --hash=sha256:1b4c575df7368b3b13e0cebf01d4679f9a28ed2ae6c1cd0b1d5beffb6b2007dc \ + --hash=sha256:1ccd42229aaac2df431562117ac7e667d702e8e44afdb6cf0e50fa3f18160f0b \ + --hash=sha256:2645b7d8a738763b664a12a1542c89c940daa55196e8d73e55b169cc5c99f65f \ + --hash=sha256:288937433bd44e3990e7da2402fabc44a3c6c25d3704da066b85b89a85474ae0 \ + --hash=sha256:34634e196f620c7a61d18d5cf7dc841ca6daa7961aed75d532b7e58b309ac894 \ + --hash=sha256:348174f228b99f33ca1f773e85510e08927620caa59ffe7803b37170df30332b \ + --hash=sha256:36ac4ddc3d33e852da9cb00ffb08cea62ca05c39711dc67062ca2bb1fae35fd8 \ + --hash=sha256:3713e21ea67bca727eecd4a24bf68bcd414c403faae4989442be60994301ded0 \ + --hash=sha256:389b984139278f97757ea9b08993e7b9d1142912e046ab7d82b3fbaeb0209131 \ + --hash=sha256:426c5ca86415d9b8945c7073597e10de9644802e2ff502b8e1f11a7a2642856b \ + --hash=sha256:4599a95f9430ae0de82b52ff0d27304fe898c17cb5f4099f7438a51b9998ac77 \ + 
--hash=sha256:49b7bddc1eebf011ea5ab722fdbe67a401caa34a350d278cc7733c0e88fecb1f \ + --hash=sha256:53667b5f668991e279d21f94ccfa6e45b4e3f4500e7591ae59a8012d0f010dcb \ + --hash=sha256:546572a1793cc35857a2ffa1fe0e58571af1779bcc1ffa7c9fb0839885ed69a9 \ + --hash=sha256:583849c743e0e3c9bb7446f5b5addeacedc168d657a69b418063dfdb2d90081c \ + --hash=sha256:5aee45fd2c6c0f2b9cdddf48c48535e7471e42d6fb81adfde801da0bd5b93241 \ + --hash=sha256:5b193a7e29fd9fa56e502920dca47dffe60f97c863494946bd698c6058a55658 \ + --hash=sha256:5ca74f37f3369b45e1f6b7b06afb182af1fd5dde009e4ffd831830d98cbe5fe7 \ + --hash=sha256:68549c403f79a8e25984376480959975212a670405e3913830614432b5daa07a \ + --hash=sha256:69f5bc24904d3bc3640961cddd2523e361257ef68585d6e364166dfbe8c78fae \ + --hash=sha256:6bb85c546591569558571aa1b06aba711b26ae62f111e15e56136d69920e1616 \ + --hash=sha256:6f7b7243850edd0b8b97043f04748f31de50cf426e939def5c16bedb540698f7 \ + --hash=sha256:7001dc9d5f6bb4deb756d5928eaefe1930f6f4179da3924cbd95ee0e9f4dce89 \ + --hash=sha256:7a936f1bb23d370b7c8cc079d5fce4c7d18da87a33c6744e51a93b0f9e97e9b3 \ + --hash=sha256:7c998f2ace8bf76b453b75dbcca500d4f4b9dd3908c13e89b86289b37784848b \ + --hash=sha256:7cddca31edf8b0653090cbb54562ca027c421c58ddde2c0685f49ff56a1690e0 \ + --hash=sha256:8183dc57ae7d9edc1346e007e840a9f3d6aa7b7f165203a99e16f447150140d2 \ + --hash=sha256:82745b03b4043e04600a6b665cb98697c4339b24e34d74b0a2ac0a2488b6f94d \ + --hash=sha256:841a94c66577661c1f088ac958cd767d7c9bf507698f45afffe7a4017049de76 \ + --hash=sha256:858e433f12b0e5b3ed2f8da917433b634f4937d0e8793e5cb33c54a1a01df565 \ + --hash=sha256:908a3fa6908716f803b86896a09a2c4dde5f5ce2bb07aacc71ffebb57986ce99 \ + --hash=sha256:9764014ef5e58aab76220c5664abb5d47d5bc858d9debf821e55cfdd0f128485 \ + --hash=sha256:9c7d0a77e36b5f4b01ca398482230ab792061d243d715299b44a0b55c89fe617 \ + --hash=sha256:a5b429eb84339f9f05e06083f119ad814e6d85e27ecbdf9c551dfdbb128eaf8a \ + --hash=sha256:a66fe406437dd65cacd96a72689a3aaaecaebbcd62d81c5ac1c0fdbeac835096 \ + 
--hash=sha256:a6b764fb312bd35e47797ad2e63f0d323792837a6ac785a4ca967019357d2bc7 \ + --hash=sha256:b19151e76620a412c2ac1c6f977ab1b9fa7ad43140178345136456d5265b32ed \ + --hash=sha256:b8438ec5594980d405251451c5b7ea9aa58dda38eb7ac35fb7e4c696712ee24f \ + --hash=sha256:b8fc3454b4f3bd0a368001d0e968852dad45a873f8b4babd41bc302ec851a099 \ + --hash=sha256:bcb8ebbf2e2c36cfe01a94f2438012c6a9d494cf80f129d9753bcdf33bfc35a6 \ + --hash=sha256:d404dc897ce10e565d647795861762aa2d06ca3f4a728c5e9a835096c7059018 \ + --hash=sha256:d612c976cbc2d17edfcc4c006874b764e85e990c29ce9bd411f926bbfb02b9a2 \ + --hash=sha256:d64177f443594c8697369c10e4bbcac70ef558e0f7921a1de7e4a3d1734bcf67 \ + --hash=sha256:d854b3970067297f3a7fbd7a4683587134aa9b3877ee15aa29eea478dc68f933 \ + --hash=sha256:d8fcccbbc0c13c13702c471da398b8cd72ba740dca5859f148ae8e0e8e0d3e7e \ + --hash=sha256:e004aa9248e8cb0a5f9b96d003ca7c1c0a5da8decd1066e7b53f59eb8ce7c62b \ + --hash=sha256:e214d546c8ecb5fc22d6e6011746082abf13a9cf46eefb45769c7b31407c97b5 \ + --hash=sha256:e2d0d88686e3d35a76f3e15a34e8c12d73fc94c1dea1cd55782e695cc14086dd \ + --hash=sha256:e2f35b4cccd9ed286ad62e0a3c3ac21e06c02abc60e20aa51a3e305a30f5fa79 \ + --hash=sha256:e3070c03701037aa418b55d36532ecb8f8446ed0135acb71c678dbdf12f5b6e4 \ + --hash=sha256:e5e088bf43f6ee6fec7dbf1ef7ff7774a616c236b5c0cb3e00662dd71a56b571 \ + --hash=sha256:e83e3f959aaa1c9df95c22c528096d94848a1bc819f5d0ebf7ee3df0ca63db6c \ + --hash=sha256:f0dcbc588cd5b725162c076eb9119342f6579c7f7f55057bb7e3c6ff27e13121 \ + --hash=sha256:f27f9da0a7d22b9f981108fd4b62f8b5743423388915a563e651c20d06c1f457 \ + --hash=sha256:f8649a14caa5f8a243628b1d61cf530ad9ae4578814ba726816adb1121fc493e \ + --hash=sha256:fac0fa4e4f55f118fd87177dacb1c6522fe39c28d498d259014020fec9164c29 \ + --hash=sha256:fd08b90d211c086181caed76931ecfa2bdfc83eea3cfccdb0f82abc6c4b876cb + # via + # -r requirements.in + # alembic +sqlparse==0.5.3 \ + --hash=sha256:09f67787f56a0b16ecdbde1bfc7f5d9c3371ca683cfeaa8e6ff60b4807ec9272 \ + 
--hash=sha256:cf2196ed3418f3ba5de6af7e82c694a9fbdbfecccdfc72e281548517081f16ca + # via google-cloud-spanner +tomli==2.4.0 \ + --hash=sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729 \ + --hash=sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b \ + --hash=sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d \ + --hash=sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df \ + --hash=sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576 \ + --hash=sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d \ + --hash=sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1 \ + --hash=sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a \ + --hash=sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e \ + --hash=sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc \ + --hash=sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702 \ + --hash=sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6 \ + --hash=sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd \ + --hash=sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4 \ + --hash=sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776 \ + --hash=sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a \ + --hash=sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66 \ + --hash=sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87 \ + --hash=sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2 \ + --hash=sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f \ + --hash=sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475 \ + --hash=sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f \ + 
--hash=sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95 \ + --hash=sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9 \ + --hash=sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3 \ + --hash=sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9 \ + --hash=sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76 \ + --hash=sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da \ + --hash=sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8 \ + --hash=sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51 \ + --hash=sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86 \ + --hash=sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8 \ + --hash=sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0 \ + --hash=sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b \ + --hash=sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1 \ + --hash=sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e \ + --hash=sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d \ + --hash=sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c \ + --hash=sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867 \ + --hash=sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a \ + --hash=sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c \ + --hash=sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0 \ + --hash=sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4 \ + --hash=sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614 \ + --hash=sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132 \ + --hash=sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa \ + 
--hash=sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087 + # via -r requirements.in +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 + # via + # alembic + # opentelemetry-api + # opentelemetry-sdk + # opentelemetry-semantic-conventions + # sqlalchemy +urllib3==2.5.0 \ + --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ + --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc + # via requests +wheel==0.45.1 \ + --hash=sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729 \ + --hash=sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248 + # via pip-tools +wrapt==1.17.3 \ + --hash=sha256:02b551d101f31694fc785e58e0720ef7d9a10c4e62c1c9358ce6f63f23e30a56 \ + --hash=sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828 \ + --hash=sha256:0610b46293c59a3adbae3dee552b648b984176f8562ee0dba099a56cfbe4df1f \ + --hash=sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396 \ + --hash=sha256:0b1831115c97f0663cb77aa27d381237e73ad4f721391a9bfb2fe8bc25fa6e77 \ + --hash=sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d \ + --hash=sha256:0f5f51a6466667a5a356e6381d362d259125b57f059103dd9fdc8c0cf1d14139 \ + --hash=sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7 \ + --hash=sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb \ + --hash=sha256:1f23fa283f51c890eda8e34e4937079114c74b4c81d2b2f1f1d94948f5cc3d7f \ + --hash=sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f \ + --hash=sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067 \ + --hash=sha256:24c2ed34dc222ed754247a2702b1e1e89fdbaa4016f324b4b8f1a802d4ffe87f \ + --hash=sha256:273a736c4645e63ac582c60a56b0acb529ef07f78e08dc6bfadf6a46b19c0da7 \ + 
--hash=sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b \ + --hash=sha256:30ce38e66630599e1193798285706903110d4f057aab3168a34b7fdc85569afc \ + --hash=sha256:33486899acd2d7d3066156b03465b949da3fd41a5da6e394ec49d271baefcf05 \ + --hash=sha256:343e44b2a8e60e06a7e0d29c1671a0d9951f59174f3709962b5143f60a2a98bd \ + --hash=sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7 \ + --hash=sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9 \ + --hash=sha256:3e62d15d3cfa26e3d0788094de7b64efa75f3a53875cdbccdf78547aed547a81 \ + --hash=sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977 \ + --hash=sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa \ + --hash=sha256:46acc57b331e0b3bcb3e1ca3b421d65637915cfcd65eb783cb2f78a511193f9b \ + --hash=sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe \ + --hash=sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58 \ + --hash=sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8 \ + --hash=sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77 \ + --hash=sha256:5531d911795e3f935a9c23eb1c8c03c211661a5060aab167065896bbf62a5f85 \ + --hash=sha256:55cbbc356c2842f39bcc553cf695932e8b30e30e797f961860afb308e6b1bb7c \ + --hash=sha256:59923aa12d0157f6b82d686c3fd8e1166fa8cdfb3e17b42ce3b6147ff81528df \ + --hash=sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454 \ + --hash=sha256:5a7b3c1ee8265eb4c8f1b7d29943f195c00673f5ab60c192eba2d4a7eae5f46a \ + --hash=sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e \ + --hash=sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c \ + --hash=sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6 \ + --hash=sha256:656873859b3b50eeebe6db8b1455e99d90c26ab058db8e427046dbc35c3140a5 \ + --hash=sha256:65d1d00fbfb3ea5f20add88bbc0f815150dbbde3b026e6c24759466c8b5a9ef9 \ + 
--hash=sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd \ + --hash=sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277 \ + --hash=sha256:70d86fa5197b8947a2fa70260b48e400bf2ccacdcab97bb7de47e3d1e6312225 \ + --hash=sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22 \ + --hash=sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116 \ + --hash=sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16 \ + --hash=sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc \ + --hash=sha256:758895b01d546812d1f42204bd443b8c433c44d090248bf22689df673ccafe00 \ + --hash=sha256:79573c24a46ce11aab457b472efd8d125e5a51da2d1d24387666cd85f54c05b2 \ + --hash=sha256:7e18f01b0c3e4a07fe6dfdb00e29049ba17eadbc5e7609a2a3a4af83ab7d710a \ + --hash=sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804 \ + --hash=sha256:88bbae4d40d5a46142e70d58bf664a89b6b4befaea7b2ecc14e03cedb8e06c04 \ + --hash=sha256:8cccf4f81371f257440c88faed6b74f1053eef90807b77e31ca057b2db74edb1 \ + --hash=sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba \ + --hash=sha256:a36692b8491d30a8c75f1dfee65bef119d6f39ea84ee04d9f9311f83c5ad9390 \ + --hash=sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0 \ + --hash=sha256:a7c06742645f914f26c7f1fa47b8bc4c91d222f76ee20116c43d5ef0912bba2d \ + --hash=sha256:a9a2203361a6e6404f80b99234fe7fb37d1fc73487b5a78dc1aa5b97201e0f22 \ + --hash=sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0 \ + --hash=sha256:ad85e269fe54d506b240d2d7b9f5f2057c2aa9a2ea5b32c66f8902f768117ed2 \ + --hash=sha256:af338aa93554be859173c39c85243970dc6a289fa907402289eeae7543e1ae18 \ + --hash=sha256:afd964fd43b10c12213574db492cb8f73b2f0826c8df07a68288f8f19af2ebe6 \ + --hash=sha256:b32888aad8b6e68f83a8fdccbf3165f5469702a7544472bdf41f582970ed3311 \ + --hash=sha256:c31eebe420a9a5d2887b13000b043ff6ca27c452a9a22fa71f35f118e8d4bf89 \ + 
--hash=sha256:caea3e9c79d5f0d2c6d9ab96111601797ea5da8e6d0723f77eabb0d4068d2b2f \ + --hash=sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39 \ + --hash=sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4 \ + --hash=sha256:d8a210b158a34164de8bb68b0e7780041a903d7b00c87e906fb69928bf7890d5 \ + --hash=sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa \ + --hash=sha256:df7d30371a2accfe4013e90445f6388c570f103d61019b6b7c57e0265250072a \ + --hash=sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050 \ + --hash=sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6 \ + --hash=sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235 \ + --hash=sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056 \ + --hash=sha256:e6b13af258d6a9ad602d57d889f83b9d5543acd471eee12eb51f5b01f8eb1bc2 \ + --hash=sha256:e6f40a8aa5a92f150bdb3e1c44b7e98fb7113955b2e5394122fa5532fec4b418 \ + --hash=sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c \ + --hash=sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a \ + --hash=sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6 \ + --hash=sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0 \ + --hash=sha256:f9b2601381be482f70e5d1051a5965c25fb3625455a2bf520b5a077b22afb775 \ + --hash=sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10 \ + --hash=sha256:fd341868a4b6714a5962c1af0bd44f7c404ef78720c7de4892901e540417111c + # via opentelemetry-instrumentation +zipp==3.23.0 \ + --hash=sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e \ + --hash=sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166 + # via importlib-metadata + +# WARNING: The following packages were not pinned, but pip requires them to be +# pinned when the requirements file includes hashes and the requirement is not +# satisfied by a 
package already installed. Consider using the --allow-unsafe flag. +# pip +# setuptools diff --git a/packages/sqlalchemy-spanner/samples/README.md b/packages/sqlalchemy-spanner/samples/README.md new file mode 100644 index 000000000000..7ebf7e9e0969 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/README.md @@ -0,0 +1,30 @@ +# Spanner SQLAlchemy Samples + +This folder contains samples for how to use common Spanner features with SQLAlchemy. The samples use +a shared [data model](model.py) and can be executed as a standalone application. The samples +automatically start the [Spanner Emulator](https://cloud.google.com/spanner/docs/emulator) in a +Docker container when they are executed. You must therefore have Docker installed on your system to +run a sample. + +You can run a sample with `nox`: + +```shell +nox -s hello_world +``` + +Change `hello_world` to run any of the other sample names. The runnable samples all end with +`_sample.py`. Omit the `_sample.py` part of the file name to run the sample. + + + +| Sample name | Description | +|-----------------------|-----------------------------------------------------------------------------| +| bit_reversed_sequence | Use a bit-reversed sequence for primary key generation. | +| date_and_timestamp | Map Spanner DATE and TIMESTAMP columns to SQLAlchemy. | +| default_column_value | Create and use a Spanner DEFAULT column constraint in SQLAlchemy. | +| generated_column | Create and use a Spanner generated column in SQLAlchemy. | +| hello_world | Shows how to connect to Spanner with SQLAlchemy and execute a simple query. | +| insert_data | Insert multiple rows to Spanner with SQLAlchemy. | +| interleaved_table | Create and use an interleaved table (INTERLEAVE IN PARENT) with SQLAlchemy. | +| transaction | Execute a read/write transaction on Spanner with SQLAlchemy. 
| + diff --git a/packages/sqlalchemy-spanner/samples/__init__.py b/packages/sqlalchemy-spanner/samples/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/sqlalchemy-spanner/samples/auto_generated_primary_key_sample.py b/packages/sqlalchemy-spanner/samples/auto_generated_primary_key_sample.py new file mode 100644 index 000000000000..6c74be6f7d19 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/auto_generated_primary_key_sample.py @@ -0,0 +1,64 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Venue + + +# Shows how to use an IDENTITY column for primary key generation. IDENTITY +# columns use a backing bit-reversed sequence to generate unique values that are +# safe to use for primary keys in Spanner. +# +# IDENTITY columns are used by default by the Spanner SQLAlchemy dialect for +# standard primary key columns. +# +# id: Mapped[int] = mapped_column(primary_key=True) +# +# This leads to the following table definition: +# +# CREATE TABLE ticket_sales ( +# id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), +# ... 
+# ) PRIMARY KEY (id) +def auto_generated_primary_key_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + + # Add a line like the following to use AUTO_INCREMENT instead of IDENTITY + # when creating tables in SQLAlchemy. + # https://cloud.google.com/spanner/docs/primary-key-default-value#serial-auto-increment + + # engine.dialect.use_auto_increment = True + # Base.metadata.create_all(engine) + + with Session(engine) as session: + # Venue automatically generates a primary key value using an IDENTITY + # column. We therefore do not need to specify a primary key value when + # we create an instance of Venue. + venue = Venue(code="CH", name="Concert Hall", active=True) + session.add_all([venue]) + session.commit() + + print("Inserted a venue with ID %d" % venue.id) + + +if __name__ == "__main__": + run_sample(auto_generated_primary_key_sample) diff --git a/packages/sqlalchemy-spanner/samples/bit_reversed_sequence_sample.py b/packages/sqlalchemy-spanner/samples/bit_reversed_sequence_sample.py new file mode 100644 index 000000000000..bd7d80ceb52e --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/bit_reversed_sequence_sample.py @@ -0,0 +1,70 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Concert, Venue, TicketSale + + +# Shows how to use a bit-reversed sequence for primary key generation. +# +# The TicketSale model uses a bit-reversed sequence for automatic primary key +# generation: +# +# id: Mapped[int] = mapped_column( +# BigInteger, +# Sequence("ticket_sale_id"), +# server_default=TextClause("GET_NEXT_SEQUENCE_VALUE(SEQUENCE ticket_sale_id)"), +# primary_key=True, +# ) +# +# This leads to the following table definition: +# +# CREATE TABLE ticket_sales ( +# id INT64 NOT NULL DEFAULT (GET_NEXT_SEQUENCE_VALUE(SEQUENCE ticket_sale_id)), +# ... +# ) PRIMARY KEY (id) +def bit_reversed_sequence_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + singer = Singer(id=str(uuid.uuid4()), first_name="John", last_name="Doe") + venue = Venue(code="CH", name="Concert Hall", active=True) + concert = Concert( + venue=venue, + start_time=datetime.datetime(2024, 11, 7, 19, 30, 0), + singer=singer, + title="John Doe - Live in Concert Hall", + ) + # TicketSale automatically generates a primary key value using a + # bit-reversed sequence. We therefore do not need to specify a primary + # key value when we create an instance of TicketSale. 
+ ticket_sale = TicketSale( + concert=concert, customer_name="Alice Doe", seats=["A010", "A011", "A012"] + ) + session.add_all([singer, venue, concert, ticket_sale]) + session.commit() + + +if __name__ == "__main__": + run_sample(bit_reversed_sequence_sample) diff --git a/packages/sqlalchemy-spanner/samples/conftest.py b/packages/sqlalchemy-spanner/samples/conftest.py new file mode 100644 index 000000000000..298d81604285 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/conftest.py @@ -0,0 +1,105 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import configparser +import datetime +import os +import uuid + +import pytest + +from sqlalchemy import ( + Column, + Integer, + MetaData, + String, + Table, + create_engine, + ForeignKey, +) + + +@pytest.fixture +def db_url(): + project = os.getenv( + "GOOGLE_CLOUD_PROJECT", + os.getenv("PROJECT_ID", "emulator-test-project"), + ) + db_url = ( + f"spanner:///projects/{project}/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + return config.get("db", "default", fallback=db_url) + + +@pytest.fixture +def table_id(): + now = datetime.datetime.now() + table_id = "example_table_{}_{}".format( + now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8] + ) + return table_id + + +@pytest.fixture +def table(db_url, table_id): + engine = create_engine(db_url) + metadata = MetaData(bind=engine) + + table = Table( + table_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + table.create() + yield table + table.drop() + + +@pytest.fixture +def table_w_foreign_key(db_url, table): + engine = create_engine(db_url) + metadata = MetaData(bind=engine) + + table_fk = Table( + "table_fk", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(16), nullable=False), + Column( + table.name + "_user_id", + Integer, + ForeignKey(table.c.user_id, name=table.name + "user_id"), + ), + ) + table_fk.create() + yield table_fk + table_fk.drop() + + +@pytest.fixture +def connection(db_url): + engine = create_engine(db_url) + return engine.connect() + + +def insert_data(conn, table, data): + conn.execute(table.insert(), data) diff --git a/packages/sqlalchemy-spanner/samples/database_role_sample.py b/packages/sqlalchemy-spanner/samples/database_role_sample.py new file mode 100644 index 000000000000..6edd8040ad75 --- /dev/null +++ 
b/packages/sqlalchemy-spanner/samples/database_role_sample.py @@ -0,0 +1,42 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer + + +# Shows how to set the database role for a connection. +def database_role_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + # You can set the database role in the connect arguments. + connect_args={"database_role": "my_role"}, + echo=True, + ) + with Session(engine) as session: + singer_id = str(uuid.uuid4()) + singer = Singer(id=singer_id, first_name="John", last_name="Doe") + session.add(singer) + session.commit() + + +if __name__ == "__main__": + run_sample(database_role_sample) diff --git a/packages/sqlalchemy-spanner/samples/date_and_timestamp_sample.py b/packages/sqlalchemy-spanner/samples/date_and_timestamp_sample.py new file mode 100644 index 000000000000..442ec6117492 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/date_and_timestamp_sample.py @@ -0,0 +1,64 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Concert, Venue + + +# Shows how to map and use the DATE and TIMESTAMP data types in Spanner. +def date_and_timestamp_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + # Singer has a property birthdate, which is mapped to a DATE column. + # Use the datetime.date type for this. + singer = Singer( + id=str(uuid.uuid4()), + first_name="John", + last_name="Doe", + birthdate=datetime.date(1979, 10, 14), + ) + venue = Venue(code="CH", name="Concert Hall", active=True) + # Concert has a property `start_time`, which is mapped to a TIMESTAMP + # column. Use the datetime.datetime type for this. + concert = Concert( + venue=venue, + start_time=datetime.datetime(2024, 11, 7, 19, 30, 0), + singer=singer, + title="John Doe - Live in Concert Hall", + ) + session.add_all([singer, venue, concert]) + session.commit() + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. + session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + print( + f"{singer.full_name}, born on {singer.birthdate}, has planned " + f"a concert that starts on {concert.start_time} in {venue.name}." 
+ ) + + +if __name__ == "__main__": + run_sample(date_and_timestamp_sample) diff --git a/packages/sqlalchemy-spanner/samples/default_column_value_sample.py b/packages/sqlalchemy-spanner/samples/default_column_value_sample.py new file mode 100644 index 000000000000..82ecf566f6f4 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/default_column_value_sample.py @@ -0,0 +1,61 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Album, Track + + +# Shows how to use a default column with SQLAlchemy and Spanner. +def default_column_value_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + # The Track model has a `recorded_at` property that is set to + # CURRENT_TIMESTAMP if no other value is supplied. + singer = Singer(id=str(uuid.uuid4()), first_name="John", last_name="Doe") + album = Album(id=str(uuid.uuid4()), title="My album", singer=singer) + + # This track will use the default CURRENT_TIMESTAMP for the recorded_at + # property. 
+ track1 = Track( + id=str(uuid.uuid4()), + track_number=1, + title="My track 1", + album=album, + ) + track2 = Track( + id=str(uuid.uuid4()), + track_number=2, + title="My track 2", + recorded_at=datetime.datetime(2024, 11, 7, 10, 0, 0), + album=album, + ) + session.add_all([singer, album, track1, track2]) + session.commit() + print(f"Track 1 was recorded at: " f"{track1.recorded_at}") + print(f"Track 2 was recorded at: " f"{track2.recorded_at}") + + +if __name__ == "__main__": + run_sample(default_column_value_sample) diff --git a/packages/sqlalchemy-spanner/samples/generated_column_sample.py b/packages/sqlalchemy-spanner/samples/generated_column_sample.py new file mode 100644 index 000000000000..fe6b157ba287 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/generated_column_sample.py @@ -0,0 +1,50 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer + + +# Shows how to use a generated column with SQLAlchemy and Spanner. +def generated_column_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + # The Singer model has a `full_name` property that is generated by the + # database. 
+ singer = Singer(id=str(uuid.uuid4()), first_name="John", last_name="Doe") + session.add(singer) + session.commit() + print( + f"The database generated a full name for the singer: " f"{singer.full_name}" + ) + + # Updating the first name or last name of the singer will also update + # the generated full name property. + singer.last_name = "Jones" + session.commit() + print(f"Updated full name for singer: " f"{singer.full_name}") + + +if __name__ == "__main__": + run_sample(generated_column_sample) diff --git a/packages/sqlalchemy-spanner/samples/hello_world_sample.py b/packages/sqlalchemy-spanner/samples/hello_world_sample.py new file mode 100644 index 000000000000..57f676c14cb1 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/hello_world_sample.py @@ -0,0 +1,31 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import create_engine, select, text +from sample_helper import run_sample + + +def quickstart(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database" + ) + with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as connection: + results = connection.execute(select(text("'Hello World!'"))).fetchall() + print("\nMessage from Spanner: ", results[0][0], "\n") + + +if __name__ == "__main__": + run_sample(quickstart) diff --git a/packages/sqlalchemy-spanner/samples/informational_fk_sample.py b/packages/sqlalchemy-spanner/samples/informational_fk_sample.py new file mode 100644 index 000000000000..4e330dae5ca4 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/informational_fk_sample.py @@ -0,0 +1,92 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Concert, Venue, TicketSale + + +# Shows how to create a non-enforced foreign key. +# +# The TicketSale model contains two foreign keys that are not enforced by Spanner. +# This allows the related records to be deleted without the need to delete the +# corresponding TicketSale record. 
+# +# __table_args__ = ( +# ForeignKeyConstraint( +# ["venue_code", "start_time", "singer_id"], +# ["concerts.venue_code", "concerts.start_time", "concerts.singer_id"], +# spanner_not_enforced=True, +# ), +# ) +# singer_id: Mapped[str] = mapped_column(String(36), ForeignKey("singers.id", spanner_not_enforced=True)) +# +# See https://cloud.google.com/spanner/docs/foreign-keys/overview#informational-foreign-keys +# for more information on informational foreign key constrains. +def informational_fk_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # First create a singer, venue, concert and ticket_sale. + singer_id = str(uuid.uuid4()) + ticket_sale_id = None + with Session(engine) as session: + singer = Singer(id=singer_id, first_name="John", last_name="Doe") + venue = Venue(code="CH", name="Concert Hall", active=True) + concert = Concert( + venue=venue, + start_time=datetime.datetime(2024, 11, 7, 19, 30, 0), + singer=singer, + title="John Doe - Live in Concert Hall", + ) + ticket_sale = TicketSale( + concert=concert, customer_name="Alice Doe", seats=["A010", "A011", "A012"] + ) + session.add_all([singer, venue, concert, ticket_sale]) + session.commit() + ticket_sale_id = ticket_sale.id + + # Now delete both the singer and concert that are referenced by the ticket_sale record. + # This is possible as the foreign key constraints between ticket_sales and singers/concerts + # are not enforced. + with Session(engine) as session: + session.delete(concert) + session.delete(singer) + session.commit() + + # Verify that the ticket_sale record still exists, while the concert and singer have been + # deleted. 
+ with Session(engine) as session: + ticket_sale = session.get(TicketSale, ticket_sale_id) + singer = session.get(Singer, singer_id) + concert = session.get( + Concert, ("CH", datetime.datetime(2024, 11, 7, 19, 30, 0), singer_id) + ) + print( + "Ticket sale found: {}\nSinger found: {}\nConcert found: {}\n".format( + ticket_sale is not None, singer is not None, concert is not None + ) + ) + + +if __name__ == "__main__": + run_sample(informational_fk_sample) diff --git a/packages/sqlalchemy-spanner/samples/insert_data_sample.py b/packages/sqlalchemy-spanner/samples/insert_data_sample.py new file mode 100644 index 000000000000..c2e25ad23d7c --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/insert_data_sample.py @@ -0,0 +1,74 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Album, Track + + +# Shows how to insert data using SQLAlchemy, including relationships that are +# defined both as foreign keys and as interleaved tables. 
+def insert_data(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + singer = Singer( + id=str(uuid.uuid4()), + first_name="John", + last_name="Smith", + albums=[ + Album( + id=str(uuid.uuid4()), + title="Rainforest", + tracks=[ + # Track is INTERLEAVED IN PARENT Album, but can be treated + # as a normal relationship in SQLAlchemy. + Track(track_number=1, title="Green"), + Track(track_number=2, title="Blue"), + Track(track_number=3, title="Yellow"), + ], + ), + Album( + id=str(uuid.uuid4()), + title="Butterflies", + tracks=[ + Track(track_number=1, title="Purple"), + Track(track_number=2, title="Cyan"), + Track(track_number=3, title="Mauve"), + ], + ), + ], + ) + session.add(singer) + session.commit() + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. + session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + print( + f"Inserted singer {singer.full_name} with {len(singer.albums)} " + f"albums successfully" + ) + + +if __name__ == "__main__": + run_sample(insert_data) diff --git a/packages/sqlalchemy-spanner/samples/insert_or_ignore_sample.py b/packages/sqlalchemy-spanner/samples/insert_or_ignore_sample.py new file mode 100644 index 000000000000..065bb059090d --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/insert_or_ignore_sample.py @@ -0,0 +1,55 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine, select +from sqlalchemy.orm import Session + +from google.cloud.sqlalchemy_spanner.dml import insert_or_ignore +from sample_helper import run_sample +from model import Singer + + +# Shows how to use insert-or-ignore using SQLAlchemy and Spanner. +def insert_or_ignore_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + stmt = ( + insert_or_ignore(Singer) + .values( + id=str(uuid.uuid4()), + first_name="John", + last_name="Smith", + ) + .returning(Singer.id) + ) + singer_id = session.execute(stmt).scalar() + print(singer_id) + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. + session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + stmt = select(Singer).where(Singer.id == singer_id) + singer = session.execute(stmt).scalar() + print(f"Inserted or ignored singer {singer.full_name} successfully") + + +if __name__ == "__main__": + run_sample(insert_or_ignore_sample) diff --git a/packages/sqlalchemy-spanner/samples/insert_or_update_sample.py b/packages/sqlalchemy-spanner/samples/insert_or_update_sample.py new file mode 100644 index 000000000000..75c2f53b6a63 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/insert_or_update_sample.py @@ -0,0 +1,55 @@ +# Copyright 2024 Google LLC All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine, select +from sqlalchemy.orm import Session + +from google.cloud.sqlalchemy_spanner.dml import insert_or_update +from sample_helper import run_sample +from model import Singer + + +# Shows how to use insert-or-update using SQLAlchemy and Spanner. +def insert_or_update_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + stmt = ( + insert_or_update(Singer) + .values( + id=str(uuid.uuid4()), + first_name="John", + last_name="Smith", + ) + .returning(Singer.id) + ) + singer_id = session.execute(stmt).scalar() + print(singer_id) + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. 
+ session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + stmt = select(Singer).where(Singer.id == singer_id) + singer = session.execute(stmt).scalar() + print(f"Inserted or updated singer {singer.full_name} successfully") + + +if __name__ == "__main__": + run_sample(insert_or_update_sample) diff --git a/packages/sqlalchemy-spanner/samples/insertmany_sample.py b/packages/sqlalchemy-spanner/samples/insertmany_sample.py new file mode 100644 index 000000000000..859bc1588d69 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/insertmany_sample.py @@ -0,0 +1,84 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from datetime import datetime +import uuid +from sqlalchemy import text, String, create_engine +from sqlalchemy.orm import DeclarativeBase, Session +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column +from sample_helper import run_sample + + +class Base(DeclarativeBase): + pass + + +# To use SQLAlchemy 2.0's insertmany feature, models must have a +# unique column marked as an "insert_sentinal" with client-side +# generated values passed into it. 
This allows SQLAlchemy to perform a +# single bulk insert, even if the table has columns with server-side +# defaults which must be retrieved from a THEN RETURN clause, for +# operations like: +# +# with Session.begin() as session: +# session.add(Singer(name="a")) +# session.add(Singer(name="b")) +# +# Read more in the SQLAlchemy documentation of this feature: +# https://docs.sqlalchemy.org/en/20/core/connections.html#configuring-sentinel-columns + + +class Singer(Base): + __tablename__ = "singers_with_sentinel" + id: Mapped[str] = mapped_column( + String(36), + primary_key=True, + # Supply a unique UUID client-side + default=lambda: str(uuid.uuid4()), + # The column is unique and can be used as an insert_sentinel + insert_sentinel=True, + # Set a server-side default for write outside SQLAlchemy + server_default=text("GENERATE_UUID()"), + ) + name: Mapped[str] + inserted_at: Mapped[datetime] = mapped_column( + server_default=text("CURRENT_TIMESTAMP()") + ) + + +# Shows how to insert data using SQLAlchemy, including relationships that are +# defined both as foreign keys and as interleaved tables. +def insertmany(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # Create the sample table. + Base.metadata.create_all(engine) + + # Insert two singers in one session. These two singers will be inserted using + # a single INSERT statement with a THEN RETURN clause to return the generated + # creation timestamp. 
+ with Session(engine) as session: + session.add(Singer(name="John Smith")) + session.add(Singer(name="Jane Smith")) + session.commit() + + +if __name__ == "__main__": + run_sample(insertmany) diff --git a/packages/sqlalchemy-spanner/samples/interleaved_table_sample.py b/packages/sqlalchemy-spanner/samples/interleaved_table_sample.py new file mode 100644 index 000000000000..2b0091015859 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/interleaved_table_sample.py @@ -0,0 +1,99 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Album, Track + + +# Shows how INTERLEAVE IN PARENT can be used in SQLAlchemy. +# INTERLEAVE IN PARENT can be modelled as if it were a normal relationship +# in SQLAlchemy. SQLAlchemy can also generate the correct DDL for this. 
+# +# This sample uses the following table structure: +# CREATE TABLE albums ( +# id STRING(36) NOT NULL, +# title STRING(200) NOT NULL, +# release_date DATE, +# singer_id STRING(36) NOT NULL, +# FOREIGN KEY(singer_id) REFERENCES singers (id) +# ) PRIMARY KEY (id); +# +# CREATE TABLE tracks ( +# id STRING(36) NOT NULL, +# track_number INT64 NOT NULL, +# title STRING(200) NOT NULL, +# duration NUMERIC +# ) PRIMARY KEY (id, track_number), +# INTERLEAVE IN PARENT albums ON DELETE CASCADE +# +# See model.py for the full model definitions. +def interleaved_table(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + # Insert a Singer row, two Albums, and six Tracks. + # Track is INTERLEAVED IN PARENT Album. + singer = Singer( + id=str(uuid.uuid4()), + first_name="John", + last_name="Smith", + albums=[ + Album( + id=str(uuid.uuid4()), + title="Rainforest", + tracks=[ + # Track is INTERLEAVED IN PARENT Album, but can be treated + # as a normal relationship in SQLAlchemy. + Track(track_number=1, title="Green"), + Track(track_number=2, title="Blue"), + Track(track_number=3, title="Yellow"), + ], + ), + Album( + id=str(uuid.uuid4()), + title="Butterflies", + tracks=[ + Track(track_number=1, title="Purple"), + Track(track_number=2, title="Cyan"), + Track(track_number=3, title="Mauve"), + ], + ), + ], + ) + session.add(singer) + session.commit() + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. + session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + # We can iterate over the tracks of an album as if it were a normal + # relationship. 
+ print(f"Singer {singer.full_name} has these albums:") + for album in singer.albums: + print(f"\tAlbum {album.title} has these tracks:") + for track in album.tracks: + print(f"\t\t{track.track_number} - {track.title}") + + +if __name__ == "__main__": + run_sample(interleaved_table) diff --git a/packages/sqlalchemy-spanner/samples/isolation_level_sample.py b/packages/sqlalchemy-spanner/samples/isolation_level_sample.py new file mode 100644 index 000000000000..ceb5664375be --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/isolation_level_sample.py @@ -0,0 +1,47 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer + + +# Shows how to set the isolation level for a read/write transaction. +# Spanner supports the following isolation levels: +# - SERIALIZABLE (default) +# - REPEATABLE READ +def isolation_level_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + # You can set a default isolation level for an engine. + isolation_level="REPEATABLE READ", + echo=True, + ) + # You can override the default isolation level of the connection + # by setting it in the execution_options. 
+ with Session(engine.execution_options(isolation_level="SERIALIZABLE")) as session: + singer_id = str(uuid.uuid4()) + singer = Singer(id=singer_id, first_name="John", last_name="Doe") + session.add(singer) + session.commit() + + +if __name__ == "__main__": + run_sample(isolation_level_sample) diff --git a/packages/sqlalchemy-spanner/samples/model.py b/packages/sqlalchemy-spanner/samples/model.py new file mode 100644 index 000000000000..a8c86047f3a7 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/model.py @@ -0,0 +1,219 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +from typing import Optional, List + +from sqlalchemy import ( + String, + Computed, + Date, + LargeBinary, + Integer, + Numeric, + ForeignKey, + JSON, + Boolean, + DateTime, + BigInteger, + ARRAY, + ForeignKeyConstraint, + Sequence, + TextClause, + Index, + PickleType, + text, + event, +) +from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship +from google.cloud.sqlalchemy_spanner.sqlalchemy_spanner import SpannerPickleType + + +class Base(DeclarativeBase): + pass + + +# Most models in this sample use a client-side generated UUID as primary key. +# This allows inserts to use Batch DML, as the primary key value does not need +# to be returned from Spanner using a THEN RETURN clause. +# +# The Venue model uses a standard auto-generated integer primary key. This uses +# an IDENTITY column in Spanner. 
IDENTITY columns use a backing bit-reversed +# sequence to generate unique values that are safe to use for primary keys. +# +# The TicketSale model uses a bit-reversed sequence for primary key generation. +# This is achieved by creating a bit-reversed sequence and assigning the id +# column of the model a server_default value that gets the next value from that +# sequence. + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[str] = mapped_column(String(36), primary_key=True) + first_name: Mapped[Optional[str]] = mapped_column(String(200), nullable=True) + last_name: Mapped[str] = mapped_column(String(200), nullable=False) + full_name: Mapped[str] = mapped_column( + String, Computed("COALESCE(first_name || ' ', '') || last_name") + ) + birthdate: Mapped[Optional[datetime.date]] = mapped_column(Date, nullable=True) + picture: Mapped[Optional[bytes]] = mapped_column(LargeBinary, nullable=True) + preferences: Mapped[Optional[object]] = mapped_column( + PickleType(impl=SpannerPickleType), nullable=True + ) + albums: Mapped[List["Album"]] = relationship( + back_populates="singer", cascade="all, delete-orphan" + ) + concerts: Mapped[List["Concert"]] = relationship( + back_populates="singer", cascade="all, delete-orphan" + ) + + +class Album(Base): + __tablename__ = "albums" + id: Mapped[str] = mapped_column(String(36), primary_key=True) + title: Mapped[str] = mapped_column(String(200), nullable=False) + release_date: Mapped[Optional[datetime.date]] = mapped_column(Date, nullable=True) + singer_id: Mapped[str] = mapped_column(ForeignKey("singers.id")) + singer: Mapped["Singer"] = relationship(back_populates="albums") + tracks: Mapped[List["Track"]] = relationship( + back_populates="album", + primaryjoin="Album.id == foreign(Track.id)", + order_by="Track.track_number", + ) + + +class Track(Base): + __tablename__ = "tracks" + __table_args__ = ( + # Use the spanner_interleave_in argument to add an INTERLEAVED IN clause to the index. 
+ # You can read additional details at: + # https://cloud.google.com/spanner/docs/secondary-indexes#indexes_and_interleaving + Index( + "idx_tracks_id_title", + "id", + "title", + spanner_interleave_in="albums", + ), + # This interleaves the table `tracks` in its parent `albums`. + { + "spanner_interleave_in": "albums", + "spanner_interleave_on_delete_cascade": True, + }, + ) + id: Mapped[str] = mapped_column(String(36), primary_key=True) + track_number: Mapped[int] = mapped_column(Integer, primary_key=True) + title: Mapped[str] = mapped_column(String(200), nullable=False) + duration: Mapped[Optional[float]] = mapped_column(Numeric, nullable=True) + recorded_at: Mapped[Optional[datetime.datetime]] = mapped_column( + DateTime, + nullable=True, + # TODO: Enable this once 'func.now()' is mapped to CURRENT_TIMESTAMP + # server_default=func.now(), + server_default=TextClause("CURRENT_TIMESTAMP"), + ) + album: Mapped["Album"] = relationship( + back_populates="tracks", + foreign_keys=[id], + primaryjoin="Track.id == Album.id", + remote_side="Album.id", + ) + + +# SQLAlchemy does not know what 'spanner_interleave_in' means, so we need to +# explicitly tell SQLAlchemy that `tracks` depends on `albums`, and that +# `albums` therefore must be created before `tracks`. +Track.__table__.add_is_dependent_on(Album.__table__) + + +class Venue(Base): + __tablename__ = "venues" + __table_args__ = (Index("venues_code_unique", "code", unique=True),) + # Venue uses a standard auto-generated primary key. + # This translates to an IDENTITY column in Spanner. 
+ id: Mapped[int] = mapped_column(primary_key=True) + code: Mapped[str] = mapped_column(String(10)) + name: Mapped[str] = mapped_column(String(200), nullable=False) + description: Mapped[str] = mapped_column(JSON, nullable=True) + active: Mapped[bool] = mapped_column(Boolean, nullable=False) + concerts: Mapped[List["Concert"]] = relationship( + back_populates="venue", cascade="all, delete-orphan" + ) + + +class Concert(Base): + __tablename__ = "concerts" + venue_code: Mapped[str] = mapped_column( + String(10), ForeignKey("venues.code"), primary_key=True + ) + start_time: Mapped[Optional[datetime.datetime]] = mapped_column( + DateTime, primary_key=True, nullable=False + ) + singer_id: Mapped[str] = mapped_column( + String(36), ForeignKey("singers.id"), primary_key=True + ) + title: Mapped[str] = mapped_column(String(200), nullable=False) + singer: Mapped["Singer"] = relationship(back_populates="concerts") + venue: Mapped["Venue"] = relationship(back_populates="concerts") + ticket_sales: Mapped[List["TicketSale"]] = relationship( + back_populates="concert", passive_deletes=True + ) + + +class TicketSale(Base): + __tablename__ = "ticket_sales" + __table_args__ = ( + ForeignKeyConstraint( + ["venue_code", "start_time", "singer_id"], + ["concerts.venue_code", "concerts.start_time", "concerts.singer_id"], + spanner_not_enforced=True, + ), + ) + id: Mapped[int] = mapped_column( + BigInteger, + Sequence("ticket_sale_id"), + server_default=TextClause("GET_NEXT_SEQUENCE_VALUE(SEQUENCE ticket_sale_id)"), + primary_key=True, + ) + customer_name: Mapped[str] = mapped_column(String(200), nullable=False) + seats: Mapped[list[str]] = mapped_column(ARRAY(String(20)), nullable=False) + concert: Mapped["Concert"] = relationship(back_populates="ticket_sales") + venue_code: Mapped[str] = mapped_column(String(10), ForeignKey("venues.code")) + start_time: Mapped[Optional[datetime.datetime]] = mapped_column( + DateTime, nullable=False + ) + # Create an informational foreign key that is 
not enforced by Spanner. + # See https://cloud.google.com/spanner/docs/foreign-keys/overview#informational-foreign-keys + # for more information. + singer_id: Mapped[str] = mapped_column( + String(36), ForeignKey("singers.id", spanner_not_enforced=True) + ) + # Create a commit timestamp column and set a client-side default of + # PENDING_COMMIT_TIMESTAMP(). An event handler below is responsible for + # setting PENDING_COMMIT_TIMESTAMP() on updates. If using SQLAlchemy + # core rather than the ORM, callers will need to supply their own + # PENDING_COMMIT_TIMESTAMP() values in their inserts & updates. + # + # Columns that use PENDING_COMMIT_TIMESTAMP() cannot be included in a + # THEN RETURN clause. + last_update_time: Mapped[datetime.datetime] = mapped_column( + spanner_allow_commit_timestamp=True, + spanner_exclude_from_returning=True, + default=text("PENDING_COMMIT_TIMESTAMP()"), + ) + + +@event.listens_for(TicketSale, "before_update") +def ticket_sale_before_update(mapper, connection, target): + """Updates the commit timestamp when the row is updated.""" + target.last_update_time = text("PENDING_COMMIT_TIMESTAMP()") diff --git a/packages/sqlalchemy-spanner/samples/noxfile.py b/packages/sqlalchemy-spanner/samples/noxfile.py new file mode 100644 index 000000000000..29709cd1c37b --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/noxfile.py @@ -0,0 +1,125 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from os import listdir +from os.path import isfile, join + +import nox + + +@nox.session() +def hello_world(session): + _sample(session) + + +@nox.session() +def auto_generated_primary_key(session): + _sample(session) + + +@nox.session() +def bit_reversed_sequence(session): + _sample(session) + + +@nox.session() +def date_and_timestamp(session): + _sample(session) + + +@nox.session() +def default_column_value(session): + _sample(session) + + +@nox.session() +def generated_column(session): + _sample(session) + + +@nox.session() +def insert_data(session): + _sample(session) + + +@nox.session() +def interleaved_table(session): + _sample(session) + + +@nox.session() +def transaction(session): + _sample(session) + + +@nox.session() +def tags(session): + _sample(session) + + +@nox.session() +def isolation_level(session): + _sample(session) + + +@nox.session() +def stale_read(session): + _sample(session) + + +@nox.session() +def read_only_transaction(session): + _sample(session) + + +@nox.session() +def database_role(session): + _sample(session) + + +@nox.session() +def informational_fk(session): + _sample(session) + + +@nox.session() +def insertmany(session): + _sample(session) + + +@nox.session() +def parse_json(session): + _sample(session) + + +@nox.session() +def _all_samples(session): + _sample(session) + + +def _sample(session): + session.install("testcontainers") + session.install("sqlalchemy") + session.install("setuptools") + session.install( + "git+https://github.com/googleapis/python-spanner.git#egg=google-cloud-spanner" + ) + session.install("../.") + if session.name == "_all_samples": + files = [ + f for f in listdir(".") if isfile(join(".", f)) and f.endswith("_sample.py") + ] + for file in files: + session.run("python", file) + else: + session.run("python", session.name + "_sample.py") diff --git a/packages/sqlalchemy-spanner/samples/null_filtered_index.py b/packages/sqlalchemy-spanner/samples/null_filtered_index.py new file mode 100644 index 
000000000000..d8d9556f2bc4 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/null_filtered_index.py @@ -0,0 +1,75 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +from sqlalchemy import create_engine, Index +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import mapped_column, DeclarativeBase, Mapped, Session + +from sample_helper import run_sample + +# Shows how to create a null-filtered index. +# +# A null-filtered index does not index NULL values. This is useful for +# maintaining smaller indexes over sparse columns. +# https://cloud.google.com/spanner/docs/secondary-indexes#null-indexing-disable + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers_with_null_filtered_index" + __table_args__ = ( + Index("uq_null_filtered_name", "name", unique=True, spanner_null_filtered=True), + ) + + id: Mapped[str] = mapped_column(primary_key=True, default=lambda: str(uuid.uuid4())) + name: Mapped[str | None] + + +def null_filtered_index_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + Base.metadata.create_all(engine) + + # We can create singers with a name of jdoe and NULL. 
+ with Session(engine) as session: + session.add(Singer(name="jdoe")) + session.add(Singer(name=None)) + session.commit() + + # The unique index will stop us from adding another jdoe. + with Session(engine) as session: + session.add(Singer(name="jdoe")) + try: + session.commit() + except IntegrityError: + session.rollback() + + # The index is null filtered, so we can still add another + # NULL name. The NULL values are not part of the index. + with Session(engine) as session: + session.add(Singer(name=None)) + session.commit() + + +if __name__ == "__main__": + run_sample(null_filtered_index_sample) diff --git a/packages/sqlalchemy-spanner/samples/parse_json_sample.py b/packages/sqlalchemy-spanner/samples/parse_json_sample.py new file mode 100644 index 000000000000..34488fa6472e --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/parse_json_sample.py @@ -0,0 +1,51 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import create_engine, func, text +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Venue + + +# Shows how to use the PARSE_JSON function in Spanner using SQLAlchemy. 
+def parse_json_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + venue = Venue( + code="LCH", + active=True, + name="Large Concert Hall", + # The SQLAlchemy func function is very lenient and allows you to call any + # database function that Spanner supports. Use a text instance to add a + # specific SQL fragment to the function call. + description=func.parse_json( + '{"type": "Stadium", "size": 13.7391432}', + text("wide_number_mode=>'round'"), + ), + ) + session.add(venue) + session.commit() + + venue = session.query(Venue).filter_by(code="LCH").one() + print(venue.description) + + +if __name__ == "__main__": + run_sample(parse_json_sample) diff --git a/packages/sqlalchemy-spanner/samples/partitioned_dml_sample.py b/packages/sqlalchemy-spanner/samples/partitioned_dml_sample.py new file mode 100644 index 000000000000..e799a39d7367 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/partitioned_dml_sample.py @@ -0,0 +1,46 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode +from sqlalchemy import create_engine, text + +from sample_helper import run_sample + + +# Shows how to use Partitioned DML using SQLAlchemy and Spanner. 
+def partitioned_dml_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # Get a connection in auto-commit mode. + # Partitioned DML can only be executed in auto-commit mode, as each + # Partitioned DML transaction can only consist of one statement. + with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as connection: + # Set the DML mode to PARTITIONED_NON_ATOMIC. + connection.connection.set_autocommit_dml_mode( + AutocommitDmlMode.PARTITIONED_NON_ATOMIC + ) + # Use a bulk update statement to back-fill a column. + lower_bound_rowcount = connection.execute( + text("update venues set active=true where active is null") + ).rowcount + # Partitioned DML returns the lower-bound update count. + print("Updated at least ", lower_bound_rowcount, " venue records") + + +if __name__ == "__main__": + run_sample(partitioned_dml_sample) diff --git a/packages/sqlalchemy-spanner/samples/pickle_type_sample.py b/packages/sqlalchemy-spanner/samples/pickle_type_sample.py new file mode 100644 index 000000000000..f3dce513590f --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/pickle_type_sample.py @@ -0,0 +1,56 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer + + +# Shows how to use PickleType with Spanner. +def pickle_type(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + singer = Singer( + id=str(uuid.uuid4()), + first_name="John", + last_name="Smith", + # Preferences are stored as an opaque BYTES column + # in the database. + preferences={ + "wakeup_call": "yes", + "vegetarian": "no", + }, + ) + session.add(singer) + session.commit() + + # Use AUTOCOMMIT for sessions that only read. This is more + # efficient than using a read/write transaction to only read. + session.connection(execution_options={"isolation_level": "AUTOCOMMIT"}) + print( + f"Inserted singer {singer.full_name} has these preferences: {singer.preferences}" + ) + + +if __name__ == "__main__": + run_sample(pickle_type) diff --git a/packages/sqlalchemy-spanner/samples/read_only_transaction_sample.py b/packages/sqlalchemy-spanner/samples/read_only_transaction_sample.py new file mode 100644 index 000000000000..35ef84e7fd12 --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/read_only_transaction_sample.py @@ -0,0 +1,64 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import uuid + +from sqlalchemy import create_engine, Engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Concert, Venue + + +# Shows how to execute a read-only transaction on Spanner using SQLAlchemy. +def read_only_transaction_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # First insert a few test rows that can be queried in a read-only transaction. + insert_test_data(engine) + + # Create a session that uses a read-only transaction. + # Read-only transactions do not take locks, and are therefore preferred + # above read/write transactions for workloads that only read data on Spanner. + with Session(engine.execution_options(read_only=True)) as session: + print("Singers ordered by last name") + singers = session.query(Singer).order_by(Singer.last_name).all() + for singer in singers: + print("Singer: ", singer.full_name) + + print() + print("Singers ordered by first name") + singers = session.query(Singer).order_by(Singer.first_name).all() + for singer in singers: + print("Singer: ", singer.full_name) + + +def insert_test_data(engine: Engine): + with Session(engine) as session: + session.add_all( + [ + Singer(id=str(uuid.uuid4()), first_name="John", last_name="Doe"), + Singer(id=str(uuid.uuid4()), first_name="Jane", last_name="Doe"), + ] + ) + session.commit() + + +if __name__ == "__main__": + run_sample(read_only_transaction_sample) diff --git a/packages/sqlalchemy-spanner/samples/sample_helper.py b/packages/sqlalchemy-spanner/samples/sample_helper.py new file mode 100644 index 000000000000..f10268b2cb8c --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/sample_helper.py @@ -0,0 +1,100 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Callable + +from google.api_core.client_options import ClientOptions +from google.api_core.exceptions import AlreadyExists +from google.auth.credentials import AnonymousCredentials +from google.cloud.spanner_v1 import Client +from google.cloud.spanner_v1.database import Database +from sqlalchemy import create_engine +from sqlalchemy.dialects import registry +from testcontainers.core.container import DockerContainer +from testcontainers.core.waiting_utils import wait_for_logs + +from model import Base + + +def run_sample(sample_method: Callable): + registry.register( + "spanner", + "google.cloud.sqlalchemy_spanner.sqlalchemy_spanner", + "SpannerDialect", + ) + emulator = None + if os.getenv("USE_EXISTING_EMULATOR") == "true": + if os.getenv("SPANNER_EMULATOR_HOST") is None: + os.environ["SPANNER_EMULATOR_HOST"] = "localhost:9010" + _create_instance_and_database("9010") + else: + os.environ["SPANNER_EMULATOR_HOST"] = "" + emulator, port = start_emulator() + os.environ["SPANNER_EMULATOR_HOST"] = "localhost:" + str(port) + try: + _create_tables() + sample_method() + finally: + if emulator is not None: + emulator.stop() + + +def start_emulator() -> (DockerContainer, str): + emulator = DockerContainer( + "gcr.io/cloud-spanner-emulator/emulator" + ).with_exposed_ports(9010) + emulator.start() + wait_for_logs(emulator, "gRPC server listening at 0.0.0.0:9010") + port = str(emulator.get_exposed_port(9010)) + _create_instance_and_database(port) + return emulator, port + + +def _create_instance_and_database(port: str): + client = 
Client(
+        project="sample-project",
+        credentials=AnonymousCredentials(),
+        client_options=ClientOptions(
+            api_endpoint="localhost:" + port,
+        ),
+    )
+    configs = list(client.list_instance_configs())
+    instance_config = configs[0].name
+    instance_id = "sample-instance"
+    database_id = "sample-database"
+
+    instance = client.instance(instance_id, instance_config)
+    try:
+        created_op = instance.create()
+        created_op.result(1800)  # block until completion
+    except AlreadyExists:
+        # Ignore
+        print("Using existing instance")
+
+    database: Database = instance.database(database_id)
+    if database.exists():
+        database.drop()
+    created_op = database.create()
+    created_op.result(1800)
+
+
+def _create_tables():
+    engine = create_engine(
+        "spanner:///projects/sample-project/"
+        "instances/sample-instance/"
+        "databases/sample-database",
+        echo=True,
+    )
+    Base.metadata.create_all(engine)
diff --git a/packages/sqlalchemy-spanner/samples/snippets.py b/packages/sqlalchemy-spanner/samples/snippets.py
new file mode 100644
index 000000000000..b309ea0b3ff6
--- /dev/null
+++ b/packages/sqlalchemy-spanner/samples/snippets.py
@@ -0,0 +1,337 @@
+# Copyright 2021 Google LLC
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+"""
+This application demonstrates how to do basic operations with Cloud
+Spanner database.
+For more information, see the README.md under /python-spanner-sqlalchemy.
+""" + +from sqlalchemy import ( + Column, + create_engine, + Index, + Integer, + inspect, + MetaData, + String, + Table, +) + + +# [START spanner_sqlalchemy_autocommit_on] +def enable_autocommit_mode(connection, url): + """Enable AUTOCOMMIT mode.""" + level = connection.get_isolation_level() + print("Connection default mode is {}.".format(level)) + + connection.execution_options(isolation_level="AUTOCOMMIT") + level = connection.get_isolation_level() + print("Connection mode is now {}.".format(level)) + + +# [END spanner_sqlalchemy_autocommit_on] + + +# [START spanner_sqlalchemy_create_table] +def create_table(url, table_id): + """Create a table.""" + engine = create_engine(url) + metadata = MetaData(bind=engine) + + table = Table( + table_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + table.create() + + print("Table {} successfully created.".format(table.name)) + + +# [END spanner_sqlalchemy_create_table] + + +# [START spanner_sqlalchemy_drop_table] +def drop_table(table): + """Drop the table.""" + table.drop() + + print("Table {} successfully dropped.".format(table.name)) + + +# [END spanner_sqlalchemy_drop_table] + + +# [START spanner_sqlalchemy_get_table_names] +def get_table_names(url): + """Retrieve the list of the table names in the database. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + names = insp.get_table_names() + + print("Retrieved table names:") + for name in names: + print(name) + + +# [END spanner_sqlalchemy_get_table_names] + + +# [START spanner_sqlalchemy_create_unique_index] +def create_unique_index(table): + """Create unique index. 
+ + The table must already exist and can be created using + `create_table.` + """ + index = Index("some_index", table.c.user_name, unique=True) + index.create() + print("Index created.") + + +# [END spanner_sqlalchemy_create_unique_index] + + +# [START spanner_sqlalchemy_delete_all_rows] +def delete_all_rows(connection, table): + """Delete all rows from the table. + + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + print("Row count:", len(rows)) + + connection.execute(table.delete()) + + rows = connection.execute(table.select()).fetchall() + print("Row count after deletion:", len(rows)) + + +# [END spanner_sqlalchemy_delete_all_rows] + + +# [START spanner_sqlalchemy_delete_row] +def delete_row_with_where_clause(connection, table): + """Delete a row. + + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + print("Row count:", len(rows)) + + connection.execute(table.delete().where(table.c.user_id == 1)) + + rows = connection.execute(table.select()).fetchall() + print("Row count after deletion:", len(rows)) + + +# [END spanner_sqlalchemy_delete_row] + + +# [START spanner_sqlalchemy_table_exists] +def table_exists(table): + """Check the table exists. + + The table must already exist and can be created using + `create_table.` + """ + result = table.exists() + if result is True: + print("Table exists.") + + +# [END spanner_sqlalchemy_table_exists] + + +# [START spanner_sqlalchemy_fetch_rows] +def fetch_rows(connection, table): + """Fetch all rows from the table. + + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows] + + +# [START spanner_sqlalchemy_fetch_row] +def fetch_row_with_where_clause(connection, table): + """Fetch row with a WHERE clause. 
+ + The table must already exist and can be created using + `create_table.` + """ + row = list(connection.execute(table.select().where(table.c.user_id == 1))) + + print("Fetched row: ", row) + + +# [END spanner_sqlalchemy_fetch_row] + + +# [START spanner_sqlalchemy_fetch_rows_with_limit_offset] +def fetch_rows_with_limit_offset(connection, table): + """Fetch rows from the table with LIMIT and OFFSET clauses. + + The table must already exist and can be created using + `create_table.` + """ + rows = list(connection.execute(table.select().limit(2).offset(1))) + + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows_with_limit_offset] + + +# [START spanner_sqlalchemy_fetch_rows_with_order_by] +def fetch_rows_with_order_by(connection, table): + """Fetch all rows ordered. + + The table must already exist and can be created using + `create_table.` + """ + rows = list( + connection.execute(table.select().order_by(table.c.user_name)).fetchall() + ) + print("Ordered rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows_with_order_by] + + +# [START spanner_sqlalchemy_filter_data_startswith] +def filter_data_startswith(connection, table): + """Filter data with STARTSWITH clause. + + The table must already exist and can be created using + `create_table.` + """ + rows = list( + connection.execute(table.select().where(table.c.user_name.startswith("abcd%"))) + ) + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_filter_data_startswith] + + +# [START spanner_sqlalchemy_get_table_columns] +def get_table_columns(url, table): + """Retrieve the list of columns of the table. 
+ + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + columns = insp.get_columns(table.name) + + print("Fetched columns: ", columns) + + +# [END spanner_sqlalchemy_get_table_columns] + + +# [START spanner_sqlalchemy_get_foreign_key] +def get_table_foreign_key(url, table): + """Retrieve a Foreign Key. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + f_keys = insp.get_foreign_keys(table.name) + + if f_keys: + print("Fetched foreign keys: ", f_keys) + + +# [END spanner_sqlalchemy_get_foreign_key] + + +# [START spanner_sqlalchemy_get_indexes] +def get_table_indexes(url, table): + """Retrieve the table indexes. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + indexes = insp.get_indexes(table.name) + + if indexes: + print("Fetched indexes: ", indexes) + + +# [END spanner_sqlalchemy_get_indexes] + + +# [START spanner_sqlalchemy_get_primary_key] +def get_table_primary_key(url, table): + """Retrieve the table Primary Key. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + p_key = insp.get_pk_constraint(table.name) + + if p_key: + print("Fetched primary key: ", p_key) + + +# [END spanner_sqlalchemy_get_primary_key] + + +# [START spanner_sqlalchemy_insert_row] +def insert_row(connection, table): + """Insert row into the table. + + The table must already exist and can be created using + `create_table.` + """ + connection.execute(table.insert(), {"user_id": 1, "user_name": "ABC"}) + + row = list(connection.execute(table.select())) + + print("Inserted row: ", row) + + +# [END spanner_sqlalchemy_insert_row] + + +# [START spanner_sqlalchemy_update_row] +def update_row(connection, table): + """Update a row in the table. 
+ + The table must already exist and can be created using + `create_table.` + """ + connection.execute( + table.update().where(table.c.user_id == 2).values(user_name="GEH") + ) + row = list(connection.execute(table.select().where(table.c.user_id == 2))) + + print("Updated row: ", row) + + +# [END spanner_sqlalchemy_update_row] diff --git a/packages/sqlalchemy-spanner/samples/snippets_test.py b/packages/sqlalchemy-spanner/samples/snippets_test.py new file mode 100644 index 000000000000..9866d638a3de --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/snippets_test.py @@ -0,0 +1,227 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bs + +from samples import snippets +from samples.conftest import insert_data +from sqlalchemy import ( + Column, + create_engine, + Index, + Integer, + inspect, + MetaData, + String, + Table, +) + +DATA = [ + {"user_id": 1, "user_name": "abcdefg"}, + {"user_id": 2, "user_name": "ab/cdefg"}, + {"user_id": 3, "user_name": "ab%cdefg"}, + {"user_id": 4, "user_name": "ab_cdefg"}, + {"user_id": 5, "user_name": "abcde/fg"}, + {"user_id": 6, "user_name": "abcde%fg"}, +] + + +def table_obj(database_url, tab_id): + """Helper to produce a `Table` object for the given table id.""" + engine = create_engine(database_url) + metadata = MetaData(bind=engine) + + table = Table( + tab_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + return table + + +def test_enable_autocommit_mode(capsys, connection, db_url): + snippets.enable_autocommit_mode(connection, db_url) + + out, err = capsys.readouterr() + assert "Connection default mode is SERIALIZABLE" in out + assert "Connection mode is now AUTOCOMMIT" in out + + +def test_create_table(capsys, db_url, table_id): + snippets.create_table(db_url, table_id) + + out, err = capsys.readouterr() + 
assert "Table {} successfully created".format(table_id) in out + + table = table_obj(db_url, table_id) + assert table.exists() is True + table.drop() + + +def test_drop_table(capsys, db_url, table_id): + table = table_obj(db_url, table_id) + table.create() + + snippets.drop_table(table) + + out, err = capsys.readouterr() + assert "Table {} successfully dropped".format(table_id) in out + assert table.exists() is False + + +def test_get_table_names(capsys, db_url, table): + snippets.get_table_names(db_url) + + out, err = capsys.readouterr() + assert "Retrieved table names:" in out + assert table.name in out + + +def test_table_create_unique_index(capsys, db_url, table): + snippets.create_unique_index(table) + + engine = create_engine(db_url) + insp = inspect(engine) + indexes = insp.get_indexes(table.name) + + out, err = capsys.readouterr() + + assert "Index created" in out + assert indexes[0]["unique"] is True + + +def test_table_delete_all_rows(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.delete_all_rows(connection, table) + + out, err = capsys.readouterr() + assert "Row count: 6" in out + assert "Row count after deletion: 0" in out + + +def test_table_delete_row_with_where_clause(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.delete_row_with_where_clause(connection, table) + + out, err = capsys.readouterr() + assert "Row count: 6" in out + assert "Row count after deletion: 5" in out + + +def test_exists_table(capsys, table): + snippets.table_exists(table) + + out, err = capsys.readouterr() + assert "Table exists" in out + + +def test_table_fetch_rows(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + + for row in DATA: # check that all rows were fetched + assert str(tuple(row.values())) in out + + +def test_table_fetch_row_with_where_clause(capsys, connection, table): + 
insert_data(connection, table, DATA) + snippets.fetch_row_with_where_clause(connection, table) + + out, err = capsys.readouterr() + assert str(tuple(DATA[0].values())) in out + + +def test_table_fetch_rows_with_limit_offset(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows_with_limit_offset(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + assert str(tuple(DATA[1].values())) in out + assert str(tuple(DATA[2].values())) in out + + +def test_table_fetch_rows_with_order_by(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows_with_order_by(connection, table) + + out, err = capsys.readouterr() + assert "Ordered rows:" in out + + rows = [] + for row in sorted(DATA, key=lambda r: r["user_name"]): + rows.append(tuple(row.values())) + + assert str(rows) in out + + +def test_table_filter_data_startswith(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.filter_data_startswith(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + + rows = [] + for ind in (0, 4, 5): + rows.append(tuple(DATA[ind].values())) + + assert str(rows) in out + + +def test_table_get_columns(capsys, db_url, table): + snippets.get_table_columns(db_url, table) + out, err = capsys.readouterr() + assert "Fetched columns:" in out + + for col in table.columns: + assert col.name in out + + +def test_table_get_foreign_key(capsys, db_url, table_w_foreign_key): + snippets.get_table_foreign_key(db_url, table_w_foreign_key) + out, err = capsys.readouterr() + + assert "Fetched foreign keys:" in out + + +def test_table_get_indexes(capsys, db_url, table): + index = Index("some_index", table.c.user_name, unique=True) + index.create() + + snippets.get_table_indexes(db_url, table) + out, err = capsys.readouterr() + + assert "Fetched indexes:" in out + + +def test_table_get_primary_key(capsys, db_url, table): + snippets.get_table_primary_key(db_url, 
table) + out, err = capsys.readouterr() + assert "Fetched primary key:" in out + + +def test_table_insert_row(capsys, connection, table): + snippets.insert_row(connection, table) + + out, err = capsys.readouterr() + assert "Inserted row:" in out + + rows = list(connection.execute(table.select())) + assert len(rows) == 1 + + +def test_table_update_row(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.update_row(connection, table) + + out, err = capsys.readouterr() + assert "Updated row:" in out + + rows = list(connection.execute(table.select())) + rows[0][1] == "GEH" diff --git a/packages/sqlalchemy-spanner/samples/stale_read_sample.py b/packages/sqlalchemy-spanner/samples/stale_read_sample.py new file mode 100644 index 000000000000..989a0c13ca0e --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/stale_read_sample.py @@ -0,0 +1,96 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from sqlalchemy import create_engine, Engine, select, text +from sqlalchemy.orm import Session +from sample_helper import run_sample +from model import Singer + + +# Shows how to execute stale reads on Spanner using SQLAlchemy. +def stale_read_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # First get the current database timestamp. 
We can use this timestamp to + # query the database at a point in time where we know it was empty. + with Session(engine.execution_options(isolation_level="AUTOCOMMIT")) as session: + timestamp = session.execute(select(text("current_timestamp"))).one()[0] + print(timestamp) + + # Insert a few test rows. + insert_test_data(engine) + + # Create a session that uses a read-only transaction with a strong timestamp + # bound. This means that it will read all data that has been committed at the + # time this transaction starts. + # Read-only transactions do not take locks, and are therefore preferred + # above read/write transactions for workloads that only read data on Spanner. + with Session(engine.execution_options(read_only=True)) as session: + print("Found singers with strong timestamp bound:") + singers = session.query(Singer).order_by(Singer.last_name).all() + for singer in singers: + print("Singer: ", singer.full_name) + + # Create a session that uses a read-only transaction that selects data in + # the past. We'll use the timestamp that we retrieved before inserting the + # test data for this transaction. + with Session( + engine.execution_options( + read_only=True, staleness={"read_timestamp": timestamp} + ) + ) as session: + print("Searching for singers using a read timestamp in the past:") + singers = session.query(Singer).order_by(Singer.last_name).all() + if singers: + for singer in singers: + print("Singer: ", singer.full_name) + else: + print("No singers found.") + + # Spanner also supports min_read_timestamp and max_staleness as staleness + # options. These can only be used in auto-commit mode. + # Spanner will choose a read timestamp that satisfies the given restriction + # and that can be served as efficiently as possible. 
+    with Session(
+        engine.execution_options(
+            isolation_level="AUTOCOMMIT", staleness={"max_staleness": {"seconds": 15}}
+        )
+    ) as session:
+        print("Searching for singers using a max staleness of 15 seconds:")
+        singers = session.query(Singer).order_by(Singer.last_name).all()
+        if singers:
+            for singer in singers:
+                print("Singer: ", singer.full_name)
+        else:
+            print("No singers found.")
+
+
+def insert_test_data(engine: Engine):
+    with Session(engine) as session:
+        session.add_all(
+            [
+                Singer(id=str(uuid.uuid4()), first_name="John", last_name="Doe"),
+                Singer(id=str(uuid.uuid4()), first_name="Jane", last_name="Doe"),
+            ]
+        )
+        session.commit()
+
+
+if __name__ == "__main__":
+    run_sample(stale_read_sample)
diff --git a/packages/sqlalchemy-spanner/samples/tags_sample.py b/packages/sqlalchemy-spanner/samples/tags_sample.py
new file mode 100644
index 000000000000..a75bddd53eb4
--- /dev/null
+++ b/packages/sqlalchemy-spanner/samples/tags_sample.py
@@ -0,0 +1,58 @@
+# Copyright 2025 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import Session
+
+from sample_helper import run_sample
+from model import Singer
+
+
+# Shows how to use transaction tags and statement tags with Spanner and SQLAlchemy.
+def tags_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + # Set a transaction_tag in the execution options for the session to set + # a transaction tag. + with Session( + engine.execution_options(transaction_tag="my_transaction_tag") + ) as session: + # The transaction that is automatically started by SQLAlchemy will use the + # transaction tag that is specified in the execution options. + + # Execute a query with a request tag. + singer_id = str(uuid.uuid4()) + singer = session.get( + Singer, singer_id, execution_options={"request_tag": "my_tag_1"} + ) + + # Add the singer if it was not found. + if singer is None: + # The session.Add(..) function does not support execution_options, but we can + # set the execution_options on the connection of this session. This will be + # propagated to the next statement that is executed on the connection. + session.connection().execution_options(request_tag="insert_singer") + singer = Singer(id=singer_id, first_name="John", last_name="Doe") + session.add(singer) + session.commit() + + +if __name__ == "__main__": + run_sample(tags_sample) diff --git a/packages/sqlalchemy-spanner/samples/transaction_sample.py b/packages/sqlalchemy-spanner/samples/transaction_sample.py new file mode 100644 index 000000000000..b1f42ede86db --- /dev/null +++ b/packages/sqlalchemy-spanner/samples/transaction_sample.py @@ -0,0 +1,82 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import uuid + +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + +from sample_helper import run_sample +from model import Singer, Concert, Venue + + +# Shows how to execute a read/write transaction on Spanner using SQLAlchemy. +def transaction_sample(): + engine = create_engine( + "spanner:///projects/sample-project/" + "instances/sample-instance/" + "databases/sample-database", + echo=True, + ) + with Session(engine) as session: + # SQLAlchemy by default automatically starts a transaction the first + # time the database is accessed. + singer_id = str(uuid.uuid4()) + singer = Singer(id=singer_id, first_name="John", last_name="Doe") + session.add(singer) + # Flush the session. This pushes all changes in this session to the + # database without committing the current transaction. + session.flush([singer]) + + # Verify that we can read this singer from the database during the + # same transaction. The Singer model has a `full_name` property that is + # generated by the database. This will be read from the database when + # we call refresh. + session.refresh(singer) + print( + f"The database generated a full name for the singer: " f"{singer.full_name}" + ) + + # We cannot read the singer in a different session, as the current + # transaction has not yet committed. + # We use AUTOCOMMIT for the additional session, as we do not need a + # read/write transaction for just trying to read one row. + with Session( + engine.execution_options(isolation_level="AUTOCOMMIT") + ) as session2: + # singer2 will be None, as the row will not be found. + singer2 = session2.get(Singer, singer_id) + print( + f"Fetching singer in a different transaction before the " + f"transaction was committed: {singer2}" + ) + + session.commit() + + # Now that the original transaction has committed, we can read the + # singer in a different session. 
+ with Session( + engine.execution_options(isolation_level="AUTOCOMMIT") + ) as session2: + # singer2 will now return the actual row. + singer2 = session2.get(Singer, singer_id) + print( + f"Fetching singer in a different transaction after the " + f"transaction was committed: {singer2.full_name}" + ) + + +if __name__ == "__main__": + run_sample(transaction_sample) diff --git a/packages/sqlalchemy-spanner/setup.cfg b/packages/sqlalchemy-spanner/setup.cfg new file mode 100644 index 000000000000..212e7545a10b --- /dev/null +++ b/packages/sqlalchemy-spanner/setup.cfg @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[tool:pytest] +addopts= --tb native -v -r fxX --maxfail=25 -p no:warnings +python_classes=*Test + +[sqla_testing] +requirement_cls=google.cloud.sqlalchemy_spanner.requirements:Requirements +profile_file=test/profiles.txt + +[db] +default=sqlite:///:memory: diff --git a/packages/sqlalchemy-spanner/setup.py b/packages/sqlalchemy-spanner/setup.py new file mode 100644 index 000000000000..9bf2183e2982 --- /dev/null +++ b/packages/sqlalchemy-spanner/setup.py @@ -0,0 +1,87 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +import setuptools + +# Package metadata. + +name = "sqlalchemy-spanner" +description = "SQLAlchemy dialect integrated into Cloud Spanner database" +dependencies = [ + "sqlalchemy>=1.1.13", + "google-cloud-spanner>=3.55.0", + "alembic", +] +extras = { + "tracing": [ + "opentelemetry-api >= 1.1.0", + "opentelemetry-sdk >= 1.1.0", + "opentelemetry-instrumentation >= 0.20b0", + ] +} + +BASE_DIR = os.path.dirname(__file__) +VERSION_FILENAME = os.path.join(BASE_DIR, "google/cloud/sqlalchemy_spanner/version.py") +PACKAGE_INFO = {} +with open(VERSION_FILENAME) as f: + exec(f.read(), PACKAGE_INFO) +version = PACKAGE_INFO["__version__"] + +package_root = os.path.abspath(os.path.dirname(__file__)) +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +# Only include packages under the 'google' namespace. Do not include tests, +# benchmarks, etc. 
+packages = [ + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] + +setuptools.setup( + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + classifiers=[ + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + ], + description=description, + long_description=readme, + entry_points={ + "sqlalchemy.dialects": [ + "spanner.spanner = google.cloud.sqlalchemy_spanner:SpannerDialect" + ] + }, + install_requires=dependencies, + extras_require=extras, + name=name, + packages=packages, + url="https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy", + version=version, + include_package_data=True, + zip_safe=False, +) diff --git a/packages/sqlalchemy-spanner/test/__init__.py b/packages/sqlalchemy-spanner/test/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/sqlalchemy-spanner/test/_helpers.py b/packages/sqlalchemy-spanner/test/_helpers.py new file mode 100644 index 000000000000..3dd57fd5d77d --- /dev/null +++ b/packages/sqlalchemy-spanner/test/_helpers.py @@ -0,0 +1,105 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + + +import configparser +import mock +import os +from sqlalchemy.testing import fixtures + +try: + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import SimpleSpanProcessor + from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + 
InMemorySpanExporter, + ) + from opentelemetry.trace.status import StatusCode + + trace.set_tracer_provider(TracerProvider()) + + HAS_OPENTELEMETRY_INSTALLED = True +except ImportError: + HAS_OPENTELEMETRY_INSTALLED = False + + StatusCode = mock.Mock() + +_TEST_OT_EXPORTER = None +_TEST_OT_PROVIDER_INITIALIZED = False + + +PROJECT = os.getenv( + "GOOGLE_CLOUD_PROJECT", + os.getenv("PROJECT_ID", "emulator-test-project"), +) +DB_URL = ( + f"spanner:///projects/{PROJECT}/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" +) + + +def get_db_url(): + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + return config.get("db", "default", fallback=DB_URL) + + +def get_test_ot_exporter(): + global _TEST_OT_EXPORTER + + if _TEST_OT_EXPORTER is None: + _TEST_OT_EXPORTER = InMemorySpanExporter() + return _TEST_OT_EXPORTER + + +def use_test_ot_exporter(): + global _TEST_OT_PROVIDER_INITIALIZED + + if _TEST_OT_PROVIDER_INITIALIZED: + return + + provider = trace.get_tracer_provider() + if not hasattr(provider, "add_span_processor"): + return + provider.add_span_processor(SimpleSpanProcessor(get_test_ot_exporter())) + _TEST_OT_PROVIDER_INITIALIZED = True + + +def get_project(): + return PROJECT + + +class OpenTelemetryBase(fixtures.TestBase): + @classmethod + def setup_class(cls): + if HAS_OPENTELEMETRY_INSTALLED: + use_test_ot_exporter() + cls.ot_exporter = get_test_ot_exporter() + + def teardown_method(self): + if HAS_OPENTELEMETRY_INSTALLED: + self.ot_exporter.clear() + + def assertNoSpans(self): + if HAS_OPENTELEMETRY_INSTALLED: + span_list = self.ot_exporter.get_finished_spans() + self.assertEqual(len(span_list), 0) + + def assertSpanAttributes( + self, name, status=StatusCode.OK, attributes=None, span=None + ): + if HAS_OPENTELEMETRY_INSTALLED: + if not span: + span_list = self.ot_exporter.get_finished_spans() + self.assertEqual(len(span_list), 1) + span = span_list[0] + + 
self.assertEqual(span.name, name) + self.assertEqual(span.status.status_code, status) + self.assertEqual(dict(span.attributes), attributes) diff --git a/packages/sqlalchemy-spanner/test/benchmark.py b/packages/sqlalchemy-spanner/test/benchmark.py new file mode 100644 index 000000000000..0260ffc0592c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/benchmark.py @@ -0,0 +1,387 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A test suite to check Spanner dialect for SQLAlchemy performance +in comparison with the original Spanner client. +""" +import base64 +import datetime +import random +from scipy.stats import sem +import statistics +import time + +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import NotFound +from google.cloud import spanner +from google.cloud import spanner_dbapi +from google.cloud.spanner_v1 import Client, KeySet +from sqlalchemy import ( + create_engine, + insert, + select, + text, + MetaData, + Table, +) + +PROJECT = "project-id" +INSTANCE = "instance-id" +DATABASE = "database-id" + + +def measure_execution_time(function): + """Decorator to measure a wrapped method execution time.""" + + def wrapper(self, measures): + """Execute the wrapped method and measure its execution time. + + Args: + measures (dict): Test cases and their execution time. 
+ """ + t_start = time.time() + try: + function(self) + measures[function.__name__] = round(time.time() - t_start, 2) + except Aborted: + measures[function.__name__] = 0 + + return wrapper + + +class BenchmarkTestBase: + """Base class for performance testing. + + Organizes testing data preparation and cleanup. + """ + + _many_rows_ids = [] + _many_rows2_ids = [] + + def __init__(self): + self._cleanup() + self._create_table() + + self._one_row = { + "id": 1, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": datetime.date(1998, 10, 6), + "picture": b"123", + } + self.keys = set([1]) + if not self._many_rows_ids: + for i in range(99): + self._many_rows_ids.append(self._generate_id()) + self._many_rows2_ids.append(self._generate_id()) + + def _cleanup(self): + """Drop the test table.""" + conn = spanner_dbapi.connect(INSTANCE, DATABASE) + try: + conn.database.update_ddl(["DROP TABLE Singers"]) + except NotFound: + pass + conn.close() + + def _create_table(self): + """Create a table for performace testing.""" + conn = spanner_dbapi.connect(INSTANCE, DATABASE) + try: + conn.database.update_ddl(["DROP TABLE Singers"]) + except NotFound: + pass + conn.database.update_ddl( + [ + """ +CREATE TABLE Singers ( + id INT64, + first_name STRING(1024), + last_name STRING(1024), + birth_date DATE, + picture BYTES(1024), +) PRIMARY KEY (id) + """ + ] + ).result() + + conn.close() + + def _generate_id(self): + num = 1 + while num in self.keys: + num = round(random.random() * 1000000) + self.keys.add(num) + return num + + def run(self): + """Execute every test case.""" + measures = {} + for method in ( + self.insert_one_row_with_fetch_after, + self.read_one_row, + self.insert_many_rows, + self.select_many_rows, + self.insert_many_rows_with_mutations, + ): + method(measures) + + self._cleanup() + return measures + + +class SpannerBenchmarkTest(BenchmarkTestBase): + """The original Spanner performance testing class.""" + + def __init__(self): + super().__init__() + 
self._client = Client() + self._instance = self._client.instance(INSTANCE) + self._database = self._instance.database(DATABASE) + + self._many_rows = [] + self._many_rows2 = [] + birth_date = datetime.date(1998, 10, 6) + picture = base64.b64encode("123".encode()) + for num in self._many_rows_ids: + self._many_rows.append( + { + "id": num, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": birth_date, + "picture": picture, + } + ) + for num in self._many_rows2_ids: + self._many_rows2.append((num, "Pete", "Allison", birth_date, picture)) + + # initiate a session + with self._database.snapshot(): + pass + + @measure_execution_time + def insert_one_row_with_fetch_after(self): + self._database.run_in_transaction(insert_one_row, self._one_row) + + @measure_execution_time + def insert_many_rows(self): + self._database.run_in_transaction(insert_many_rows, self._many_rows) + + @measure_execution_time + def insert_many_rows_with_mutations(self): + with self._database.batch() as batch: + batch.insert( + table="Singers", + columns=("id", "first_name", "last_name", "birth_date", "picture"), + values=self._many_rows2, + ) + + @measure_execution_time + def read_one_row(self): + with self._database.snapshot() as snapshot: + keyset = KeySet(all_=True) + snapshot.read( + table="Singers", + columns=("id", "first_name", "last_name", "birth_date", "picture"), + keyset=keyset, + ).one() + + @measure_execution_time + def select_many_rows(self): + with self._database.snapshot() as snapshot: + rows = list( + snapshot.execute_sql("SELECT * FROM Singers ORDER BY last_name") + ) + if len(rows) != 100: + raise ValueError("Wrong number of rows read") + + +class SQLAlchemyBenchmarkTest(BenchmarkTestBase): + """Spanner dialect for SQLAlchemy performance testing class.""" + + def __init__(self): + super().__init__() + self._engine = create_engine( + "spanner:///projects/{project}/instances/{instance}/databases/{db}".format( + project=PROJECT, + instance=INSTANCE, + db=DATABASE, + ) 
+ ) + metadata = MetaData(bind=self._engine) + self._table = Table("Singers", metadata, autoload=True) + + self._conn = self._engine.connect() + + self._many_rows = [] + self._many_rows2 = [] + birth_date = datetime.date(1998, 10, 6) + for num in self._many_rows_ids: + self._many_rows.append( + { + "id": num, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": birth_date, + "picture": b"123", + } + ) + for num in self._many_rows2_ids: + self._many_rows2.append( + { + "id": num, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": birth_date, + "picture": b"123", + } + ) + + @measure_execution_time + def insert_one_row_with_fetch_after(self): + self._conn.execute(insert(self._table).values(self._one_row)) + last_name = self._conn.execute( + select([text("last_name")], from_obj=self._table) + ).fetchone()[0] + if last_name != "Allison": + raise ValueError("Received invalid last name: " + last_name) + + @measure_execution_time + def insert_many_rows(self): + self._conn.execute( + self._table.insert(), + self._many_rows, + ) + + @measure_execution_time + def insert_many_rows_with_mutations(self): + self._conn.execute( + self._table.insert(), + self._many_rows2, + ) + + @measure_execution_time + def read_one_row(self): + row = self._conn.execute(select(["*"], from_obj=self._table)).fetchone() + if not row: + raise ValueError("No rows read") + + @measure_execution_time + def select_many_rows(self): + rows = self._conn.execute(select(["*"], from_obj=self._table)).fetchall() + if len(rows) != 100: + raise ValueError("Wrong number of rows read") + + +def insert_one_row(transaction, one_row): + """A transaction-function for the original Spanner client. + + Inserts a single row into a database and then fetches it back. 
+ """ + transaction.execute_update( + "INSERT INTO `Singers` (id, first_name, last_name, birth_date, picture)" + " VALUES (@id, @first_name, @last_name, @birth_date, @picture)", + params=one_row, + param_types={ + "id": spanner.param_types.INT64, + "first_name": spanner.param_types.STRING, + "last_name": spanner.param_types.STRING, + "birth_date": spanner.param_types.DATE, + "picture": spanner.param_types.BYTES, + }, + ) + last_name = transaction.execute_sql( + "SELECT last_name FROM Singers WHERE id=1" + ).one()[0] + if last_name != "Allison": + raise ValueError("Received invalid last name: " + last_name) + + +def insert_many_rows(transaction, many_rows): + """A transaction-function for the original Spanner client. + + Insert 100 rows into a database. + """ + statements = [] + for row in many_rows: + statements.append( + ( + "INSERT INTO `Singers` (id, first_name, last_name, birth_date, picture)" + " VALUES (@id, @first_name, @last_name, @birth_date, @picture)", + row, + { + "id": spanner.param_types.INT64, + "first_name": spanner.param_types.STRING, + "last_name": spanner.param_types.STRING, + "birth_date": spanner.param_types.DATE, + "picture": spanner.param_types.BYTES, + }, + ) + ) + _, count = transaction.batch_update(statements) + if sum(count) != 99: + raise ValueError("Wrong number of inserts: " + str(sum(count))) + + +def compare_measurements(spanner, alchemy): + """ + Compare the original Spanner client performance measures + with Spanner dialect for SQLAlchemy ones. 
+ """ + comparison = {} + for key in spanner.keys(): + comparison[key] = { + "Spanner, sec": spanner[key], + "SQLAlchemy, sec": alchemy[key], + "SQLAlchemy deviation": round(alchemy[key] - spanner[key], 2), + "SQLAlchemy to Spanner, %": round(alchemy[key] / spanner[key] * 100), + } + return comparison + + +measures = [] +for _ in range(50): + spanner_measures = SpannerBenchmarkTest().run() + alchemy_measures = SQLAlchemyBenchmarkTest().run() + measures.append((spanner_measures, alchemy_measures)) + +agg = {"spanner": {}, "alchemy": {}} + +for span, alch in measures: + for key, value in span.items(): + agg["spanner"].setdefault(key, []).append(value) + agg["alchemy"].setdefault(key, []).append(alch[key]) + +spanner_stats = {} +for key, value in agg["spanner"].items(): + while 0 in value: + value.remove(0) + spanner_stats[key + "_aver"] = round(statistics.mean(value), 2) + spanner_stats[key + "_error"] = round(sem(value), 2) + spanner_stats[key + "_std_dev"] = round(statistics.pstdev(value), 2) + +alchemy_stats = {} +for key, value in agg["alchemy"].items(): + while 0 in value: + value.remove(0) + alchemy_stats[key + "_aver"] = round(statistics.mean(value), 2) + alchemy_stats[key + "_error"] = round(sem(value), 2) + alchemy_stats[key + "_std_dev"] = round(statistics.pstdev(value), 2) + +for key in spanner_stats: + print(key + ":") + print("spanner: ", spanner_stats[key]) + print("alchemy: ", alchemy_stats[key]) diff --git a/packages/sqlalchemy-spanner/test/conftest.py b/packages/sqlalchemy-spanner/test/conftest.py new file mode 100644 index 000000000000..744995c9714d --- /dev/null +++ b/packages/sqlalchemy-spanner/test/conftest.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from contextlib import contextmanager +import importlib +import google.cloud.spanner_v1._opentelemetry_tracing as spanner_tracing +from unittest.mock import MagicMock +from sqlalchemy.dialects import registry +from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import Table +from sqlalchemy.sql.elements import literal + + +# Aggressively monkeypatch trace_call to avoid OpenTelemetry usage entirely. +# This prevents warnings from OpenTelemetry, which would otherwise cause the +# conformance tests to fail. +@contextmanager +def no_op_trace_call(*args, **kwargs): + yield MagicMock() + + +# Patch the definition module +spanner_tracing.trace_call = no_op_trace_call + +# Patch consumers +modules_to_patch = [ + "google.cloud.spanner_v1.snapshot", + "google.cloud.spanner_v1.transaction", + "google.cloud.spanner_v1.session", + "google.cloud.spanner_v1.database", +] +for module_name in modules_to_patch: + try: + module = importlib.import_module(module_name) + module.trace_call = no_op_trace_call + except ImportError: + pass + +registry.register("spanner", "google.cloud.sqlalchemy_spanner", "SpannerDialect") + +pytest.register_assert_rewrite("sqlalchemy.testing.assertions") + +from sqlalchemy.testing.plugin.pytestplugin import * # noqa: E402, F401, F403 + + +@pytest.fixture +def literal_round_trip_spanner(metadata, connection): + # for literal, we test the literal render in an INSERT + # into a typed column. 
we can then SELECT it back as its + # official type; + + def run( + type_, + input_, + output, + filter_=None, + compare=None, + support_whereclause=True, + ): + t = Table("t", metadata, Column("x", type_)) + t.create(connection) + + for value in input_: + ins = t.insert().values(x=literal(value, type_, literal_execute=True)) + connection.execute(ins) + + if support_whereclause: + if compare: + stmt = t.select().where( + t.c.x + == literal( + compare, + type_, + literal_execute=True, + ), + t.c.x + == literal( + input_[0], + type_, + literal_execute=True, + ), + ) + else: + stmt = t.select().where( + t.c.x + == literal( + compare if compare is not None else input_[0], + type_, + literal_execute=True, + ) + ) + else: + stmt = t.select() + + rows = connection.execute(stmt).all() + assert rows, "No rows returned" + for row in rows: + value = row[0] + if filter_ is not None: + value = filter_(value) + assert value in output + + return run diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/__init__.py b/packages/sqlalchemy-spanner/test/mockserver_tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/auto_increment_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/auto_increment_model.py new file mode 100644 index 000000000000..eb67ab8996e3 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/auto_increment_model.py @@ -0,0 +1,28 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import String +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/bit_reversed_sequence_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/bit_reversed_sequence_model.py new file mode 100644 index 000000000000..b76cdd3f955f --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/bit_reversed_sequence_model.py @@ -0,0 +1,33 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import String, BigInteger, Sequence, TextClause +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column( + BigInteger, + Sequence("singer_id"), + server_default=TextClause("GET_NEXT_SEQUENCE_VALUE(SEQUENCE singer_id)"), + primary_key=True, + ) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/commit_timestamp_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/commit_timestamp_model.py new file mode 100644 index 000000000000..28c58b8668d3 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/commit_timestamp_model.py @@ -0,0 +1,32 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime + +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[str] = mapped_column(primary_key=True) + name: Mapped[str] + updated_at: Mapped[datetime.datetime] = mapped_column( + spanner_allow_commit_timestamp=True + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/default_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/default_model.py new file mode 100644 index 000000000000..6a363c572598 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/default_model.py @@ -0,0 +1,30 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import func +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[str] = mapped_column( + server_default=func.GENERATE_UUID(), primary_key=True + ) + name: Mapped[str] diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/float32_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/float32_model.py new file mode 100644 index 000000000000..b6987e97a64e --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/float32_model.py @@ -0,0 +1,30 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import String +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column +from sqlalchemy.types import REAL + + +class Base(DeclarativeBase): + pass + + +class Number(Base): + __tablename__ = "numbers" + number: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(30)) + ln: Mapped[float] = mapped_column(REAL) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/insertmany_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/insertmany_model.py new file mode 100644 index 000000000000..a196e142db16 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/insertmany_model.py @@ -0,0 +1,48 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import datetime +import uuid +from sqlalchemy import text, String +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class SingerUUID(Base): + __tablename__ = "singers_uuid" + id: Mapped[str] = mapped_column( + String(36), + primary_key=True, + server_default=text("GENERATE_UUID()"), + default=lambda: str(uuid.uuid4()), + insert_sentinel=True, + ) + name: Mapped[str] + inserted_at: Mapped[datetime] = mapped_column( + server_default=text("CURRENT_TIMESTAMP()") + ) + + +class SingerIntID(Base): + __tablename__ = "singers_int_id" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String) + inserted_at: Mapped[datetime] = mapped_column( + server_default=text("CURRENT_TIMESTAMP()") + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/interleaved_index.py b/packages/sqlalchemy-spanner/test/mockserver_tests/interleaved_index.py new file mode 100644 index 000000000000..7e59b8e1008d --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/interleaved_index.py @@ -0,0 +1,74 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import ForeignKey, Index, String +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + + singer_id: Mapped[str] = mapped_column(String(36), primary_key=True) + first_name: Mapped[str] + last_name: Mapped[str] + + +class Album(Base): + __tablename__ = "albums" + __table_args__ = { + "spanner_interleave_in": "singers", + "spanner_interleave_on_delete_cascade": True, + } + + singer_id: Mapped[str] = mapped_column( + ForeignKey("singers.singer_id"), primary_key=True + ) + album_id: Mapped[str] = mapped_column(String(36), primary_key=True) + album_title: Mapped[str] + + +class Track(Base): + __tablename__ = "tracks" + __table_args__ = ( + Index( + "idx_name", + "singer_id", + "album_id", + "song_name", + spanner_interleave_in="albums", + ), + { + "spanner_interleave_in": "albums", + "spanner_interleave_on_delete_cascade": True, + }, + ) + + singer_id: Mapped[str] = mapped_column( + ForeignKey("singers.singer_id"), primary_key=True + ) + album_id: Mapped[str] = mapped_column( + ForeignKey("albums.album_id"), primary_key=True + ) + track_id: Mapped[str] = mapped_column(String(36), primary_key=True) + song_name: Mapped[str] + + +Album.__table__.add_is_dependent_on(Singer.__table__) +Track.__table__.add_is_dependent_on(Album.__table__) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/isolation_level_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/isolation_level_model.py new file mode 100644 index 000000000000..9965dbf03dfa --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/isolation_level_model.py @@ -0,0 +1,28 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import String, BigInteger +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/json_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/json_model.py new file mode 100644 index 000000000000..7a8ca5303141 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/json_model.py @@ -0,0 +1,30 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import String, BigInteger +from sqlalchemy.sql.sqltypes import JSON +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Venue(Base): + __tablename__ = "venues" + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) + description = mapped_column(JSON) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/mock_database_admin.py b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_database_admin.py new file mode 100644 index 000000000000..acd739c8de51 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_database_admin.py @@ -0,0 +1,38 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.protobuf import empty_pb2 +import test.mockserver_tests.spanner_database_admin_pb2_grpc as database_admin_grpc +from google.longrunning import operations_pb2 as operations_pb2 + + +# An in-memory mock DatabaseAdmin server that can be used for testing. 
+class DatabaseAdminServicer(database_admin_grpc.DatabaseAdminServicer): + def __init__(self): + self._requests = [] + + @property + def requests(self): + return self._requests + + def clear_requests(self): + self._requests = [] + + def UpdateDatabaseDdl(self, request, context): + self._requests.append(request) + operation = operations_pb2.Operation() + operation.done = True + operation.name = "projects/test-project/operations/test-operation" + operation.response.Pack(empty_pb2.Empty()) + return operation diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/mock_server_test_base.py b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_server_test_base.py new file mode 100644 index 000000000000..6cdf97337e58 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_server_test_base.py @@ -0,0 +1,193 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging
+
+from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode
+from sqlalchemy import Engine, create_engine
+from sqlalchemy.testing.plugin.plugin_base import fixtures
+import google.cloud.spanner_v1.types.type as spanner_type
+import google.cloud.spanner_v1.types.result_set as result_set
+from google.api_core.client_options import ClientOptions
+from google.auth.credentials import AnonymousCredentials
+from google.cloud.spanner_v1 import (
+    Client,
+    ResultSet,
+    TypeCode,
+)
+from google.cloud.spanner_v1.database import Database
+from google.cloud.spanner_v1.instance import Instance
+import grpc
+
+# TODO: Replace this with the mock server in the Spanner client lib
+from test.mockserver_tests.mock_spanner import SpannerServicer, start_mock_server
+from test.mockserver_tests.mock_database_admin import DatabaseAdminServicer
+
+
+def add_result(sql: str, result: ResultSet):  # registers a canned result for an exact SQL string
+    MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result)
+
+
+def add_update_count(
+    sql: str, count: int, dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL
+):
+    if dml_mode == AutocommitDmlMode.PARTITIONED_NON_ATOMIC:
+        stats = dict(row_count_lower_bound=count)  # partitioned DML only reports a lower bound
+    else:
+        stats = dict(row_count_exact=count)
+    result = result_set.ResultSet(dict(stats=result_set.ResultSetStats(stats)))
+    add_result(sql, result)
+
+
+def add_select1_result():
+    add_single_result("select 1", "c", TypeCode.INT64, [("1",)])  # INT64 values travel as strings on the wire
+
+
+def add_single_result(
+    sql: str, column_name: str, type_code: spanner_type.TypeCode, row
+):
+    # Builds a one-column ResultSet with the given row(s) and registers it for `sql`.
+    result = result_set.ResultSet(
+        dict(
+            metadata=result_set.ResultSetMetadata(
+                dict(
+                    row_type=spanner_type.StructType(
+                        dict(
+                            fields=[
+                                spanner_type.StructType.Field(
+                                    dict(
+                                        name=column_name,
+                                        type=spanner_type.Type(dict(code=type_code)),
+                                    )
+                                )
+                            ]
+                        )
+                    )
+                )
+            ),
+        )
+    )
+    result.rows.extend(row)
+    MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result)
+
+
+def add_singer_query_result(sql: str):
+    # Registers a two-column (singers_id INT64, singers_name STRING) result with two fixed rows.
+    result = result_set.ResultSet(
+        dict(
+            metadata=result_set.ResultSetMetadata(
+                dict(
+                    row_type=spanner_type.StructType(
+                        dict(
+                            fields=[
+                                spanner_type.StructType.Field(
+                                    dict(
+                                        name="singers_id",
+                                        type=spanner_type.Type(
+                                            dict(code=spanner_type.TypeCode.INT64)
+                                        ),
+                                    )
+                                ),
+                                spanner_type.StructType.Field(
+                                    dict(
+                                        name="singers_name",
+                                        type=spanner_type.Type(
+                                            dict(code=spanner_type.TypeCode.STRING)
+                                        ),
+                                    )
+                                ),
+                            ]
+                        )
+                    )
+                )
+            ),
+        )
+    )
+    result.rows.extend(
+        [
+            (
+                "1",
+                "Jane Doe",
+            ),
+            (
+                "2",
+                "John Doe",
+            ),
+        ]
+    )
+    add_result(sql, result)
+
+
+class MockServerTestBase(fixtures.TestBase):
+    # Class-level state shared by all tests: one in-process mock server per test class.
+    server: grpc.Server = None  # assigned by setup_class; None until the mock server starts
+    spanner_service: SpannerServicer = None
+    database_admin_service: DatabaseAdminServicer = None
+    port: int = None  # random insecure port chosen by the gRPC server
+    logger: logging.Logger = None
+
+    @classmethod
+    def setup_class(cls):
+        MockServerTestBase.logger = logging.getLogger("level warning")
+        MockServerTestBase.logger.setLevel(logging.WARN)  # NOTE(review): WARN is a deprecated alias of WARNING
+        (
+            MockServerTestBase.server,
+            MockServerTestBase.spanner_service,
+            MockServerTestBase.database_admin_service,
+            MockServerTestBase.port,
+        ) = start_mock_server()
+
+    @classmethod
+    def teardown_class(cls):
+        if MockServerTestBase.server is not None:
+            MockServerTestBase.server.stop(grace=None)  # grace=None: stop immediately, no drain
+            MockServerTestBase.server = None
+
+    def setup_method(self):
+        self._client = None
+        self._instance = None
+        self._database = None
+        _ = self.database  # touch the property so client/instance/database exist before each test
+
+    def teardown_method(self):
+        MockServerTestBase.spanner_service.clear_requests()
+        MockServerTestBase.database_admin_service.clear_requests()
+
+    def create_engine(self) -> Engine:
+        # Points the Spanner dialect at the in-process mock server via the injected client.
+        return create_engine(
+            "spanner:///projects/p/instances/i/databases/d",
+            connect_args={"client": self.client, "logger": MockServerTestBase.logger},
+        )
+
+    @property
+    def client(self) -> Client:
+        if self._client is None:
+            self._client = Client(
+                project="p",
+                credentials=AnonymousCredentials(),  # mock server performs no auth
+                client_options=ClientOptions(
+                    api_endpoint="localhost:" + str(MockServerTestBase.port),
+                ),
+            )
+        return self._client
+
+    @property
+    def instance(self) -> Instance:
+        if self._instance is None:
+            self._instance = self.client.instance("i")
+        return self._instance
+
+    @property
+    def database(self) -> Database:
+        logger = logging.getLogger("level warning")
+        logger.setLevel(logging.WARN)  # NOTE(review): duplicates setup_class logger config — confirm intentional
+        if self._database is None:
+            self._database = self.instance.database("d", logger=logger)
+        return self._database
diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/mock_spanner.py b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_spanner.py
new file mode 100644
index 000000000000..932f6371e42b
--- /dev/null
+++ b/packages/sqlalchemy-spanner/test/mockserver_tests/mock_spanner.py
@@ -0,0 +1,230 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.spanner_v1 import (
+    TransactionOptions,
+    ResultSetMetadata,
+    ExecuteSqlRequest,
+)
+from google.protobuf import empty_pb2
+import test.mockserver_tests.spanner_pb2_grpc as spanner_grpc
+import test.mockserver_tests.spanner_database_admin_pb2_grpc as database_admin_grpc
+from test.mockserver_tests.mock_database_admin import DatabaseAdminServicer
+import google.cloud.spanner_v1.types.result_set as result_set
+import google.cloud.spanner_v1.types.transaction as transaction
+import google.cloud.spanner_v1.types.commit_response as commit
+import google.cloud.spanner_v1.types.spanner as spanner
+from concurrent import futures
+import grpc
+import base64
+
+
+class MockSpanner:  # registry of canned query results, keyed by normalized SQL text
+    def __init__(self):
+        self.results = {}  # normalized (lowercased, stripped) SQL -> ResultSet
+
+    def add_result(self, sql: str, result: result_set.ResultSet):
+        self.results[sql.lower().strip()] = result  # case-insensitive lookup key
+
+    def get_result(self, sql: str) -> result_set.ResultSet:
+        result = self.results.get(sql.lower().strip())
+        if result is None:
+            raise ValueError(f"No result found for {sql}")
+        return result
+
+    def get_result_as_partial_result_sets(
+        self, sql: str, started_transaction: transaction.Transaction
+    ) -> list:  # list[result_set.PartialResultSet]
+        result: result_set.ResultSet = self.get_result(sql)
+        partials = []
+        first = True
+        if len(result.rows) == 0:
+            partial = result_set.PartialResultSet()
+            partial.metadata = ResultSetMetadata(result.metadata)
+            partials.append(partial)
+        else:
+            for row in result.rows:
+                partial = result_set.PartialResultSet()
+                if first:
+                    partial.metadata, first = ResultSetMetadata(result.metadata), False  # only the first partial carries metadata
+                partial.values.extend(row)
+                partials.append(partial)
+        partials[len(partials) - 1].stats = result.stats
+        if started_transaction:
+            partials[0].metadata.transaction = started_transaction
+        return partials
+
+
+# An in-memory mock Spanner server that can be used for testing.
+class SpannerServicer(spanner_grpc.SpannerServicer):
+    def __init__(self):
+        self._requests = []  # every RPC request is recorded here for test assertions
+        self.session_counter = 0
+        self.sessions = {}  # session name -> spanner.Session
+        self.transaction_counter = 0
+        self.transactions = {}  # transaction id (bytes) -> TransactionOptions
+        self._mock_spanner = MockSpanner()
+
+    @property
+    def mock_spanner(self):
+        return self._mock_spanner
+
+    @property
+    def requests(self):
+        return self._requests
+
+    def clear_requests(self):
+        self._requests = []
+
+    def CreateSession(self, request, context):
+        self._requests.append(request)
+        return self.__create_session(request.database, request.session)
+
+    def BatchCreateSessions(self, request, context):
+        self._requests.append(request)
+        sessions = []
+        for _ in range(request.session_count):
+            sessions.append(
+                self.__create_session(request.database, request.session_template)
+            )
+        return spanner.BatchCreateSessionsResponse(dict(session=sessions))
+
+    def __create_session(self, database: str, session_template: spanner.Session):
+        self.session_counter += 1
+        session = spanner.Session()
+        session.name = database + "/sessions/" + str(self.session_counter)
+        session.multiplexed = session_template.multiplexed
+        session.labels.MergeFrom(session_template.labels)
+        session.creator_role = session_template.creator_role
+        self.sessions[session.name] = session
+        return session
+
+    def GetSession(self, request, context):
+        self._requests.append(request)
+        return spanner.Session()
+
+    def ListSessions(self, request, context):
+        self._requests.append(request)
+        return [spanner.Session()]
+
+    def DeleteSession(self, request, context):
+        self._requests.append(request)
+        return empty_pb2.Empty()
+
+    def ExecuteSql(self, request, context):
+        self._requests.append(request)
+        return result_set.ResultSet()
+
+    def ExecuteStreamingSql(self, request: ExecuteSqlRequest, context):
+        self._requests.append(request)
+        started_transaction = None
+        if not request.transaction.begin == TransactionOptions():  # non-empty `begin` => inline-begin a transaction
+            started_transaction = self.__create_transaction(
+                request.session, request.transaction.begin
+            )
+        partials = self.mock_spanner.get_result_as_partial_result_sets(
+            request.sql, started_transaction
+        )
+        for result in partials:
+            yield result
+
+    def ExecuteBatchDml(self, request, context):
+        self._requests.append(request)
+        response = spanner.ExecuteBatchDmlResponse()
+        started_transaction = None
+        if not request.transaction.begin == TransactionOptions():
+            started_transaction = self.__create_transaction(
+                request.session, request.transaction.begin
+            )
+        first = True
+        for statement in request.statements:
+            result = self.mock_spanner.get_result(statement.sql)
+            if first and started_transaction is not None:
+                result = result_set.ResultSet(
+                    self.mock_spanner.get_result(statement.sql)
+                )
+                result.metadata = ResultSetMetadata(result.metadata)
+                result.metadata.transaction = started_transaction; first = False  # only the first result set returns the begun transaction
+            response.result_sets.append(result)
+        return response
+
+    def Read(self, request, context):
+        self._requests.append(request)
+        return result_set.ResultSet()
+
+    def StreamingRead(self, request, context):
+        self._requests.append(request)
+        for result in [result_set.PartialResultSet(), result_set.PartialResultSet()]:
+            yield result
+
+    def BeginTransaction(self, request, context):
+        self._requests.append(request)
+        return self.__create_transaction(request.session, request.options)
+
+    def __create_transaction(
+        self, session: str, options: TransactionOptions
+    ) -> transaction.Transaction:
+        session_name, session = session, self.sessions.get(session)  # .get: unknown name raises the ValueError below, not KeyError
+        if session is None:
+            raise ValueError(f"Session not found: {session_name}")
+        self.transaction_counter += 1
+        id_bytes = bytes(
+            f"{session.name}/transactions/{self.transaction_counter}", "UTF-8"
+        )
+        transaction_id = base64.urlsafe_b64encode(id_bytes)
+        self.transactions[transaction_id] = options
+        return transaction.Transaction(dict(id=transaction_id))
+
+    def Commit(self, request, context):
+        self._requests.append(request)
+        tx = self.transactions.get(request.transaction_id)  # .get: missing id raises the ValueError below, not KeyError
+        if tx is None:
+            raise ValueError(f"Transaction not found: {request.transaction_id}")
+        del self.transactions[request.transaction_id]
+        return commit.CommitResponse()
+
+    def Rollback(self, request, context):
+        self._requests.append(request)
+        return empty_pb2.Empty()
+
+    def PartitionQuery(self, request, context):
+        self._requests.append(request)
+        return spanner.PartitionResponse()
+
+    def PartitionRead(self, request, context):
+        self._requests.append(request)
+        return spanner.PartitionResponse()
+
+    def BatchWrite(self, request, context):
+        self._requests.append(request)
+        for result in [spanner.BatchWriteResponse(), spanner.BatchWriteResponse()]:
+            yield result
+
+
+def start_mock_server() -> (grpc.Server, SpannerServicer, DatabaseAdminServicer, int):
+    # Create a gRPC server.
+    spanner_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+
+    # Add the Spanner services to the gRPC server.
+    spanner_servicer = SpannerServicer()
+    spanner_grpc.add_SpannerServicer_to_server(spanner_servicer, spanner_server)
+    database_admin_servicer = DatabaseAdminServicer()
+    database_admin_grpc.add_DatabaseAdminServicer_to_server(
+        database_admin_servicer, spanner_server
+    )
+
+    # Start the server on a random port.
+    port = spanner_server.add_insecure_port("[::]:0")
+    spanner_server.start()
+    return spanner_server, spanner_servicer, database_admin_servicer, port
diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/not_enforced_fk_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/not_enforced_fk_model.py
new file mode 100644
index 000000000000..36965f01dfd4
--- /dev/null
+++ b/packages/sqlalchemy-spanner/test/mockserver_tests/not_enforced_fk_model.py
@@ -0,0 +1,37 @@
+# Copyright 2025 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import ForeignKey +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[str] = mapped_column(primary_key=True) + name: Mapped[str] + + +class Album(Base): + __tablename__ = "albums" + id: Mapped[str] = mapped_column(primary_key=True) + name: Mapped[str] + singer_id: Mapped[str] = mapped_column( + ForeignKey("singers.id", spanner_not_enforced=True) + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/null_filtered_index.py b/packages/sqlalchemy-spanner/test/mockserver_tests/null_filtered_index.py new file mode 100644 index 000000000000..e4ca5d69d778 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/null_filtered_index.py @@ -0,0 +1,37 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import Index +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + __table_args__ = ( + Index("idx_name", "name"), + Index("idx_uq_name", "name", unique=True), + Index("idx_null_filtered_name", "name", spanner_null_filtered=True), + Index( + "idx_uq_null_filtered_name", "name", unique=True, spanner_null_filtered=True + ), + ) + + id: Mapped[str] = mapped_column(primary_key=True) + name: Mapped[str] diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/pickle_type_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/pickle_type_model.py new file mode 100644 index 000000000000..b3bb47c4bf3c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/pickle_type_model.py @@ -0,0 +1,31 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import Column, Integer, String, PickleType +from sqlalchemy.orm import DeclarativeBase + +from google.cloud.sqlalchemy_spanner.sqlalchemy_spanner import SpannerPickleType + + +class Base(DeclarativeBase): + pass + + +class UserPreferences(Base): + __tablename__ = "user_preferences" + + user_id = Column(Integer, primary_key=True) + username = Column(String(50), nullable=False) + preferences = Column(PickleType(impl=SpannerPickleType), nullable=True) + created_at = Column(String(30), nullable=False) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/quickstart_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/quickstart_model.py new file mode 100644 index 000000000000..ccfa22d7a44c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/quickstart_model.py @@ -0,0 +1,50 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List +from typing import Optional +from sqlalchemy import ForeignKey +from sqlalchemy import String +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column +from sqlalchemy.orm import relationship + + +class Base(DeclarativeBase): + pass + + +class User(Base): + __tablename__ = "user_account" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(30)) + fullname: Mapped[Optional[str]] + addresses: Mapped[List["Address"]] = relationship( + back_populates="user", cascade="all, delete-orphan" + ) + + def __repr__(self) -> str: + return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + + +class Address(Base): + __tablename__ = "address" + id: Mapped[int] = mapped_column(primary_key=True) + email_address: Mapped[str] + user_id: Mapped[int] = mapped_column(ForeignKey("user_account.id")) + user: Mapped["User"] = relationship(back_populates="addresses") + + def __repr__(self) -> str: + return f"Address(id={self.id!r}, email_address={self.email_address!r})" diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/read_only_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/read_only_model.py new file mode 100644 index 000000000000..b76cdd3f955f --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/read_only_model.py @@ -0,0 +1,33 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import String, BigInteger, Sequence, TextClause +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column( + BigInteger, + Sequence("singer_id"), + server_default=TextClause("GET_NEXT_SEQUENCE_VALUE(SEQUENCE singer_id)"), + primary_key=True, + ) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_database_admin_pb2_grpc.py b/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_database_admin_pb2_grpc.py new file mode 100644 index 000000000000..6798f8d90ebb --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_database_admin_pb2_grpc.py @@ -0,0 +1,1269 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! + + +# Generated with the following commands: +# +# pip install grpcio-tools +# git clone git@github.com:googleapis/googleapis.git +# cd googleapis +# python -m grpc_tools.protoc \ +# -I . \ +# --python_out=. --pyi_out=. --grpc_python_out=. 
\ +# ./google/spanner/admin/database/v1/*.proto + +# flake8: noqa + +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.cloud.spanner_admin_database_v1.types import ( + backup as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2, +) +from google.cloud.spanner_admin_database_v1.types import ( + backup_schedule as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2, +) +from google.cloud.spanner_admin_database_v1.types import ( + spanner_database_admin as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2, +) + +GRPC_GENERATED_VERSION = "1.67.0" +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + + _version_not_supported = first_version_is_lower( + GRPC_VERSION, GRPC_GENERATED_VERSION + ) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f"The grpc package installed is at version {GRPC_VERSION}," + + f" but the generated code in google/spanner/admin/database/v1/spanner_database_admin_pb2_grpc.py depends on" + + f" grpcio>={GRPC_GENERATED_VERSION}." + + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}" + + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}." 
+ ) + + +class DatabaseAdminServicer(object): + """Cloud Spanner Database Admin API + + The Cloud Spanner Database Admin API can be used to: + * create, drop, and list databases + * update the schema of pre-existing databases + * create, delete, copy and list backups for a database + * restore a database from an existing backup + """ + + def ListDatabases(self, request, context): + """Lists Cloud Spanner databases.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateDatabase(self, request, context): + """Creates a new Cloud Spanner database and starts to prepare it for serving. + The returned [long-running operation][google.longrunning.Operation] will + have a name of the format `/operations/` and + can be used to track preparation of the database. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Database][google.spanner.admin.database.v1.Database], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetDatabase(self, request, context): + """Gets the state of a Cloud Spanner database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateDatabase(self, request, context): + """Updates a Cloud Spanner database. The returned + [long-running operation][google.longrunning.Operation] can be used to track + the progress of updating the database. If the named database does not + exist, returns `NOT_FOUND`. 
+ + While the operation is pending: + + * The database's + [reconciling][google.spanner.admin.database.v1.Database.reconciling] + field is set to true. + * Cancelling the operation is best-effort. If the cancellation succeeds, + the operation metadata's + [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] + is set, the updates are reverted, and the operation terminates with a + `CANCELLED` status. + * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error + until the pending operation is done (returns successfully or with + error). + * Reading the database via the API continues to give the pre-request + values. + + Upon completion of the returned operation: + + * The new values are in effect and readable via the API. + * The database's + [reconciling][google.spanner.admin.database.v1.Database.reconciling] + field becomes false. + + The returned [long-running operation][google.longrunning.Operation] will + have a name of the format + `projects//instances//databases//operations/` + and can be used to track the database modification. The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Database][google.spanner.admin.database.v1.Database], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateDatabaseDdl(self, request, context): + """Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. The returned + [long-running operation][google.longrunning.Operation] will have a name of + the format `/operations/` and can be used to + track execution of the schema change(s). 
The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DropDatabase(self, request, context): + """Drops (aka deletes) a Cloud Spanner database. + Completed backups for the database will be retained according to their + `expire_time`. + Note: Cloud Spanner might continue to accept requests for a few seconds + after the database has been deleted. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetDatabaseDdl(self, request, context): + """Returns the schema of a Cloud Spanner database as a list of formatted + DDL statements. This method does not show pending schema updates, those may + be queried using the [Operations][google.longrunning.Operations] API. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SetIamPolicy(self, request, context): + """Sets the access control policy on a database or backup resource. + Replaces any existing policy. + + Authorization requires `spanner.databases.setIamPolicy` + permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + For backups, authorization requires `spanner.backups.setIamPolicy` + permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetIamPolicy(self, request, context): + """Gets the access control policy for a database or backup resource. 
+ Returns an empty policy if a database or backup exists but does not have a + policy set. + + Authorization requires `spanner.databases.getIamPolicy` permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + For backups, authorization requires `spanner.backups.getIamPolicy` + permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def TestIamPermissions(self, request, context): + """Returns permissions that the caller has on the specified database or backup + resource. + + Attempting this RPC on a non-existent Cloud Spanner database will + result in a NOT_FOUND error if the user has + `spanner.databases.list` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will + result in a NOT_FOUND error if the user has + `spanner.backups.list` permission on the containing instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackup(self, request, context): + """Starts creating a new Cloud Spanner Backup. + The returned backup [long-running operation][google.longrunning.Operation] + will have a name of the format + `projects//instances//backups//operations/` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Backup][google.spanner.admin.database.v1.Backup], if successful. + Cancelling the returned operation will stop the creation and delete the + backup. There can be only one pending backup creation per database. 
Backup + creation of different databases can run concurrently. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CopyBackup(self, request, context): + """Starts copying a Cloud Spanner Backup. + The returned backup [long-running operation][google.longrunning.Operation] + will have a name of the format + `projects//instances//backups//operations/` + and can be used to track copying of the backup. The operation is associated + with the destination backup. + The [metadata][google.longrunning.Operation.metadata] field type is + [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Backup][google.spanner.admin.database.v1.Backup], if successful. + Cancelling the returned operation will stop the copying and delete the + destination backup. Concurrent CopyBackup requests can run on the same + source backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackup(self, request, context): + """Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackup(self, request, context): + """Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackup(self, request, context): + """Deletes a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackups(self, request, context): + """Lists completed and pending backups. + Backups returned are ordered by `create_time` in descending order, + starting from the most recent `create_time`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def RestoreDatabase(self, request, context): + """Create a new database by restoring from a completed backup. The new + database must be in the same project and in an instance with the same + instance configuration as the instance containing + the backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the format + `projects//instances//databases//operations/`, + and can be used to track the progress of the operation, and to cancel it. + The [metadata][google.longrunning.Operation.metadata] field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type + is [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the restore and + delete the database. + There can be only one database being restored into an instance at a time. + Once the restore operation completes, a new restore operation can be + initiated, without waiting for the optimize operation associated with the + first restore to complete. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListDatabaseOperations(self, request, context): + """Lists database [longrunning-operations][google.longrunning.Operation]. 
+ A database operation has a name of the form + `projects//instances//databases//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackupOperations(self, request, context): + """Lists the backup [long-running operations][google.longrunning.Operation] in + the given instance. A backup operation has a name of the form + `projects//instances//backups//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. Operations returned are ordered by + `operation.metadata.value.progress.start_time` in descending order starting + from the most recently started operation. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListDatabaseRoles(self, request, context): + """Lists Cloud Spanner database roles.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackupSchedule(self, request, context): + """Creates a new backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackupSchedule(self, request, context): + """Gets backup schedule for the input schedule name.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackupSchedule(self, request, context): + """Updates a backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackupSchedule(self, request, context): + """Deletes a backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackupSchedules(self, request, context): + """Lists all the backup schedules for the database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_DatabaseAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + "ListDatabases": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabases, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.deserialize, + 
response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.serialize, + ), + "CreateDatabase": grpc.unary_unary_rpc_method_handler( + servicer.CreateDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetDatabase": grpc.unary_unary_rpc_method_handler( + servicer.GetDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.serialize, + ), + "UpdateDatabase": grpc.unary_unary_rpc_method_handler( + servicer.UpdateDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler( + servicer.UpdateDatabaseDdl, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "DropDatabase": grpc.unary_unary_rpc_method_handler( + servicer.DropDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler( + servicer.GetDatabaseDdl, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.deserialize, 
+ response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.serialize, + ), + "SetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "GetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "TestIamPermissions": grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), + "CreateBackup": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "CopyBackup": grpc.unary_unary_rpc_method_handler( + servicer.CopyBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetBackup": grpc.unary_unary_rpc_method_handler( + servicer.GetBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize, + ), + "UpdateBackup": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackup, + 
request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize, + ), + "DeleteBackup": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackups": grpc.unary_unary_rpc_method_handler( + servicer.ListBackups, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.serialize, + ), + "RestoreDatabase": grpc.unary_unary_rpc_method_handler( + servicer.RestoreDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "ListDatabaseOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabaseOperations, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.serialize, + ), + "ListBackupOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListBackupOperations, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.serialize, + ), + "ListDatabaseRoles": grpc.unary_unary_rpc_method_handler( + 
servicer.ListDatabaseRoles, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.serialize, + ), + "CreateBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "GetBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.GetBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "UpdateBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "DeleteBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackupSchedules": grpc.unary_unary_rpc_method_handler( + servicer.ListBackupSchedules, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.deserialize, + 
response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.serialize, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers( + "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers + ) + + +# This class is part of an EXPERIMENTAL API. +class DatabaseAdmin(object): + """Cloud Spanner Database Admin API + + The Cloud Spanner Database Admin API can be used to: + * create, drop, and list databases + * update the schema of pre-existing databases + * create, delete, copy and list backups for a database + * restore a database from an existing backup + """ + + @staticmethod + def ListDatabases( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + 
google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateDatabaseDdl( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + 
metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DropDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetDatabaseDdl( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def SetIamPolicy( + request, + target, 
+ options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def TestIamPermissions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateBackup( + request, + target, + 
options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CopyBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + 
def UpdateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackups( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + 
timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def RestoreDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListDatabaseOperations( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackupOperations( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", + 
google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListDatabaseRoles( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + 
call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + 
compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackupSchedules( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_pb2_grpc.py b/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_pb2_grpc.py new file mode 100644 index 000000000000..da86ba18a3cc --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/spanner_pb2_grpc.py @@ -0,0 +1,884 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! + +# Generated with the following commands: +# +# pip install grpcio-tools +# git clone git@github.com:googleapis/googleapis.git +# cd googleapis +# python -m grpc_tools.protoc \ +# -I . \ +# --python_out=. --pyi_out=. --grpc_python_out=. 
\ +# ./google/spanner/v1/*.proto + +# flake8: noqa + +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.cloud.spanner_v1.types import ( + commit_response as google_dot_spanner_dot_v1_dot_commit__response__pb2, +) +from google.cloud.spanner_v1.types import ( + result_set as google_dot_spanner_dot_v1_dot_result__set__pb2, +) +from google.cloud.spanner_v1.types import ( + spanner as google_dot_spanner_dot_v1_dot_spanner__pb2, +) +from google.cloud.spanner_v1.types import ( + transaction as google_dot_spanner_dot_v1_dot_transaction__pb2, +) + +GRPC_GENERATED_VERSION = "1.67.0" +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + + _version_not_supported = first_version_is_lower( + GRPC_VERSION, GRPC_GENERATED_VERSION + ) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f"The grpc package installed is at version {GRPC_VERSION}," + + f" but the generated code in google/spanner/v1/spanner_pb2_grpc.py depends on" + + f" grpcio>={GRPC_GENERATED_VERSION}." + + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}" + + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}." + ) + + +class SpannerServicer(object): + """Cloud Spanner API + + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. + """ + + def CreateSession(self, request, context): + """Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner database. + Sessions are meant to be reused for many consecutive + transactions. + + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. 
Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good idea to + delete idle and unneeded sessions. + Aside from explicit deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is deleted, + requests to it return `NOT_FOUND`. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., `"SELECT 1"`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BatchCreateSessions(self, request, context): + """Creates multiple new sessions. + + This API can be used to initialize a session cache on the clients. + See https://goo.gl/TgSFN2 for best practices on session cache management. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetSession(self, request, context): + """Gets a session. Returns `NOT_FOUND` if the session does not exist. + This is mainly useful for determining whether a session is still + alive. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListSessions(self, request, context): + """Lists all sessions in a given database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteSession(self, request, context): + """Ends a session, releasing server resources associated with it. This will + asynchronously trigger cancellation of any operations that are running with + this session. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteSql(self, request, context): + """Executes an SQL statement, returning all results in a single reply. This + method cannot be used to return a result set larger than 10 MiB; + if the query yields more data than that, the query fails with + a `FAILED_PRECONDITION` error. + + Operations inside read-write transactions might return `ABORTED`. If + this occurs, the application should restart the transaction from + the beginning. See [Transaction][google.spanner.v1.Transaction] for more + details. + + Larger result sets can be fetched in streaming fashion by calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteStreamingSql(self, request, context): + """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the + result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on + the size of the returned result set. However, no individual row in the + result set can exceed 100 MiB, and no column value can exceed 10 MiB. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteBatchDml(self, request, context): + """Executes a batch of SQL DML statements. This method allows many statements + to be run with lower latency than submitting them sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can succeed even if + a statement fails. 
The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement that failed. + Clients must inspect this field to determine whether an error occurred. + + Execution stops after the first failed statement; the remaining statements + are not executed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Read(self, request, context): + """Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be + used to return a result set larger than 10 MiB; if the read matches more + data than that, the read fails with a `FAILED_PRECONDITION` + error. + + Reads inside read-write transactions might return `ABORTED`. If + this occurs, the application should restart the transaction from + the beginning. See [Transaction][google.spanner.v1.Transaction] for more + details. + + Larger result sets can be yielded in streaming fashion by calling + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def StreamingRead(self, request, context): + """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set + as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no + limit on the size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can exceed + 10 MiB. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BeginTransaction(self, request, context): + """Begins a new transaction. 
This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + side-effect. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Commit(self, request, context): + """Commits a transaction. The request includes the mutations to be + applied to rows in the database. + + `Commit` might return an `ABORTED` error. This can occur at any time; + commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + the transaction from the beginning, re-using the same session. + + On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, + for example, if the client job experiences a 1+ hour networking failure. + At that point, Cloud Spanner has lost track of the transaction outcome and + we recommend that you perform another read from the database to see the + state of things as they are now. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Rollback(self, request, context): + """Rolls back a transaction, releasing any locks it holds. It is a good + idea to call this for any transaction that includes one or more + [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately + decides not to commit. + + `Rollback` returns `OK` if it successfully aborts the transaction, the + transaction was already aborted, or the transaction is not + found. `Rollback` never returns `ABORTED`. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def PartitionQuery(self, request, context): + """Creates a set of partition tokens that can be used to execute a query + operation in parallel. Each of the returned partition tokens can be used + by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to + specify a subset of the query result to read. The same session and + read-only transaction must be used by the PartitionQueryRequest used to + create the partition tokens and the ExecuteSqlRequests that use the + partition tokens. + + Partition tokens become invalid when the session used to create them + is deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the query, and + the whole operation must be restarted from the beginning. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def PartitionRead(self, request, context): + """Creates a set of partition tokens that can be used to execute a read + operation in parallel. Each of the returned partition tokens can be used + by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a + subset of the read result to read. The same session and read-only + transaction must be used by the PartitionReadRequest used to create the + partition tokens and the ReadRequests that use the partition tokens. There + are no ordering guarantees on rows returned among the returned partition + tokens, or even within each individual StreamingRead call issued with a + partition_token. + + Partition tokens become invalid when the session used to create them + is deleted, is idle for too long, begins a new transaction, or becomes too + old. 
When any of these happen, it is not possible to resume the read, and + the whole operation must be restarted from the beginning. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BatchWrite(self, request, context): + """Batches the supplied mutation groups in a collection of efficient + transactions. All mutations in a group are committed atomically. However, + mutations across groups can be committed non-atomically in an unspecified + order and thus, they must be independent of each other. Partial failure is + possible, i.e., some groups may have been committed successfully, while + some may have failed. The results of individual batches are streamed into + the response as the batches are applied. + + BatchWrite requests are not replay protected, meaning that each mutation + group may be applied more than once. Replays of non-idempotent mutations + may have undesirable effects. For example, replays of an insert mutation + may produce an already exists error or if you use generated or commit + timestamp-based keys, it may result in additional rows being added to the + mutation's table. We recommend structuring your mutation groups to be + idempotent to avoid this issue. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_SpannerServicer_to_server(servicer, server): + rpc_method_handlers = { + "CreateSession": grpc.unary_unary_rpc_method_handler( + servicer.CreateSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize, + ), + "BatchCreateSessions": grpc.unary_unary_rpc_method_handler( + servicer.BatchCreateSessions, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.serialize, + ), + "GetSession": grpc.unary_unary_rpc_method_handler( + servicer.GetSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize, + ), + "ListSessions": grpc.unary_unary_rpc_method_handler( + servicer.ListSessions, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.serialize, + ), + "DeleteSession": grpc.unary_unary_rpc_method_handler( + servicer.DeleteSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ExecuteSql": grpc.unary_unary_rpc_method_handler( + servicer.ExecuteSql, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize, + ), + "ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler( + servicer.ExecuteStreamingSql, 
+ request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize, + ), + "ExecuteBatchDml": grpc.unary_unary_rpc_method_handler( + servicer.ExecuteBatchDml, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.serialize, + ), + "Read": grpc.unary_unary_rpc_method_handler( + servicer.Read, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize, + ), + "StreamingRead": grpc.unary_stream_rpc_method_handler( + servicer.StreamingRead, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize, + ), + "BeginTransaction": grpc.unary_unary_rpc_method_handler( + servicer.BeginTransaction, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.serialize, + ), + "Commit": grpc.unary_unary_rpc_method_handler( + servicer.Commit, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.serialize, + ), + "Rollback": grpc.unary_unary_rpc_method_handler( + servicer.Rollback, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "PartitionQuery": grpc.unary_unary_rpc_method_handler( + servicer.PartitionQuery, + 
request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize, + ), + "PartitionRead": grpc.unary_unary_rpc_method_handler( + servicer.PartitionRead, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize, + ), + "BatchWrite": grpc.unary_stream_rpc_method_handler( + servicer.BatchWrite, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.serialize, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.spanner.v1.Spanner", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers( + "google.spanner.v1.Spanner", rpc_method_handlers + ) + + +# This class is part of an EXPERIMENTAL API. +class Spanner(object): + """Cloud Spanner API + + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. 
+ """ + + @staticmethod + def CreateSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/CreateSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BatchCreateSessions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/BatchCreateSessions", + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/GetSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListSessions( + request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ListSessions", + google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/DeleteSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.to_json, + google_dot_protobuf_dot_empty__pb2.Empty.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteSql( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ExecuteSql", + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteStreamingSql( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + 
timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteBatchDml( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ExecuteBatchDml", + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Read( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Read", + google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def StreamingRead( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + 
"/google.spanner.v1.Spanner/StreamingRead", + google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BeginTransaction( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/BeginTransaction", + google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.to_json, + google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Commit( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Commit", + google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.to_json, + google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Rollback( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Rollback", + google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.to_json, + 
google_dot_protobuf_dot_empty__pb2.Empty.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def PartitionQuery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/PartitionQuery", + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def PartitionRead( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/PartitionRead", + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BatchWrite( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.spanner.v1.Spanner/BatchWrite", + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.from_json, + options, + channel_credentials, + insecure, 
+ call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/stale_read_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/stale_read_model.py new file mode 100644 index 000000000000..025a56d21997 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/stale_read_model.py @@ -0,0 +1,28 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import String, BigInteger +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/tags_model.py b/packages/sqlalchemy-spanner/test/mockserver_tests/tags_model.py new file mode 100644 index 000000000000..9965dbf03dfa --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/tags_model.py @@ -0,0 +1,28 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import String, BigInteger +from sqlalchemy.orm import DeclarativeBase +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import mapped_column + + +class Base(DeclarativeBase): + pass + + +class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_auto_increment.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_auto_increment.py new file mode 100644 index 000000000000..9e8051dbde7c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_auto_increment.py @@ -0,0 +1,181 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + ResultSet, + CreateSessionRequest, + ExecuteSqlRequest, + CommitRequest, + BeginTransactionRequest, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, + add_update_count, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestAutoIncrement(MockServerTestBase): + def test_create_table(self): + from test.mockserver_tests.auto_increment_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + + def test_create_auto_increment_table(self): + from test.mockserver_tests.auto_increment_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + engine.dialect.use_auto_increment = True + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid INT64 NOT NULL AUTO_INCREMENT, \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + 
+ def test_create_table_with_specific_sequence_kind(self): + from test.mockserver_tests.auto_increment_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + engine.dialect.default_sequence_kind = "non_existing_kind" + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (non_existing_kind), \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + + def test_insert_row(self): + from test.mockserver_tests.auto_increment_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = self.create_engine() + + with Session(engine) as session: + singer = Singer(name="Test") + session.add(singer) + # Flush the session to send the insert statement to the database. + session.flush() + eq_(987654321, singer.id) + session.commit() + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + + def test_insert_row_with_pk_value(self): + from test.mockserver_tests.auto_increment_model import Singer + + # SQLAlchemy should not use a THEN RETURN clause when a value for the + # primary key has been set on the model. + add_update_count("INSERT INTO singers (id, name) VALUES (@a0, @a1)", 1) + engine = self.create_engine() + + with Session(engine) as session: + # Manually specify a value for the primary key. 
+ singer = Singer(id=1, name="Test") + session.add(singer) + # Flush the session to send the insert statement to the database. + session.flush() + eq_(1, singer.id) + session.commit() + + def add_insert_result(self, sql): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ) + ] + ) + ) + ) + ), + stats=result_set.ResultSetStats( + dict( + row_count_exact=1, + ) + ), + ) + ) + result.rows.extend([("987654321",)]) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_basics.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_basics.py new file mode 100644 index 000000000000..3e6c2fe02737 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_basics.py @@ -0,0 +1,319 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime + +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode +from sqlalchemy import ( + create_engine, + select, + MetaData, + Table, + Column, + Index, + Integer, + String, + func, + text, + BigInteger, + Enum, +) +from sqlalchemy.orm import Session, DeclarativeBase, Mapped, mapped_column +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + ResultSet, + TypeCode, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_select1_result, + add_result, + add_single_result, + add_update_count, + add_singer_query_result, +) + + +class TestBasics(MockServerTestBase): + def verify_select1(self, results): + result_list = [] + for row in results: + result_list.append(row) + eq_(1, row[0]) + eq_(1, len(result_list)) + requests = self.spanner_service.requests + eq_(2, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], ExecuteSqlRequest) + + def test_select1(self): + add_select1_result() + with self.database.snapshot() as snapshot: + results = snapshot.execute_sql("select 1") + self.verify_select1(results) + + def test_sqlalchemy_select1(self): + add_select1_result() + engine = self.create_engine() + with engine.connect().execution_options( + isolation_level="AUTOCOMMIT" + ) as connection: + results = connection.execute( + select(1).execution_options(request_tag="my-tag") + ).fetchall() + self.verify_select1(results) + request: ExecuteSqlRequest = self.spanner_service.requests[1] + eq_("my-tag", request.request_options.request_tag) + + def test_sqlalchemy_select_now(self): + now = datetime.datetime.now(datetime.UTC) + iso_now = now.isoformat().replace("+00:00", "Z") + add_single_result( + "SELECT current_timestamp AS now_1", + "now_1", + TypeCode.TIMESTAMP, + [(iso_now,)], + ) + engine = 
self.create_engine() + with engine.connect().execution_options( + isolation_level="AUTOCOMMIT" + ) as connection: + spanner_now = connection.execute(select(func.now())).fetchone()[0] + eq_(spanner_now.timestamp(), now.timestamp()) + + def test_create_table(self): + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="users" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + metadata = MetaData() + Table( + "users", + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("status", Enum("a", "b", "cee", create_constraint=True)), + ) + metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE users (\n" + "\tuser_id INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tuser_name STRING(16) NOT NULL, \n" + "\tstatus STRING(3), \n" + "\tCHECK (status IN ('a', 'b', 'cee'))\n" + ") PRIMARY KEY (user_id)", + requests[0].statements[0], + ) + + def test_create_table_in_schema(self): + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="schema" AND TABLE_NAME="users" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + metadata = MetaData() + Table( + "users", + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Index("ix_users_user_id", "user_id"), + schema="schema", + ) + metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(2, len(requests[0].statements)) + + eq_( + "CREATE TABLE schema.users (\n" + "\tuser_id INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tuser_name STRING(16) NOT NULL\n" + ") PRIMARY KEY 
(user_id)", + requests[0].statements[0], + ) + eq_( + "CREATE INDEX schema.ix_users_user_id ON schema.users (user_id)", + requests[0].statements[1], + ) + + def test_create_multiple_tables(self): + for i in range(2): + add_result( + f"""SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="table{i}" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + metadata = MetaData() + for i in range(2): + Table( + "table" + str(i), + metadata, + Column("id", Integer, primary_key=True), + Column("value", String(16), nullable=False), + ) + metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(2, len(requests[0].statements)) + for i in range(2): + eq_( + f"CREATE TABLE table{i} (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tvalue STRING(16) NOT NULL" + "\n) PRIMARY KEY (id)", + requests[0].statements[i], + ) + + def test_partitioned_dml(self): + sql = "UPDATE singers SET checked=true WHERE active = true" + add_update_count(sql, 100, AutocommitDmlMode.PARTITIONED_NON_ATOMIC) + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={ + "client": self.client, + "logger": MockServerTestBase.logger, + "ignore_transaction_warnings": True, + }, + ) + # TODO: Support autocommit_dml_mode as a connection variable in execution + # options. 
+ with engine.connect().execution_options( + isolation_level="AUTOCOMMIT" + ) as connection: + connection.connection.set_autocommit_dml_mode( + AutocommitDmlMode.PARTITIONED_NON_ATOMIC + ) + results = connection.execute(text(sql)).rowcount + eq_(100, results) + + def test_select_for_update(self): + class Base(DeclarativeBase): + pass + + class Singer(Base): + __tablename__ = "singers" + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) + + query = ( + "SELECT singers.id AS singers_id, singers.name AS singers_name\n" + "FROM singers\n" + "WHERE singers.id = @a0\n" + " LIMIT @a1 FOR UPDATE" + ) + add_singer_query_result(query) + update = "UPDATE singers SET name=@a0 WHERE singers.id = @a1" + add_update_count(update, 1) + + engine = self.create_engine() + + with Session(engine) as session: + singer = ( + session.query(Singer).filter(Singer.id == 1).with_for_update().first() + ) + singer.name = "New Name" + session.add(singer) + session.commit() + + def test_database_role(self): + add_select1_result() + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={ + "client": self.client, + "logger": MockServerTestBase.logger, + "database_role": "my_role", + }, + ) + with Session(engine.execution_options(isolation_level="autocommit")) as session: + session.execute(select(1)) + requests = self.spanner_service.requests + eq_(2, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], ExecuteSqlRequest) + request: CreateSessionRequest = requests[0] + eq_("my_role", request.session.creator_role) + + def test_select_table_in_named_schema(self): + class Base(DeclarativeBase): + pass + + class Singer(Base): + __tablename__ = "singers" + __table_args__ = {"schema": "my_schema"} + id: Mapped[int] = mapped_column(BigInteger, primary_key=True) + name: Mapped[str] = mapped_column(String) + + query = ( + "SELECT" + " singers_1.id AS my_schema_singers_id," + 
" singers_1.name AS my_schema_singers_name\n" + "FROM my_schema.singers AS singers_1\n" + "WHERE singers_1.id = @a0\n" + " LIMIT @a1" + ) + add_singer_query_result(query) + engine = self.create_engine() + + insert = "INSERT INTO my_schema.singers (name) VALUES (@a0) THEN RETURN id" + add_single_result(insert, "id", TypeCode.INT64, [("1",)]) + with Session(engine) as session: + singer = Singer(name="New Name") + session.add(singer) + session.commit() + + update = ( + "UPDATE my_schema.singers AS singers_1 " + "SET name=@a0 " + "WHERE singers_1.id = @a1" + ) + add_update_count(update, 1) + with Session(engine) as session: + singer = session.query(Singer).filter(Singer.id == 1).first() + singer.name = "New Name" + session.add(singer) + session.commit() + + delete = "DELETE FROM my_schema.singers AS singers_1 WHERE singers_1.id = @a0" + add_update_count(delete, 1) + with Session(engine) as session: + singer = session.query(Singer).filter(Singer.id == 1).first() + session.delete(singer) + session.commit() diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_bit_reversed_sequence.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_bit_reversed_sequence.py new file mode 100644 index 000000000000..b54bb367bda7 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_bit_reversed_sequence.py @@ -0,0 +1,126 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + ResultSet, + CreateSessionRequest, + ExecuteSqlRequest, + CommitRequest, + BeginTransactionRequest, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestBitReversedSequence(MockServerTestBase): + def test_create_table(self): + from test.mockserver_tests.bit_reversed_sequence_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + add_result( + """SELECT true + FROM INFORMATION_SCHEMA.SEQUENCES + WHERE NAME="singer_id" + AND SCHEMA="" + LIMIT 1""", + ResultSet(), + ) + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(2, len(requests[0].statements)) + eq_( + "CREATE SEQUENCE singer_id OPTIONS " + "(sequence_kind = 'bit_reversed_positive')", + requests[0].statements[0], + ) + eq_( + "CREATE TABLE singers (\n" + "\tid INT64 NOT NULL DEFAULT " + "(GET_NEXT_SEQUENCE_VALUE(SEQUENCE singer_id)), \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[1], + ) + + def test_insert_row(self): + from test.mockserver_tests.bit_reversed_sequence_model import Singer + + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ) + ] + ) + ) + ) + ), + stats=result_set.ResultSetStats( + dict( + 
row_count_exact=1, + ) + ), + ) + ) + result.rows.extend(["1"]) + + add_result( + "INSERT INTO singers (id, name) " + "VALUES ( GET_NEXT_SEQUENCE_VALUE(SEQUENCE singer_id), @a0) " + "THEN RETURN id", + result, + ) + engine = self.create_engine() + + with Session(engine) as session: + singer = Singer(name="Test") + session.add(singer) + # Flush the session to send the insert statement to the database. + session.flush() + eq_(1, singer.id) + session.commit() + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_commit_timestamp.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_commit_timestamp.py new file mode 100644 index 000000000000..f70fcec66b35 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_commit_timestamp.py @@ -0,0 +1,57 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ResultSet +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest + + +class TestCommitTimestamp(MockServerTestBase): + def test_create_table(self): + from test.mockserver_tests.commit_timestamp_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + add_result( + """SELECT true + FROM INFORMATION_SCHEMA.SEQUENCES + WHERE NAME="singer_id" + AND SCHEMA="" + LIMIT 1""", + ResultSet(), + ) + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid STRING(MAX) NOT NULL, \n" + "\tname STRING(MAX) NOT NULL, \n" + "\tupdated_at TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true)\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_default.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_default.py new file mode 100644 index 000000000000..9b46ede00d5d --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_default.py @@ -0,0 +1,49 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import create_engine +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import FixedSizePool, ResultSet +from test.mockserver_tests.mock_server_test_base import MockServerTestBase, add_result +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest + + +class TestCreateTableDefault(MockServerTestBase): + def test_create_table_with_default(self): + from test.mockserver_tests.default_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={"client": self.client, "pool": FixedSizePool(size=10)}, + ) + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid STRING(MAX) NOT NULL DEFAULT (GENERATE_UUID()), \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_float32.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_float32.py new file mode 100644 index 000000000000..50cd6a861701 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_float32.py @@ -0,0 +1,73 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy.orm import Session +from sqlalchemy.testing import ( + eq_, + is_instance_of, + is_false, +) +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + ResultSet, + ResultSetStats, + BeginTransactionRequest, + CommitRequest, + TypeCode, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) + + +class TestFloat32(MockServerTestBase): + def test_insert_data(self): + from test.mockserver_tests.float32_model import Number + + update_count = ResultSet( + dict( + stats=ResultSetStats( + dict( + row_count_exact=1, + ) + ) + ) + ) + add_result( + "INSERT INTO numbers (number, name, ln) VALUES (@a0, @a1, @a2)", + update_count, + ) + + engine = self.create_engine() + with Session(engine) as session: + n1 = Number(number=1, name="One", ln=0.0) + session.add_all([n1]) + session.commit() + + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + request: ExecuteSqlRequest = requests[2] + eq_(3, len(request.params)) + eq_("1", request.params["a0"]) + eq_("One", request.params["a1"]) + eq_(0.0, request.params["a2"]) + eq_(TypeCode.INT64, request.param_types["a0"].code) + eq_(TypeCode.STRING, request.param_types["a1"].code) + is_false("a2" in request.param_types) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_insertmany.py 
b/packages/sqlalchemy-spanner/test/mockserver_tests/test_insertmany.py new file mode 100644 index 000000000000..f5b9f882e3a4 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_insertmany.py @@ -0,0 +1,191 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from unittest import mock + +import sqlalchemy +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + ExecuteSqlRequest, + CommitRequest, + RollbackRequest, + BeginTransactionRequest, + CreateSessionRequest, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestInsertmany(MockServerTestBase): + @mock.patch.object(uuid, "uuid4", mock.MagicMock(side_effect=["a", "b"])) + def test_insertmany_with_uuid_sentinels(self): + """Ensures one bulk insert for ORM objects distinguished by uuid.""" + from test.mockserver_tests.insertmany_model import SingerUUID + + self.add_uuid_insert_result( + "INSERT INTO singers_uuid (id, name) " + "VALUES (@a0, @a1), (@a2, @a3) " + "THEN RETURN inserted_at, id" + ) + engine = self.create_engine() + + with Session(engine) as session: + session.add(SingerUUID(name="a")) + session.add(SingerUUID(name="b")) + session.commit() + + # Verify the requests that 
we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + + def test_no_insertmany_with_bit_reversed_id(self): + """Ensures we don't try to bulk insert rows with bit-reversed PKs. + + SQLAlchemy's insertmany support requires either incrementing + PKs or client-side supplied sentinel values such as UUIDs. + Spanner's bit-reversed integer PKs don't meet the ordering + requirement, so we need to make sure we don't try to bulk + insert with them. + """ + from test.mockserver_tests.insertmany_model import SingerIntID + + self.add_int_id_insert_result( + "INSERT INTO singers_int_id (name) " + "VALUES (@a0) " + "THEN RETURN id, inserted_at" + ) + engine = self.create_engine() + + with Session(engine) as session: + session.add(SingerIntID(name="a")) + session.add(SingerIntID(name="b")) + try: + session.commit() + except sqlalchemy.exc.SAWarning: + # This will fail because we're returning the same PK + # for two rows. The mock server doesn't currently + # support associating the same query with two + # different results. For our purposes that's okay -- + # we just want to ensure we generate two INSERTs, not + # one. + pass + + # Verify the requests that we got. 
+ requests = self.spanner_service.requests + eq_(5, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], ExecuteSqlRequest) + is_instance_of(requests[4], RollbackRequest) + + def add_uuid_insert_result(self, sql): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="inserted_at", + type=spanner_type.Type( + dict( + code=spanner_type.TypeCode.TIMESTAMP + ) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.STRING) + ), + ) + ), + ] + ) + ) + ) + ), + ) + ) + result.rows.extend( + [ + ( + "2020-06-02T23:58:40Z", + "a", + ), + ( + "2020-06-02T23:58:41Z", + "b", + ), + ] + ) + add_result(sql, result) + + def add_int_id_insert_result(self, sql): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="inserted_at", + type=spanner_type.Type( + dict( + code=spanner_type.TypeCode.TIMESTAMP + ) + ), + ) + ), + ] + ) + ) + ) + ), + ) + ) + result.rows.extend( + [ + ( + "1", + "2020-06-02T23:58:40Z", + ), + ] + ) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_interleaved_index.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_interleaved_index.py new file mode 100644 index 000000000000..198f64318dd5 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_interleaved_index.py @@ -0,0 +1,102 @@ +# Copyright 2025 Google LLC All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from sqlalchemy import create_engine
+from sqlalchemy.testing import eq_, is_instance_of
+from google.cloud.spanner_v1 import (
+    FixedSizePool,
+    ResultSet,
+)
+from test.mockserver_tests.mock_server_test_base import (
+    MockServerTestBase,
+    add_result,
+)
+from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest
+
+
+class TestNullFilteredIndex(MockServerTestBase):
+    """Ensure we emit correct DDL for interleaved tables and indexes."""
+
+    def test_create_table(self):
+        from test.mockserver_tests.interleaved_index import Base
+
+        add_result(
+            """SELECT true
+FROM INFORMATION_SCHEMA.TABLES
+WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers"
+LIMIT 1
+""",
+            ResultSet(),
+        )
+        add_result(
+            """SELECT true
+FROM INFORMATION_SCHEMA.TABLES
+WHERE TABLE_SCHEMA="" AND TABLE_NAME="albums"
+LIMIT 1
+""",
+            ResultSet(),
+        )
+        add_result(
+            """SELECT true
+FROM INFORMATION_SCHEMA.TABLES
+WHERE TABLE_SCHEMA="" AND TABLE_NAME="tracks"
+LIMIT 1
+""",
+            ResultSet(),
+        )
+        engine = create_engine(
+            "spanner:///projects/p/instances/i/databases/d",
+            connect_args={"client": self.client, "pool": FixedSizePool(size=10)},
+        )
+        Base.metadata.create_all(engine)
+        requests = self.database_admin_service.requests
+        eq_(1, len(requests))
+        is_instance_of(requests[0], UpdateDatabaseDdlRequest)
+        eq_(4, len(requests[0].statements))
+        eq_(
+            "CREATE TABLE singers (\n"
+            "\tsinger_id STRING(36) NOT NULL, \n"
+            "\tfirst_name STRING(MAX) 
NOT NULL, \n" + "\tlast_name STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (singer_id)", + requests[0].statements[0], + ) + eq_( + "CREATE TABLE albums (\n" + "\tsinger_id STRING(36) NOT NULL, \n" + "\talbum_id STRING(36) NOT NULL, \n" + "\talbum_title STRING(MAX) NOT NULL, \n" + "\tFOREIGN KEY(singer_id) REFERENCES singers (singer_id)\n" + ") PRIMARY KEY (singer_id, album_id),\n" + "INTERLEAVE IN PARENT singers ON DELETE CASCADE", + requests[0].statements[1], + ) + eq_( + "CREATE TABLE tracks (\n" + "\tsinger_id STRING(36) NOT NULL, \n" + "\talbum_id STRING(36) NOT NULL, \n" + "\ttrack_id STRING(36) NOT NULL, \n" + "\tsong_name STRING(MAX) NOT NULL, \n" + "\tFOREIGN KEY(singer_id) REFERENCES singers (singer_id), \n" + "\tFOREIGN KEY(album_id) REFERENCES albums (album_id)\n" + ") PRIMARY KEY (singer_id, album_id, track_id),\n" + "INTERLEAVE IN PARENT albums ON DELETE CASCADE", + requests[0].statements[2], + ) + eq_( + "CREATE INDEX idx_name ON tracks " + "(singer_id, album_id, song_name), " + "INTERLEAVE IN albums", + requests[0].statements[3], + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_isolation_level.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_isolation_level.py new file mode 100644 index 000000000000..21dca30579fe --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_isolation_level.py @@ -0,0 +1,198 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest +from sqlalchemy import create_engine +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + CommitRequest, + BeginTransactionRequest, + TransactionOptions, +) + +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + +ISOLATION_LEVEL_UNSPECIFIED = ( + TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED +) + + +class TestIsolationLevel(MockServerTestBase): + def test_default_isolation_level(self): + from test.mockserver_tests.isolation_level_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = self.create_engine() + + with Session(engine) as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + self.verify_isolation_level( + TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED + ) + + def test_engine_isolation_level(self): + from test.mockserver_tests.isolation_level_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={"client": self.client, "logger": MockServerTestBase.logger}, + isolation_level="REPEATABLE READ", + ) + + with Session(engine) as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + self.verify_isolation_level(TransactionOptions.IsolationLevel.REPEATABLE_READ) + + def test_execution_options_isolation_level(self): + from test.mockserver_tests.isolation_level_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = self.create_engine() + + with Session( + engine.execution_options(isolation_level="repeatable read") + ) 
as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + self.verify_isolation_level(TransactionOptions.IsolationLevel.REPEATABLE_READ) + + def test_override_engine_isolation_level(self): + from test.mockserver_tests.isolation_level_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={"client": self.client, "logger": MockServerTestBase.logger}, + isolation_level="REPEATABLE READ", + ) + + with Session( + engine.execution_options(isolation_level="SERIALIZABLE") + ) as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + self.verify_isolation_level(TransactionOptions.IsolationLevel.SERIALIZABLE) + + def test_auto_commit(self): + from test.mockserver_tests.isolation_level_model import Singer + + self.add_insert_result("INSERT INTO singers (name) VALUES (@a0) THEN RETURN id") + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={ + "client": self.client, + "logger": MockServerTestBase.logger, + "ignore_transaction_warnings": True, + }, + ) + + with Session( + engine.execution_options( + isolation_level="AUTOCOMMIT", ignore_transaction_warnings=True + ) + ) as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + + # Verify the requests that we got. 
+ requests = self.spanner_service.requests + eq_(3, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], ExecuteSqlRequest) + is_instance_of(requests[2], CommitRequest) + execute_request: ExecuteSqlRequest = requests[1] + eq_( + TransactionOptions( + dict( + isolation_level=ISOLATION_LEVEL_UNSPECIFIED, + read_write=TransactionOptions.ReadWrite(), + ) + ), + execute_request.transaction.begin, + ) + + def test_invalid_isolation_level(self): + from test.mockserver_tests.isolation_level_model import Singer + + engine = self.create_engine() + with pytest.raises(ValueError): + with Session(engine.execution_options(isolation_level="foo")) as session: + singer = Singer(name="Test") + session.add(singer) + session.commit() + + def verify_isolation_level(self, level): + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + begin_request: BeginTransactionRequest = requests[1] + eq_( + TransactionOptions( + dict( + isolation_level=level, + read_write=TransactionOptions.ReadWrite(), + ) + ), + begin_request.options, + ) + + def add_insert_result(self, sql): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ) + ] + ) + ) + ) + ), + stats=result_set.ResultSetStats( + dict( + row_count_exact=1, + ) + ), + ) + ) + result.rows.extend([("987654321",)]) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_json.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_json.py new file mode 100644 index 000000000000..244e5d627576 
--- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_json.py @@ -0,0 +1,194 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import func, select, text +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + ResultSet, + CreateSessionRequest, + ExecuteSqlRequest, + CommitRequest, + BeginTransactionRequest, + TypeCode, + JsonObject, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, + add_update_count, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestJson(MockServerTestBase): + def test_create_table(self): + from test.mockserver_tests.json_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="venues" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE venues (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tname STRING(MAX) NOT NULL, \n" + 
"\tdescription JSON\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + + def test_insert_dict(self): + self._test_insert_json( + {"type": "Stadium", "size": "Great"}, '{"size":"Great","type":"Stadium"}' + ) + + def test_insert_array(self): + self._test_insert_json( + [{"type": "Stadium", "size": "Great"}], + '[{"size":"Great","type":"Stadium"}]', + ) + + def test_insert_fn(self): + add_update_count( + "INSERT INTO venues (id, name, description) " + "VALUES (@a0, @a1, parse_json(@a2, wide_number_mode=>'round'))", + 1, + ) + self._test_insert_json( + func.parse_json( + '{"type": "Stadium", "size": "Great"}', + text("wide_number_mode=>'round'"), + ), + '{"type": "Stadium", "size": "Great"}', + expected_type_code=TypeCode.STRING, + ) + + def _test_insert_json( + self, description, expected, expected_type_code=TypeCode.JSON + ): + from test.mockserver_tests.json_model import Venue + + add_update_count( + "INSERT INTO venues (id, name, description) VALUES (@a0, @a1, @a2)", 1 + ) + engine = self.create_engine() + + with Session(engine) as session: + venue = Venue(id=1, name="Test", description=description) + session.add(venue) + session.commit() + + # Verify the requests that we got. 
+ requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + request: ExecuteSqlRequest = requests[2] + eq_(3, len(request.params)) + eq_("1", request.params["a0"]) + eq_("Test", request.params["a1"]) + eq_(expected, request.params["a2"]) + eq_(TypeCode.INT64, request.param_types["a0"].code) + eq_(TypeCode.STRING, request.param_types["a1"].code) + eq_(expected_type_code, request.param_types["a2"].code) + + def test_select_dict(self): + self._test_select_json( + '{"size":"Great","type":"Stadium"}', + JsonObject({"size": "Great", "type": "Stadium"}), + ) + + def test_select_array(self): + self._test_select_json( + '[{"size":"Great","type":"Stadium"}]', + JsonObject([{"size": "Great", "type": "Stadium"}]), + ) + + def _test_select_json(self, description, expected): + from test.mockserver_tests.json_model import Venue + + sql = "SELECT venues.id, venues.name, venues.description \n" "FROM venues" + add_venue_query_result(sql, description) + engine = self.create_engine() + + with Session(engine.execution_options(read_only=True)) as session: + venue = session.execute(select(Venue)).first()[0] + eq_(venue.description, expected) + + +def add_venue_query_result(sql: str, description: str): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="name", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.STRING) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="description", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.JSON) + ), + ) + ), + ] + ) + ) + ) + ), + ) + ) + 
result.rows.extend( + [ + ( + "1", + "Test", + description, + ), + ] + ) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_not_enforced_fk.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_not_enforced_fk.py new file mode 100644 index 000000000000..b2253d1b92f0 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_not_enforced_fk.py @@ -0,0 +1,74 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import create_engine +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + FixedSizePool, + ResultSet, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest + + +class TestNotEnforcedFK(MockServerTestBase): + """Ensure we emit correct DDL for not enforced foreign keys.""" + + def test_create_table(self): + from test.mockserver_tests.not_enforced_fk_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="albums" +LIMIT 1 +""", + ResultSet(), + ) + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={"client": self.client, "pool": FixedSizePool(size=10)}, + ) + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(2, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid STRING(MAX) NOT NULL, \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + eq_( + "CREATE TABLE albums (\n" + "\tid STRING(MAX) NOT NULL, \n" + "\tname STRING(MAX) NOT NULL, \n" + "\tsinger_id STRING(MAX) NOT NULL, \n" + "\tFOREIGN KEY(singer_id) REFERENCES singers (id) NOT ENFORCED\n" + ") PRIMARY KEY (id)", + requests[0].statements[1], + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_null_filtered_index.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_null_filtered_index.py new file mode 100644 index 000000000000..28ed1b5db5d2 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_null_filtered_index.py @@ -0,0 +1,80 @@ +# Copyright 2025 Google LLC 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import create_engine +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + FixedSizePool, + ResultSet, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest + + +class TestNullFilteredIndex(MockServerTestBase): + """Ensure we emit correct DDL for not null filtered indexes.""" + + def test_create_table(self): + from test.mockserver_tests.null_filtered_index import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="singers" +LIMIT 1 +""", + ResultSet(), + ) + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="albums" +LIMIT 1 +""", + ResultSet(), + ) + engine = create_engine( + "spanner:///projects/p/instances/i/databases/d", + connect_args={"client": self.client, "pool": FixedSizePool(size=10)}, + ) + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(5, len(requests[0].statements)) + eq_( + "CREATE TABLE singers (\n" + "\tid STRING(MAX) NOT NULL, \n" + "\tname STRING(MAX) NOT NULL\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + + # The order of the CREATE INDEX statements appears to be + 
# arbitrary, so we sort it for test consistency. + index_statements = sorted(requests[0].statements[1:]) + eq_("CREATE INDEX idx_name ON singers (name)", index_statements[0]) + eq_( + "CREATE NULL_FILTERED INDEX idx_null_filtered_name ON singers (name)", + index_statements[1], + ) + eq_("CREATE UNIQUE INDEX idx_uq_name ON singers (name)", index_statements[2]) + eq_( + "CREATE UNIQUE NULL_FILTERED INDEX " + "idx_uq_null_filtered_name ON singers (name)", + index_statements[3], + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_pickle_type.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_pickle_type.py new file mode 100644 index 000000000000..b2f1a2abee3c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_pickle_type.py @@ -0,0 +1,173 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + ResultSet, + CreateSessionRequest, + ExecuteSqlRequest, + CommitRequest, + BeginTransactionRequest, + TypeCode, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_result, + add_update_count, +) +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestPickleType(MockServerTestBase): + def test_create_table(self): + from test.mockserver_tests.pickle_type_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="user_preferences" +LIMIT 1 +""", + ResultSet(), + ) + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(1, len(requests[0].statements)) + eq_( + "CREATE TABLE user_preferences (\n" + "\tuser_id INT64 NOT NULL GENERATED BY DEFAULT" + " AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tusername STRING(50) NOT NULL, \n" + "\tpreferences BYTES(MAX), \n" + "\tcreated_at STRING(30) NOT NULL\n" + ") PRIMARY KEY (user_id)", + requests[0].statements[0], + ) + + def test_insert_and_query(self): + from test.mockserver_tests.pickle_type_model import UserPreferences + + add_update_count( + "INSERT INTO user_preferences (user_id, username, preferences, created_at) " + "VALUES (@a0, @a1, @a2, @a3)", + 1, + ) + engine = self.create_engine() + preferences = {"setting": "true"} + preferences_base64 = "gAWVFQAAAAAAAAB9lIwHc2V0dGluZ5SMBHRydWWUcy4=" + with Session(engine) as session: + new_user = UserPreferences( + user_id=1, + username="test_user", + preferences=preferences, + created_at="2025-05-04T00:00:00.000000", + ) + + session.add(new_user) + 
session.commit() + + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], CommitRequest) + request: ExecuteSqlRequest = requests[2] + eq_(4, len(request.params)) + eq_("1", request.params["a0"]) + eq_("test_user", request.params["a1"]) + eq_(preferences_base64, request.params["a2"]) + eq_(TypeCode.INT64, request.param_types["a0"].code) + eq_(TypeCode.STRING, request.param_types["a1"].code) + eq_(TypeCode.BYTES, request.param_types["a2"].code) + + add_user_preferences_result( + "SELECT user_preferences.user_id AS user_preferences_user_id, " + "user_preferences.username AS user_preferences_username, " + "user_preferences.preferences AS user_preferences_preferences, " + "user_preferences.created_at AS user_preferences_created_at\n" + "FROM user_preferences\n" + "WHERE user_preferences.user_id = @a0\n" + " LIMIT @a1", + preferences_base64, + ) + user = session.query(UserPreferences).filter_by(user_id=1).first() + eq_(preferences, user.preferences) + + +def add_user_preferences_result(sql: str, preferences_base64: object): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="user_id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="user_name", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.STRING) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="preferences", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.BYTES) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="created_at", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.TIMESTAMP) + ), + ) + ), + ] + ) + ) 
+ ) + ), + ) + ) + result.rows.extend( + [ + ( + "1", + "Test", + preferences_base64, + "2025-05-05T00:00:00.000000Z", + ), + ] + ) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_quickstart.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_quickstart.py new file mode 100644 index 000000000000..d62f03594699 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_quickstart.py @@ -0,0 +1,126 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +from google.cloud.spanner_v1 import ( + ResultSet, + ResultSetStats, + CreateSessionRequest, + ExecuteBatchDmlRequest, + CommitRequest, + BeginTransactionRequest, +) +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of, is_not_none +from test.mockserver_tests.mock_server_test_base import MockServerTestBase, add_result + + +class TestQuickStart(MockServerTestBase): + def test_create_tables(self): + from test.mockserver_tests.quickstart_model import Base + + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="user_account" +LIMIT 1""", + ResultSet(), + ) + add_result( + """SELECT true +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA="" AND TABLE_NAME="address" +LIMIT 1""", + ResultSet(), + ) + + engine = self.create_engine() + Base.metadata.create_all(engine) + requests = self.database_admin_service.requests + eq_(1, len(requests)) + is_instance_of(requests[0], UpdateDatabaseDdlRequest) + eq_(2, len(requests[0].statements)) + eq_( + "CREATE TABLE user_account (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\tname STRING(30) NOT NULL, \n" + "\tfullname STRING(MAX)\n" + ") PRIMARY KEY (id)", + requests[0].statements[0], + ) + eq_( + "CREATE TABLE address (\n" + "\tid INT64 NOT NULL " + "GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), \n" + "\temail_address STRING(MAX) NOT NULL, \n" + "\tuser_id INT64 NOT NULL, \n" + "\tFOREIGN KEY(user_id) REFERENCES user_account (id)\n" + ") PRIMARY KEY (id)", + requests[0].statements[1], + ) + + def test_insert_data(self): + from test.mockserver_tests.quickstart_model import User, Address + + # TODO: Use auto-generated primary keys. 
+ update_count = ResultSet( + dict( + stats=ResultSetStats( + dict( + row_count_exact=1, + ) + ) + ) + ) + add_result( + "INSERT INTO user_account (id, name, fullname) VALUES (@a0, @a1, @a2)", + update_count, + ) + add_result( + "INSERT INTO address (id, email_address, user_id) VALUES (@a0, @a1, @a2)", + update_count, + ) + + engine = self.create_engine() + with Session(engine) as session: + spongebob = User( + id=1, + name="spongebob", + fullname="Spongebob Squarepants", + addresses=[Address(id=1, email_address="spongebob@sqlalchemy.org")], + ) + sandy = User( + id=2, + name="sandy", + fullname="Sandy Cheeks", + addresses=[ + Address(id=2, email_address="sandy@sqlalchemy.org"), + Address(id=3, email_address="sandy@squirrelpower.org"), + ], + ) + patrick = User(id=3, name="patrick", fullname="Patrick Star") + session.add_all([spongebob, sandy, patrick]) + session.commit() + + requests = self.spanner_service.requests + eq_(5, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteBatchDmlRequest) + is_instance_of(requests[3], ExecuteBatchDmlRequest) + is_instance_of(requests[4], CommitRequest) + is_not_none(requests[2].transaction.id) + eq_(requests[2].transaction.id, requests[3].transaction.id) + eq_(requests[2].transaction.id, requests[4].transaction_id) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_read_only_transaction.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_read_only_transaction.py new file mode 100644 index 000000000000..0dffbb888e00 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_read_only_transaction.py @@ -0,0 +1,68 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import select +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + BeginTransactionRequest, + TransactionOptions, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_singer_query_result, +) + + +class TestReadOnlyTransaction(MockServerTestBase): + def test_read_only_transaction(self): + from test.mockserver_tests.read_only_model import Singer + + add_singer_query_result("SELECT singers.id, singers.name \n" + "FROM singers") + engine = self.create_engine() + + for i in range(2): + with Session(engine.execution_options(read_only=True)) as session: + # Execute two queries in a read-only transaction. + session.scalars(select(Singer)).all() + session.scalars(select(Singer)).all() + + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(7, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], ExecuteSqlRequest) + is_instance_of(requests[4], BeginTransactionRequest) + is_instance_of(requests[5], ExecuteSqlRequest) + is_instance_of(requests[6], ExecuteSqlRequest) + # Verify that the transaction is a read-only transaction. 
+ for index in [1, 4]: + begin_request: BeginTransactionRequest = requests[index] + eq_( + TransactionOptions( + dict( + read_only=TransactionOptions.ReadOnly( + dict( + strong=True, + return_read_timestamp=True, + ) + ) + ) + ), + begin_request.options, + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_stale_reads.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_stale_reads.py new file mode 100644 index 000000000000..0dcf8b38c680 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_stale_reads.py @@ -0,0 +1,114 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +from sqlalchemy import select +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + BeginTransactionRequest, + TransactionOptions, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_singer_query_result, +) + + +class TestStaleReads(MockServerTestBase): + def test_stale_read_multi_use(self): + from test.mockserver_tests.stale_read_model import Singer + + add_singer_query_result("SELECT singers.id, singers.name \nFROM singers") + engine = self.create_engine() + + timestamp = datetime.datetime.fromtimestamp(1733328910) + for i in range(2): + with Session( + engine.execution_options( + read_only=True, + staleness={"read_timestamp": timestamp}, + ) + ) as session: + # Execute two queries in a read-only transaction. + session.scalars(select(Singer)).all() + session.scalars(select(Singer)).all() + + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(7, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], ExecuteSqlRequest) + is_instance_of(requests[4], BeginTransactionRequest) + is_instance_of(requests[5], ExecuteSqlRequest) + is_instance_of(requests[6], ExecuteSqlRequest) + # Verify that the transaction is a read-only transaction. 
+ for index in [1, 4]: + begin_request: BeginTransactionRequest = requests[index] + eq_( + TransactionOptions( + dict( + read_only=TransactionOptions.ReadOnly( + dict( + read_timestamp={"seconds": 1733328910}, + return_read_timestamp=True, + ) + ) + ) + ), + begin_request.options, + ) + + def test_stale_read_single_use(self): + from test.mockserver_tests.stale_read_model import Singer + + add_singer_query_result("SELECT singers.id, singers.name \nFROM singers") + engine = self.create_engine() + + with Session( + engine.execution_options( + isolation_level="AUTOCOMMIT", + staleness={"max_staleness": {"seconds": 15}}, + ) + ) as session: + # Execute two queries in autocommit. + session.scalars(select(Singer)).all() + session.scalars(select(Singer)).all() + + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(3, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], ExecuteSqlRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + # Verify that the requests use a stale read. + for index in [1, 2]: + execute_request: ExecuteSqlRequest = requests[index] + eq_( + TransactionOptions( + dict( + read_only=TransactionOptions.ReadOnly( + dict( + max_staleness={"seconds": 15}, + return_read_timestamp=True, + ) + ) + ) + ), + execute_request.transaction.single_use, + ) diff --git a/packages/sqlalchemy-spanner/test/mockserver_tests/test_tags.py b/packages/sqlalchemy-spanner/test/mockserver_tests/test_tags.py new file mode 100644 index 000000000000..0a4d6337f03c --- /dev/null +++ b/packages/sqlalchemy-spanner/test/mockserver_tests/test_tags.py @@ -0,0 +1,160 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import select +from sqlalchemy.orm import Session +from sqlalchemy.testing import eq_, is_instance_of +from google.cloud.spanner_v1 import ( + CreateSessionRequest, + ExecuteSqlRequest, + BeginTransactionRequest, + CommitRequest, +) +from test.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_update_count, +) +from test.mockserver_tests.mock_server_test_base import add_result +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set + + +class TestStaleReads(MockServerTestBase): + def test_request_tag(self): + from test.mockserver_tests.tags_model import Singer + + add_singer_query_result("SELECT singers.id, singers.name \n" + "FROM singers") + engine = self.create_engine() + + with Session(engine.execution_options(read_only=True)) as session: + # Execute two queries in a read-only transaction. + session.scalars( + select(Singer).execution_options(request_tag="my-tag-1") + ).all() + session.scalars( + select(Singer).execution_options(request_tag="my-tag-2") + ).all() + + # Verify the requests that we got. + requests = self.spanner_service.requests + eq_(4, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], ExecuteSqlRequest) + # Verify that we got a request tag for the queries. 
+ eq_("my-tag-1", requests[2].request_options.request_tag) + eq_("my-tag-2", requests[3].request_options.request_tag) + + def test_transaction_tag(self): + from test.mockserver_tests.tags_model import Singer + + add_singer_query_result("SELECT singers.id, singers.name\n" + "FROM singers") + add_single_singer_query_result( + "SELECT singers.id AS singers_id, singers.name AS singers_name\n" + "FROM singers\n" + "WHERE singers.id = @a0" + ) + add_update_count("INSERT INTO singers (id, name) VALUES (@a0, @a1)", 1) + engine = self.create_engine() + + with Session( + engine.execution_options(transaction_tag="my-transaction-tag") + ) as session: + # Execute a query and an insert statement in a read/write transaction. + session.get(Singer, 1, execution_options={"request_tag": "my-tag-1"}) + session.scalars( + select(Singer).execution_options(request_tag="my-tag-2") + ).all() + session.connection().execution_options(request_tag="insert-singer") + session.add(Singer(id=1, name="Some Singer")) + session.commit() + + # Verify the requests that we got. 
+ requests = self.spanner_service.requests + eq_(6, len(requests)) + is_instance_of(requests[0], CreateSessionRequest) + is_instance_of(requests[1], BeginTransactionRequest) + is_instance_of(requests[2], ExecuteSqlRequest) + is_instance_of(requests[3], ExecuteSqlRequest) + is_instance_of(requests[4], ExecuteSqlRequest) + is_instance_of(requests[5], CommitRequest) + for request in requests[2:]: + eq_("my-transaction-tag", request.request_options.transaction_tag) + eq_("my-tag-1", requests[2].request_options.request_tag) + eq_("my-tag-2", requests[3].request_options.request_tag) + eq_("insert-singer", requests[4].request_options.request_tag) + + +def empty_singer_result_set(): + return result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="singers_id", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ), + spanner_type.StructType.Field( + dict( + name="singers_name", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.STRING) + ), + ) + ), + ] + ) + ) + ) + ), + ) + ) + + +def add_singer_query_result(sql: str): + result = empty_singer_result_set() + result.rows.extend( + [ + ( + "1", + "Jane Doe", + ), + ( + "2", + "John Doe", + ), + ] + ) + add_result(sql, result) + + +def add_single_singer_query_result(sql: str): + result = empty_singer_result_set() + result.rows.extend( + [ + ( + "1", + "Jane Doe", + ), + ] + ) + add_result(sql, result) diff --git a/packages/sqlalchemy-spanner/test/system/test_basics.py b/packages/sqlalchemy-spanner/test/system/test_basics.py new file mode 100644 index 000000000000..5914b9f34f7e --- /dev/null +++ b/packages/sqlalchemy-spanner/test/system/test_basics.py @@ -0,0 +1,441 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import datetime +import os +from typing import Optional + +import pytest +from sqlalchemy import ( + text, + Table, + Column, + Integer, + ForeignKey, + ForeignKeyConstraint, + PrimaryKeyConstraint, + String, + Index, + MetaData, + Boolean, + BIGINT, + inspect, + select, + update, + delete, + event, +) +from sqlalchemy.orm import Session, DeclarativeBase, Mapped, mapped_column +from sqlalchemy.types import REAL +from sqlalchemy.testing import eq_, is_true, is_not_none, is_none +from sqlalchemy.testing.plugin.plugin_base import fixtures + + +class TestBasics(fixtures.TablesTest): + @classmethod + def define_tables(cls, metadata): + numbers = Table( + "numbers", + metadata, + Column("number", Integer), + Column("name", String(20)), + Column("alternative_name", String(20)), + Column("prime", Boolean, server_default=text("FALSE")), + Column("ln", REAL), + PrimaryKeyConstraint("number"), + ) + Index( + "idx_numbers_name", + numbers.c.name, + numbers.c.prime.desc(), + spanner_storing=[numbers.c.alternative_name], + ) + Table( + "users", + metadata, + Column("ID", Integer, primary_key=True), + Column("name", String(20)), + ) + # Add a foreign key example. 
+ Table( + "number_colors", + metadata, + Column("ID", Integer, primary_key=True), + Column( + "number_id", Integer, ForeignKey("numbers.number", name="number_fk") + ), + Column("color", String(20)), + ) + + with cls.bind.begin() as conn: + conn.execute(text("CREATE SCHEMA IF NOT EXISTS schema")) + Table( + "users", + metadata, + Column("ID", Integer, primary_key=True), + Column("name", String(20)), + schema="schema", + ) + # Add a foreign key example which crosses schema. + Table( + "number_colors", + metadata, + Column("ID", Integer, primary_key=True), + Column( + "number_id", + Integer, + ForeignKey("numbers.number", name="cross_schema_number_fk"), + ), + Column("color", String(20)), + schema="schema", + ) + # Add a composite primary key & foreign key example. + Table( + "composite_pk", + metadata, + Column("a", String, primary_key=True), + Column("b", String, primary_key=True), + ) + composite_fk = Table( + "composite_fk", + metadata, + Column("my_a", String, primary_key=True), + Column("my_b", String, primary_key=True), + Column("my_c", String, primary_key=True), + ForeignKeyConstraint( + ["my_a", "my_b"], + ["composite_pk.a", "composite_pk.b"], + name="composite_fk_composite_pk_a_b", + ), + ) + Index( + "idx_composte_fk_all", + composite_fk.c.my_a, + composite_fk.c.my_b, + composite_fk.c.my_c, + ) + + def test_hello_world(self, connection): + greeting = connection.execute(text("select 'Hello World'")) + eq_("Hello World", greeting.fetchone()[0]) + + def test_insert_number(self, connection): + connection.execute( + text( + """insert or update into numbers (number, name, prime, ln) + values (1, 'One', false, cast(ln(1) as float32))""" + ) + ) + name = connection.execute(text("select name from numbers where number=1")) + eq_("One", name.fetchone()[0]) + + def test_reflect(self, connection): + engine = connection.engine + meta: MetaData = MetaData() + meta.reflect(bind=engine) + eq_(5, len(meta.tables)) + table = meta.tables["numbers"] + eq_(5, 
len(table.columns)) + eq_("number", table.columns[0].name) + eq_(BIGINT, type(table.columns[0].type)) + is_none(table.columns[0].server_default) + eq_("name", table.columns[1].name) + eq_(String, type(table.columns[1].type)) + eq_("alternative_name", table.columns[2].name) + eq_(String, type(table.columns[2].type)) + eq_("prime", table.columns[3].name) + eq_(Boolean, type(table.columns[3].type)) + is_not_none(table.columns[3].server_default) + eq_("FALSE", table.columns[3].server_default.arg.text) + eq_("ln", table.columns[4].name) + eq_(REAL, type(table.columns[4].type)) + eq_(1, len(table.indexes)) + index = next(iter(table.indexes)) + eq_(2, len(index.columns)) + eq_("name", index.columns[0].name) + eq_("prime", index.columns[1].name) + dialect_options = index.dialect_options["spanner"] + eq_(1, len(dialect_options["storing"])) + eq_("alternative_name", dialect_options["storing"][0]) + + def test_table_name_overlapping_with_system_table(self, connection): + class Base(DeclarativeBase): + pass + + class Role(Base): + __tablename__ = "roles" + id: Mapped[int] = mapped_column(Integer, primary_key=True) + name: Mapped[str] = mapped_column(String(100), nullable=True) + type: Mapped[str] = mapped_column(String(100), nullable=True) + description: Mapped[Optional[str]] = mapped_column(String(512)) + + engine = connection.engine + Base.metadata.create_all(engine) + + with Session(engine) as session: + role = Role( + id=1, + name="Test", + type="Test", + description="Test", + ) + session.add(role) + session.commit() + + def test_orm(self, connection): + class Base(DeclarativeBase): + pass + + class Number(Base): + __tablename__ = "numbers" + number: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(20)) + alternative_name: Mapped[str] = mapped_column(String(20)) + prime: Mapped[bool] = mapped_column(Boolean) + ln: Mapped[float] = mapped_column(REAL) + + class User(Base): + __tablename__ = "users" + ID: Mapped[int] = 
mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(20)) + + class SchemaUser(Base): + __tablename__ = "users" + __table_args__ = {"schema": "schema"} + ID: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(20)) + + engine = connection.engine + with Session(engine) as session: + number = Number( + number=1, name="One", alternative_name="Uno", prime=False, ln=0.0 + ) + session.add(number) + session.commit() + + level = "serializable" + if os.environ.get("SPANNER_EMULATOR_HOST", ""): + level = "REPEATABLE READ" + with Session(engine.execution_options(isolation_level=level)) as session: + user = User(name="Test") + session.add(user) + session.commit() + + statement = select(User).filter_by(name="Test") + users = session.scalars(statement).all() + eq_(1, len(users)) + is_true(users[0].ID > 0) + + with Session(engine) as session: + user = SchemaUser(name="SchemaTest") + session.add(user) + session.commit() + + users = session.scalars( + select(SchemaUser).where(SchemaUser.name == "SchemaTest") + ).all() + eq_(1, len(users)) + is_true(users[0].ID > 0) + + session.execute( + update(SchemaUser) + .where(SchemaUser.name == "SchemaTest") + .values(name="NewName") + ) + session.commit() + + users = session.scalars( + select(SchemaUser).where(SchemaUser.name == "NewName") + ).all() + eq_(1, len(users)) + is_true(users[0].ID > 0) + + session.execute(delete(SchemaUser).where(SchemaUser.name == "NewName")) + session.commit() + + users = session.scalars( + select(SchemaUser).where(SchemaUser.name == "NewName") + ).all() + eq_(0, len(users)) + + def test_multi_row_insert(self, connection): + """Ensures we can perform multi-row inserts.""" + + class Base(DeclarativeBase): + pass + + class User(Base): + __tablename__ = "users" + ID: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column(String(20)) + + with connection.engine.begin() as conn: + inserted_rows = list( + conn.execute( + 
User.__table__.insert() + .values([{"name": "a"}, {"name": "b"}]) + .returning(User.__table__.c.ID, User.__table__.c.name) + ) + ) + + eq_(2, len(inserted_rows)) + eq_({"a", "b"}, {row.name for row in inserted_rows}) + + with connection.engine.connect() as conn: + selected_rows = list(conn.execute(User.__table__.select())) + + eq_(len(inserted_rows), len(selected_rows)) + eq_(set(inserted_rows), set(selected_rows)) + + @pytest.mark.skipif( + os.environ.get("SPANNER_EMULATOR_HOST") is not None, + reason=( + "Fails in emulator due to bug: " + "https://github.com/GoogleCloudPlatform/cloud-spanner-emulator/issues/279" + ), + ) + def test_cross_schema_fk_lookups(self, connection): + """Ensures we introspect FKs within & across schema.""" + + engine = connection.engine + + insp = inspect(engine) + eq_( + { + (None, "number_colors"): [ + { + "name": "number_fk", + "referred_table": "numbers", + "referred_schema": None, + "referred_columns": ["number"], + "constrained_columns": ["number_id"], + } + ] + }, + insp.get_multi_foreign_keys(filter_names=["number_colors"]), + ) + eq_( + { + ("schema", "number_colors"): [ + { + "name": "cross_schema_number_fk", + "referred_table": "numbers", + "referred_schema": None, + "referred_columns": ["number"], + "constrained_columns": ["number_id"], + } + ] + }, + insp.get_multi_foreign_keys( + filter_names=["number_colors"], schema="schema" + ), + ) + + def test_composite_fk_lookups(self, connection): + """Ensures we introspect composite FKs.""" + + engine = connection.engine + + insp = inspect(engine) + eq_( + { + (None, "composite_fk"): [ + { + "name": "composite_fk_composite_pk_a_b", + "referred_table": "composite_pk", + "referred_schema": None, + "referred_columns": ["a", "b"], + "constrained_columns": ["my_a", "my_b"], + } + ] + }, + insp.get_multi_foreign_keys(filter_names=["composite_fk"]), + ) + + def test_composite_index_lookups(self, connection): + """Ensures we introspect composite indexes.""" + + engine = connection.engine + + 
insp = inspect(engine) + eq_( + { + (None, "composite_fk"): [ + { + "name": "idx_composte_fk_all", + "column_names": ["my_a", "my_b", "my_c"], + "unique": False, + "column_sorting": {"my_a": "asc", "my_b": "asc", "my_c": "asc"}, + "include_columns": [], + "dialect_options": {}, + } + ] + }, + insp.get_multi_indexes(filter_names=["composite_fk"]), + ) + + def test_commit_timestamp(self, connection): + """Ensures commit timestamps are set.""" + + class Base(DeclarativeBase): + pass + + class TimestampUser(Base): + __tablename__ = "timestamp_users" + ID: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] + updated_at: Mapped[datetime.datetime] = mapped_column( + spanner_allow_commit_timestamp=True, + default=text("PENDING_COMMIT_TIMESTAMP()"), + # Make sure that this column is never part of a THEN RETURN clause. + spanner_exclude_from_returning=True, + ) + + @event.listens_for(TimestampUser, "before_update") + def before_update(mapper, connection, target): + target.updated_at = text("PENDING_COMMIT_TIMESTAMP()") + + engine = connection.engine + Base.metadata.create_all(engine) + try: + with Session(engine) as session: + session.add(TimestampUser(name="name")) + session.commit() + + with Session(engine) as session: + users = list( + session.scalars( + select(TimestampUser).where(TimestampUser.name == "name") + ) + ) + user = users[0] + + is_not_none(user.updated_at) + created_at = user.updated_at + + user.name = "new-name" + session.commit() + + with Session(engine) as session: + users = list( + session.scalars( + select(TimestampUser).where(TimestampUser.name == "new-name") + ) + ) + user = users[0] + + is_not_none(user.updated_at) + is_true(user.updated_at > created_at) + + finally: + Base.metadata.drop_all(engine) diff --git a/packages/sqlalchemy-spanner/test/test_suite_13.py b/packages/sqlalchemy-spanner/test/test_suite_13.py new file mode 100644 index 000000000000..be8b1f782cb0 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/test_suite_13.py @@ 
-0,0 +1,2214 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import operator +import os +import pytest +import random +import time +from unittest import mock + +from google.cloud.spanner_v1 import RequestOptions, Client + +import sqlalchemy +from sqlalchemy import create_engine +from sqlalchemy import inspect +from sqlalchemy import testing +from sqlalchemy import ForeignKey +from sqlalchemy import MetaData +from sqlalchemy.schema import DDL +from sqlalchemy.schema import Computed +from sqlalchemy.testing import config +from sqlalchemy.testing import engines +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import is_instance_of +from sqlalchemy.testing import provide_metadata, emits_warning +from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_true +from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import Table +from sqlalchemy import literal_column +from sqlalchemy import select +from sqlalchemy import util +from sqlalchemy import event +from sqlalchemy import exists +from sqlalchemy import Boolean +from sqlalchemy import Float +from sqlalchemy import LargeBinary +from sqlalchemy import String +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relation +from sqlalchemy.orm import Session +from sqlalchemy.types import ARRAY +from sqlalchemy.types import Integer +from 
sqlalchemy.types import Numeric +from sqlalchemy.types import Text +from sqlalchemy.testing import requires +from sqlalchemy.testing.fixtures import ( + ComputedReflectionFixtureTest as _ComputedReflectionFixtureTest, +) + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.cloud import spanner_dbapi + +from sqlalchemy.testing.suite.test_cte import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_ddl import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_dialect import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_insert import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_reflection import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_results import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_select import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_sequence import ( + SequenceTest as _SequenceTest, + HasSequenceTest as _HasSequenceTest, +) # noqa: F401, F403 +from sqlalchemy.testing.suite.test_update_delete import * # noqa: F401, F403 + +from sqlalchemy.testing.suite.test_cte import CTETest as _CTETest +from sqlalchemy.testing.suite.test_ddl import TableDDLTest as _TableDDLTest +from sqlalchemy.testing.suite.test_ddl import ( + LongNameBlowoutTest as _LongNameBlowoutTest, +) +from sqlalchemy.testing.suite.test_dialect import EscapingTest as _EscapingTest +from sqlalchemy.testing.suite.test_insert import ( + InsertBehaviorTest as _InsertBehaviorTest, +) +from sqlalchemy.testing.suite.test_select import ( # noqa: F401, F403 + CompoundSelectTest as _CompoundSelectTest, + ExistsTest as _ExistsTest, + IsOrIsNotDistinctFromTest as _IsOrIsNotDistinctFromTest, + LikeFunctionsTest as _LikeFunctionsTest, + OrderByLabelTest as _OrderByLabelTest, +) +from sqlalchemy.testing.suite.test_reflection import ( + QuotedNameArgumentTest as _QuotedNameArgumentTest, + ComponentReflectionTest as _ComponentReflectionTest, + CompositeKeyReflectionTest as _CompositeKeyReflectionTest, 
+    ComputedReflectionTest as _ComputedReflectionTest,
+)
+from sqlalchemy.testing.suite.test_results import RowFetchTest as _RowFetchTest
+from sqlalchemy.testing.suite.test_types import (  # noqa: F401, F403
+    _DateFixture as _DateFixtureTest,
+    _LiteralRoundTripFixture,
+    _UnicodeFixture as _UnicodeFixtureTest,
+    BooleanTest as _BooleanTest,
+    DateTest as _DateTest,
+    DateTimeHistoricTest,
+    DateTimeCoercedToDateTimeTest as _DateTimeCoercedToDateTimeTest,
+    DateTimeMicrosecondsTest as _DateTimeMicrosecondsTest,
+    DateTimeTest as _DateTimeTest,
+    IntegerTest as _IntegerTest,
+    JSONTest as _JSONTest,
+    NumericTest as _NumericTest,
+    StringTest as _StringTest,
+    TextTest as _TextTest,
+    TimeTest as _TimeTest,
+    TimeMicrosecondsTest as _TimeMicrosecondsTest,
+    TimestampMicrosecondsTest,
+    UnicodeVarcharTest as _UnicodeVarcharTest,
+    UnicodeTextTest as _UnicodeTextTest,
+)
+from test._helpers import get_db_url, get_project
+
+from google.cloud.sqlalchemy_spanner import version as sqlalchemy_spanner_version
+
+config.test_schema = ""
+
+
+class EscapingTest(_EscapingTest):
+    @provide_metadata
+    def test_percent_sign_round_trip(self):
+        """Test that the DBAPI accommodates for escaped / nonescaped
+        percent signs in a way that matches the compiler
+
+        SPANNER OVERRIDE
+        Cloud Spanner supports tables with an empty primary key, but
+        only a single row can be inserted into such a table -
+        following insertions will fail with `Row [] already exists`.
+        Overriding the test to avoid the same failure.
+ """ + m = self.metadata + t = Table("t", m, Column("data", String(50))) + t.create(config.db) + with config.db.begin() as conn: + conn.execute(t.insert(), dict(data="some % value")) + + eq_( + conn.scalar( + select([t.c.data]).where( + t.c.data == literal_column("'some % value'") + ) + ), + "some % value", + ) + + conn.execute(t.delete()) + conn.execute(t.insert(), dict(data="some %% other value")) + eq_( + conn.scalar( + select([t.c.data]).where( + t.c.data == literal_column("'some %% other value'") + ) + ), + "some %% other value", + ) + + +class CTETest(_CTETest): + @classmethod + def define_tables(cls, metadata): + """ + The original method creates a foreign key without a name, + which causes troubles on test cleanup. Overriding the + method to explicitly set a foreign key name. + """ + Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", ForeignKey("some_table.id", name="fk_some_table")), + ) + + Table( + "some_other_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", Integer), + ) + + @pytest.mark.skip("INSERT from WITH subquery is not supported") + def test_insert_from_select_round_trip(self): + """ + The test checks if an INSERT can be done from a cte, like: + + WITH some_cte AS (...) + INSERT INTO some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_scalar_subq_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. 
+ """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_from_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("UPDATE from WITH subquery is not supported") + def test_update_from_round_trip(self): + """ + The test checks if an UPDATE can be done from a cte, like: + + WITH some_cte AS (...) + UPDATE some_other_table + SET (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("WITH RECURSIVE subqueries are not supported") + def test_select_recursive_round_trip(self): + pass + + +class BooleanTest(_BooleanTest): + def test_render_literal_bool(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + self._literal_round_trip(Boolean(), [True], [True]) + self._literal_round_trip(Boolean(), [False], [False]) + + +class ExistsTest(_ExistsTest): + def test_select_exists(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. + Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) + """ + stuff = self.tables.stuff + eq_( + connection.execute( + select((exists().where(stuff.c.data == "some data"),)) + ).fetchall(), + [(True,)], + ) + + def test_select_exists_false(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. 
+ Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) + """ + stuff = self.tables.stuff + eq_( + connection.execute( + select((exists().where(stuff.c.data == "no data"),)) + ).fetchall(), + [(False,)], + ) + + +class TableDDLTest(_TableDDLTest): + @pytest.mark.skip( + "Spanner table name must start with an uppercase or lowercase letter" + ) + def test_underscore_names(self): + pass + + +@pytest.mark.skip("Max identifier length in Spanner is 128") +class LongNameBlowoutTest(_LongNameBlowoutTest): + pass + + +class DateFixtureTest(_DateFixtureTest): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + Table( + "date_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("date_data", cls.datatype), + ) + + +class DateTest(DateFixtureTest, _DateTest): + """ + SPANNER OVERRIDE: + + DateTest tests used same class method to create table, so to avoid those failures + and maintain DRY concept just inherit the class to run tests successfully. + """ + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self): + super().test_null() + + +class DateTimeMicrosecondsTest(_DateTimeMicrosecondsTest, DateTest): + def test_round_trip(self): + """ + SPANNER OVERRIDE: + + Spanner converts timestamp into `%Y-%m-%dT%H:%M:%S.%fZ` format, so to avoid + assert failures convert datetime input to the desire timestamp format. 
+        """
+        date_table = self.tables.date_table
+        config.db.execute(date_table.insert(), {"date_data": self.data})
+
+        row = config.db.execute(select([date_table.c.date_data])).first()
+        compare = self.compare or self.data
+        compare = compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+        eq_(row[0].rfc3339(), compare)
+        assert isinstance(row[0], DatetimeWithNanoseconds)
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator"
+    )
+    def test_null_bound_comparison(self):
+        super().test_null_bound_comparison()
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator"
+    )
+    def test_null(self):
+        super().test_null()
+
+
+class DateTimeTest(_DateTimeTest, DateTimeMicrosecondsTest):
+    """
+    SPANNER OVERRIDE:
+
+    DateTimeTest tests have the same failures as DateTimeMicrosecondsTest tests,
+    so to avoid those failures and maintain DRY concept just inherit the class to run
+    tests successfully.
+    """
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator"
+    )
+    def test_null_bound_comparison(self):
+        super().test_null_bound_comparison()
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator"
+    )
+    def test_null(self):
+        super().test_null()
+
+
+@pytest.mark.skip("Spanner doesn't support Time data type.")
+class TimeTests(_TimeMicrosecondsTest, _TimeTest):
+    pass
+
+
+@pytest.mark.skip("Spanner doesn't coerce dates from datetime.")
+class DateTimeCoercedToDateTimeTest(_DateTimeCoercedToDateTimeTest):
+    pass
+
+
+class IntegerTest(_IntegerTest):
+    @provide_metadata
+    def _round_trip(self, datatype, data):
+        """
+        SPANNER OVERRIDE:
+
+        This is the helper method for integer class tests which creates a table and
+        performs an insert operation.
+        Cloud Spanner supports tables with an empty primary key, but only one
+        row can be inserted into such a table - following insertions will fail with
+        `400 id must not be NULL in table date_table`.
+        Overriding the tests and adding a manual primary key value to avoid the same
+        failures.
+        """
+        metadata = self.metadata
+        int_table = Table(
+            "integer_table",
+            metadata,
+            Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
+            Column("integer_data", datatype),
+        )
+
+        metadata.create_all(config.db)
+
+        config.db.execute(int_table.insert(), {"id": 1, "integer_data": data})
+
+        row = config.db.execute(select([int_table.c.integer_data])).first()
+
+        eq_(row, (data,))
+
+        if util.py3k:
+            assert isinstance(row[0], int)
+        else:
+            assert isinstance(row[0], (long, int))  # noqa
+
+
+class UnicodeFixtureTest(_UnicodeFixtureTest):
+    @classmethod
+    def define_tables(cls, metadata):
+        """
+        SPANNER OVERRIDE:
+
+        Cloud Spanner doesn't support auto incrementing ids feature,
+        which is used by the original test. Overriding the test data
+        creation method to disable autoincrement and make id column
+        nullable.
+        """
+        Table(
+            "unicode_table",
+            metadata,
+            Column("id", Integer, primary_key=True, nullable=True),
+            Column("unicode_data", cls.datatype),
+        )
+
+    def test_round_trip_executemany(self):
+        """
+        SPANNER OVERRIDE
+
+        Cloud Spanner supports tables with an empty primary key, but
+        only a single row can be inserted into such a table -
+        following insertions will fail with `Row [] already exists`.
+        Overriding the test to avoid the same failure.
+ """ + unicode_table = self.tables.unicode_table + + config.db.execute( + unicode_table.insert(), + [{"id": i, "unicode_data": self.data} for i in range(3)], + ) + + rows = config.db.execute(select([unicode_table.c.unicode_data])).fetchall() + eq_(rows, [(self.data,) for i in range(3)]) + for row in rows: + assert isinstance(row[0], util.text_type) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal(self): + pass + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class UnicodeVarcharTest(UnicodeFixtureTest, _UnicodeVarcharTest): + """ + SPANNER OVERRIDE: + + UnicodeVarcharTest class inherits the _UnicodeFixtureTest class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class UnicodeTextTest(UnicodeFixtureTest, _UnicodeTextTest): + """ + SPANNER OVERRIDE: + + UnicodeTextTest class inherits the _UnicodeFixtureTest class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class ComponentReflectionTest(_ComponentReflectionTest): + @classmethod + def define_views(cls, metadata, schema): + table_info = { + "users": ["user_id", "test1", "test2"], + "email_addresses": ["address_id", "remote_user_id", "email_address"], + } + if testing.requires.self_referential_foreign_keys.enabled: + table_info["users"] = table_info["users"] + ["parent_user_id"] + for table_name in ("users", "email_addresses"): + fullname = table_name + if schema: + fullname = "%s.%s" % (schema, table_name) + view_name = fullname + "_v" + columns = "" + for column in table_info[table_name]: + stmt = table_name + "." 
+ column + " AS " + column + if columns: + columns = columns + ", " + stmt + else: + columns = stmt + query = f"""CREATE VIEW {view_name} + SQL SECURITY INVOKER + AS SELECT {columns} + FROM {fullname}""" + + event.listen(metadata, "after_create", DDL(query)) + event.listen(metadata, "before_drop", DDL("DROP VIEW %s" % view_name)) + + @classmethod + def define_reflected_tables(cls, metadata, schema): + if schema: + schema_prefix = schema + "." + else: + schema_prefix = "" + + if testing.requires.self_referential_foreign_keys.enabled: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + Column( + "parent_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey( + "%susers.user_id" % schema_prefix, name="user_id_fk" + ), + ), + schema=schema, + test_needs_fk=True, + ) + else: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + schema=schema, + test_needs_fk=True, + ) + + Table( + "dingalings", + metadata, + Column("dingaling_id", sqlalchemy.Integer, primary_key=True), + Column( + "address_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("%semail_addresses.address_id" % schema_prefix), + ), + Column("data", sqlalchemy.String(30)), + schema=schema, + test_needs_fk=True, + ) + Table( + "email_addresses", + metadata, + Column("address_id", sqlalchemy.Integer, primary_key=True), + Column( + "remote_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey(users.c.user_id), + ), + Column("email_address", sqlalchemy.String(20)), + sqlalchemy.PrimaryKeyConstraint("address_id", name="email_ad_pk"), + schema=schema, + test_needs_fk=True, + ) + Table( + "comment_test", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True, comment="id comment"), + Column("data", 
sqlalchemy.String(20), comment="data % comment"), + Column( + "d2", + sqlalchemy.String(20), + comment=r"""Comment types type speedily ' " \ '' Fun!""", + ), + schema=schema, + comment=r"""the test % ' " \ table comment""", + ) + + if testing.requires.cross_schema_fk_reflection.enabled: + if schema is None: + Table( + "local_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + Column( + "remote_id", + ForeignKey("%s.remote_table_2.id" % testing.config.test_schema), + ), + test_needs_fk=True, + schema=config.db.dialect.default_schema_name, + ) + else: + Table( + "remote_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column( + "local_id", + ForeignKey( + "%s.local_table.id" % config.db.dialect.default_schema_name + ), + ), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + Table( + "remote_table_2", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + + if testing.requires.index_reflection.enabled: + cls.define_index(metadata, users) + + if not schema: + # test_needs_fk is at the moment to force MySQL InnoDB + noncol_idx_test_nopk = Table( + "noncol_idx_test_nopk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + ) + + noncol_idx_test_pk = Table( + "noncol_idx_test_pk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + ) + + if testing.requires.indexes_with_ascdesc.enabled: + sqlalchemy.Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc()) + sqlalchemy.Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc()) + + if testing.requires.view_column_reflection.enabled and not bool( + os.environ.get("SPANNER_EMULATOR_HOST") + ): + cls.define_views(metadata, schema) + if not schema and 
testing.requires.temp_table_reflection.enabled: + cls.define_temp_tables(metadata) + + def _test_get_columns(self, schema=None, table_type="table"): + if table_type == "view" and bool(os.environ.get("SPANNER_EMULATOR_HOST")): + pytest.skip("View tables not supported on emulator") + super()._test_get_columns(schema, table_type) + + @testing.provide_metadata + def _test_get_view_definition(self, schema=None): + if bool(os.environ.get("SPANNER_EMULATOR_HOST")): + pytest.skip("View tables not supported on emulator") + super()._test_get_view_definition(schema) + + @classmethod + def define_temp_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + In Cloud Spanner unique indexes are used instead of directly + creating unique constraints. Overriding the test to replace + constraints with indexes in testing data. + """ + user_tmp = Table( + "user_tmp", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + Column("foo", sqlalchemy.INT), + sqlalchemy.Index("user_tmp_uq", "name", unique=True), + sqlalchemy.Index("user_tmp_ix", "foo"), + ) + if ( + testing.requires.view_reflection.enabled + and testing.requires.temporary_views.enabled + ): + event.listen( + user_tmp, + "after_create", + DDL("create temporary view user_tmp_v as " "select * from user_tmp"), + ) + event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v")) + + @testing.provide_metadata + def test_reflect_string_column_max_len(self): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the STRING type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. 
+ """ + Table("text_table", self.metadata, Column("TestColumn", Text, nullable=False)) + self.metadata.create_all() + + Table("text_table", MetaData(bind=self.bind), autoload=True) + + @testing.provide_metadata + def test_reflect_bytes_column_max_len(self): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the BYTES type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. + """ + Table( + "bytes_table", + self.metadata, + Column("TestColumn", LargeBinary, nullable=False), + ) + self.metadata.create_all() + + Table("bytes_table", MetaData(bind=self.bind), autoload=True) + inspect(config.db).get_columns("bytes_table") + + @testing.provide_metadata + def _test_get_unique_constraints(self, schema=None): + """ + SPANNER OVERRIDE: + + In Cloud Spanner unique indexes are used instead of directly + creating unique constraints. Overriding the test to replace + constraints with indexes in testing data. + """ + # SQLite dialect needs to parse the names of the constraints + # separately from what it gets from PRAGMA index_list(), and + # then matches them up. so same set of column_names in two + # constraints will confuse it. Perhaps we should no longer + # bother with index_list() here since we have the whole + # CREATE TABLE? 
+ uniques = sorted( + [ + {"name": "unique_a", "column_names": ["a"]}, + {"name": "unique_a_b_c", "column_names": ["a", "b", "c"]}, + {"name": "unique_c_a_b", "column_names": ["c", "a", "b"]}, + {"name": "unique_asc_key", "column_names": ["asc", "key"]}, + {"name": "i.have.dots", "column_names": ["b"]}, + {"name": "i have spaces", "column_names": ["c"]}, + ], + key=operator.itemgetter("name"), + ) + orig_meta = self.metadata + Table( + "testtbl", + orig_meta, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("a", sqlalchemy.String(20)), + Column("b", sqlalchemy.String(30)), + Column("c", sqlalchemy.Integer), + # reserved identifiers + Column("asc", sqlalchemy.String(30)), + Column("key", sqlalchemy.String(30)), + sqlalchemy.Index("unique_a", "a", unique=True), + sqlalchemy.Index("unique_a_b_c", "a", "b", "c", unique=True), + sqlalchemy.Index("unique_c_a_b", "c", "a", "b", unique=True), + sqlalchemy.Index("unique_asc_key", "asc", "key", unique=True), + schema=schema, + ) + orig_meta.create_all() + + inspector = inspect(orig_meta.bind) + reflected = sorted( + inspector.get_unique_constraints("testtbl", schema=schema), + key=operator.itemgetter("name"), + ) + + names_that_duplicate_index = set() + + for orig, refl in zip(uniques, reflected): + # Different dialects handle duplicate index and constraints + # differently, so ignore this flag + dupe = refl.pop("duplicates_index", None) + if dupe: + names_that_duplicate_index.add(dupe) + eq_(orig, refl) + + reflected_metadata = MetaData() + reflected = Table( + "testtbl", + reflected_metadata, + autoload_with=orig_meta.bind, + schema=schema, + ) + + # test "deduplicates for index" logic. MySQL and Oracle + # "unique constraints" are actually unique indexes (with possible + # exception of a unique that is a dupe of another one in the case + # of Oracle). make sure # they aren't duplicated. 
+ idx_names = set([idx.name for idx in reflected.indexes]) + uq_names = set( + [ + uq.name + for uq in reflected.constraints + if isinstance(uq, sqlalchemy.UniqueConstraint) + ] + ).difference(["unique_c_a_b"]) + + assert not idx_names.intersection(uq_names) + if names_that_duplicate_index: + eq_(names_that_duplicate_index, idx_names) + eq_(uq_names, set()) + + @testing.provide_metadata + def test_unique_constraint_raises(self): + """ + Checking that unique constraint creation + fails due to a ProgrammingError. + """ + Table( + "user_tmp_failure", + self.metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + sqlalchemy.UniqueConstraint("name", name="user_tmp_uq"), + ) + + with pytest.raises(spanner_dbapi.exceptions.ProgrammingError): + self.metadata.create_all() + + @testing.provide_metadata + def _test_get_table_names(self, schema=None, table_type="table", order_by=None): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. 
+ """ + _ignore_tables = [ + "comment_test", + "noncol_idx_test_pk", + "noncol_idx_test_nopk", + "local_table", + "remote_table", + "remote_table_2", + ] + meta = self.metadata + + insp = inspect(meta.bind) + + if table_type == "view" and not bool(os.environ.get("SPANNER_EMULATOR_HOST")): + table_names = insp.get_view_names(schema) + table_names.sort() + answer = ["email_addresses_v", "users_v"] + eq_(sorted(table_names), answer) + else: + if order_by: + tables = [ + rec[0] + for rec in insp.get_sorted_table_and_fkc_names(schema) + if rec[0] + ] + else: + tables = insp.get_table_names(schema) + table_names = [t for t in tables if t not in _ignore_tables] + + if order_by == "foreign_key": + answer = {"dingalings", "email_addresses", "user_tmp", "users"} + eq_(set(table_names), answer) + else: + answer = ["dingalings", "email_addresses", "user_tmp", "users"] + eq_(sorted(table_names), answer) + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_indexes(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_unique_constraints(self): + pass + + @testing.requires.table_reflection + def test_numeric_reflection(self): + """ + SPANNER OVERRIDE: + + Spanner defines NUMERIC type with the constant precision=38 + and scale=9. Overriding the test to check if the NUMERIC + column is successfully created and has dimensions + correct for Cloud Spanner. + """ + for typ in self._type_round_trip(Numeric(18, 5)): + assert isinstance(typ, Numeric) + eq_(typ.precision, 38) + eq_(typ.scale, 9) + + @testing.requires.table_reflection + def test_binary_reflection(self): + """ + Check that a BYTES column with an explicitly + set size is correctly reflected. 
+ """ + for typ in self._type_round_trip(LargeBinary(20)): + assert isinstance(typ, LargeBinary) + eq_(typ.length, 20) + + @testing.requires.table_reflection + def test_array_reflection(self): + """Check array columns reflection.""" + orig_meta = self.metadata + + str_array = ARRAY(String(16)) + int_array = ARRAY(Integer) + arrays_test = Table( + "arrays_test", + orig_meta, + Column("id", Integer, primary_key=True), + Column("str_array", str_array), + Column("int_array", int_array), + ) + arrays_test.create(create_engine(get_db_url())) + + # autoload the table and check its columns reflection + tab = Table("arrays_test", orig_meta, autoload=True) + col_types = [col.type for col in tab.columns] + for type_ in ( + str_array, + int_array, + ): + assert type_ in col_types + + tab.drop() + + def _assert_insp_indexes(self, indexes, expected_indexes): + expected_indexes.sort(key=lambda item: item["name"]) + + index_names = [d["name"] for d in indexes] + exp_index_names = [d["name"] for d in expected_indexes] + assert sorted(index_names) == sorted(exp_index_names) + + +class CompositeKeyReflectionTest(_CompositeKeyReflectionTest): + @testing.requires.foreign_key_constraint_reflection + @testing.provide_metadata + def test_fk_column_order(self): + """ + SPANNER OVERRIDE: + + Spanner column usage reflection doesn't support determenistic + ordering. Overriding the test to check that columns are + reflected correctly, without considering their order. + """ + # test for issue #5661 + meta = self.metadata + insp = inspect(meta.bind) + foreign_keys = insp.get_foreign_keys(self.tables.tb2.name) + eq_(len(foreign_keys), 1) + fkey1 = foreign_keys[0] + eq_(set(fkey1.get("referred_columns")), {"name", "id", "attr"}) + eq_(set(fkey1.get("constrained_columns")), {"pname", "pid", "pattr"}) + + +class RowFetchTest(_RowFetchTest): + def test_row_w_scalar_select(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner returns a DatetimeWithNanoseconds() for date + data types. 
Overriding the test to use a DatetimeWithNanoseconds + type value as an expected result. + -------------- + + test that a scalar select as a column is returned as such + and that type conversion works OK. + + (this is half a SQLAlchemy Core test and half to catch database + backends that may have unusual behavior with scalar selects.) + """ + datetable = self.tables.has_dates + s = select([datetable.alias("x").c.today]).as_scalar() + s2 = select([datetable.c.id, s.label("somelabel")]) + row = config.db.execute(s2).first() + + eq_( + row["somelabel"], + DatetimeWithNanoseconds( + 2006, 5, 12, 12, 0, 0, tzinfo=datetime.timezone.utc + ), + ) + + +class InsertBehaviorTest(_InsertBehaviorTest): + @pytest.mark.skip("Spanner doesn't support empty inserts") + def test_empty_insert(self): + pass + + @pytest.mark.skip("Spanner doesn't support auto increment") + def test_insert_from_select_autoinc(self): + pass + + def test_autoclose_on_insert(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support tables with an auto increment primary key, + following insertions will fail with `400 id must not be NULL in table + autoinc_pk`. + + Overriding the tests and adding a manual primary key value to avoid the same + failures. 
+ """ + if config.requirements.returning.enabled: + engine = engines.testing_engine(options={"implicit_returning": False}) + else: + engine = config.db + + with engine.begin() as conn: + r = conn.execute( + self.tables.autoinc_pk.insert(), dict(id=1, data="some data") + ) + + assert r._soft_closed + assert not r.closed + assert r.is_insert + assert not r.returns_rows + + +class BytesTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + def test_nolength_binary(self): + metadata = MetaData() + foo = Table("foo", metadata, Column("one", LargeBinary)) + + foo.create(config.db) + foo.drop(config.db) + + +class StringTest(_StringTest): + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class TextTest(_TextTest): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + Table( + "text_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("text_data", Text), + ) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class NumericTest(_NumericTest): + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + self._literal_round_trip( + Numeric(precision=8, scale=4), + [15.7563], + [decimal.Decimal("15.7563")], + ) + self._literal_round_trip( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric_asfloat(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + self._literal_round_trip( + Numeric(precision=8, scale=4, asdecimal=False), + [15.7563], + [15.7563], + ) + self._literal_round_trip( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563")], + [15.7563], + ) + + def test_render_literal_float(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + self._literal_round_trip( + Float(4), + [decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + self._literal_round_trip( + Float(4), + [decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + @requires.precision_generic_float_type + def test_float_custom_scale(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + self._do_test( + Float(None, decimal_return_scale=7, asdecimal=True), + [15.7563827], + [decimal.Decimal("15.7563827")], + check_scale=True, + ) + + self._do_test( + Float(None, decimal_return_scale=7, asdecimal=True), + [15.7563827], + [decimal.Decimal("15.7563827")], + check_scale=True, + ) + + def test_numeric_as_decimal(self): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. + """ + self._do_test( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + def test_numeric_as_float(self): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. + """ + + self._do_test( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563")], + [15.7563], + ) + + @requires.floats_to_four_decimals + def test_float_as_decimal(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + self._do_test( + Float(precision=8, asdecimal=True), + [15.7563], + [decimal.Decimal("15.7563")], + ) + + self._do_test( + Float(precision=8, asdecimal=True), + [decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + def test_float_as_float(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + self._do_test( + Float(precision=8), + [15.7563], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + self._do_test( + Float(precision=8), + [decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + def test_float_coerce_round_trip(self, connection): + pass + + @requires.precision_numerics_general + def test_precision_decimal(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + + Remove an extra digits after decimal point as cloud spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. + """ + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("54.234246451")], + [decimal.Decimal("54.234246451")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("0.004354")], + [decimal.Decimal("0.004354")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("900.0")], + [decimal.Decimal("900.0")], + ) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal_large(self): + """test exceedingly large decimals. + + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + + self._do_test( + Numeric(precision=25, scale=2), + [decimal.Decimal("4E+8")], + [decimal.Decimal("4E+8")], + ) + + self._do_test( + Numeric(precision=25, scale=2), + [decimal.Decimal("5748E+15")], + [decimal.Decimal("5748E+15")], + ) + + self._do_test( + Numeric(precision=25, scale=2), + [decimal.Decimal("1.521E+15")], + [decimal.Decimal("1.521E+15")], + ) + + self._do_test( + Numeric(precision=25, scale=2), + [decimal.Decimal("00000000000000.1E+12")], + [decimal.Decimal("00000000000000.1E+12")], + ) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal(self): + """test exceedingly small decimals. + + Decimal reports values with E notation when the exponent + is greater than 6. + + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + + Remove extra digits after decimal point as cloud spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. 
+ """ + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-2")], + [decimal.Decimal("1E-2")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-3")], + [decimal.Decimal("1E-3")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-4")], + [decimal.Decimal("1E-4")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-5")], + [decimal.Decimal("1E-5")], + ) + + self._do_test( + Numeric(precision=18, scale=14), + [decimal.Decimal("1E-6")], + [decimal.Decimal("1E-6")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-7")], + [decimal.Decimal("1E-7")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("1E-8")], + [decimal.Decimal("1E-8")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("0.010000059")], + [decimal.Decimal("0.010000059")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("0.000000059")], + [decimal.Decimal("0.000000059")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("0.000000696")], + [decimal.Decimal("0.000000696")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("0.700000696")], + [decimal.Decimal("0.700000696")], + ) + + self._do_test( + Numeric(precision=18, scale=9), + [decimal.Decimal("696E-9")], + [decimal.Decimal("696E-9")], + ) + + +class LikeFunctionsTest(_LikeFunctionsTest): + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE 
ESCAPE clause") + def test_endswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape_escape(self): + pass + + def test_escape_keyword_raises(self): + """Check that ESCAPE keyword causes an exception when used.""" + with pytest.raises(NotImplementedError): + col = self.tables.some_table.c.data + self._test(col.contains("b##cde", escape="#"), {7}) + + +@pytest.mark.skip("Spanner doesn't support quotes in table names.") +class QuotedNameArgumentTest(_QuotedNameArgumentTest): + pass + + +@pytest.mark.skip("Spanner doesn't support IS DISTINCT FROM clause") +class IsOrIsNotDistinctFromTest(_IsOrIsNotDistinctFromTest): + pass + + +class OrderByLabelTest(_OrderByLabelTest): + @pytest.mark.skip( + "Spanner requires an alias for the GROUP BY list when specifying derived " + "columns also used in SELECT" + ) + def test_group_by_composed(self): + pass + + +class CompoundSelectTest(_CompoundSelectTest): + """ + See: https://github.com/googleapis/python-spanner/issues/347 + """ + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_limit_offset_selectable_in_unions(self): + pass + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_order_by_selectable_in_unions(self): + pass + + +class TestQueryHints(fixtures.TablesTest): + """ + Compile a complex query with JOIN and check that + the table hint was set into the right place. 
+ """ + + __backend__ = True + + def test_complex_query_table_hints(self): + EXPECTED_QUERY = ( + "SELECT users.id, users.name \nFROM users @{FORCE_INDEX=table_1_by_int_idx}" + " JOIN addresses ON users.id = addresses.user_id " + "\nWHERE users.name IN (%s, %s)" + ) + + Base = declarative_base() + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + + class User(Base): + __tablename__ = "users" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + addresses = relation("Address", backref="user") + + class Address(Base): + __tablename__ = "addresses" + id = Column(Integer, primary_key=True) + email = Column(String(50)) + user_id = Column(Integer, ForeignKey("users.id")) + + session = Session(engine) + + query = session.query(User) + query = query.with_hint( + selectable=User, text="@{FORCE_INDEX=table_1_by_int_idx}" + ) + + query = query.filter(User.name.in_(["val1", "val2"])) + query = query.join(Address) + + assert str(query.statement.compile(session.bind)) == EXPECTED_QUERY + + +class SpannerSpecificTestBase(fixtures.TestBase): + """Base class for the Cloud Spanner related tests.""" + + def setUp(self): + self._engine = create_engine(get_db_url()) + self._metadata = MetaData(bind=self._engine) + + +class InterleavedTablesTest(SpannerSpecificTestBase): + """ + Check that CREATE TABLE statements for interleaved tables are correctly + generated. 
+ """ + + def test_interleave(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + def test_interleave_on_delete_cascade(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team ON DELETE CASCADE\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + spanner_interleave_on_delete_cascade=True, + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + +class UserAgentTest(SpannerSpecificTestBase): + """Check that SQLAlchemy dialect uses correct user agent.""" + + def test_user_agent(self): + with self._engine.connect() as connection: + assert ( + connection.connection.instance._client._client_info.user_agent + == f"gl-sqlalchemy-spanner/{sqlalchemy_spanner_version.__version__}" + ) + + +class ExecutionOptionsReadOnlyTest(fixtures.TestBase): + """ + Check that `execution_options()` method correctly + sets parameters on the underlying DB API connection. 
+ """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + metadata = MetaData(bind=self._engine) + + self._table = Table( + "execution_options", + metadata, + Column("opt_id", Integer, primary_key=True), + Column("opt_name", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + + def test_read_only(self): + with self._engine.connect().execution_options(read_only=True) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + assert connection.connection.read_only is True + + with self._engine.connect() as connection: + assert connection.connection.read_only is False + + +class LimitOffsetTest(fixtures.TestBase): + """ + Check that SQL with an offset and no limit is being generated correctly. + """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + self._metadata = MetaData(bind=self._engine) + + self._table = Table( + "users", + self._metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + + self._metadata.create_all(self._engine) + + def test_offset_only(self): + for offset in [1, 7, 10, 100, 1000, 10000]: + with self._engine.connect().execution_options(read_only=True) as connection: + list(connection.execute(self._table.select().offset(offset)).fetchall()) + + +class ExecutionOptionsTest(fixtures.TestBase): + """ + Check that `execution_options()` method correctly + sets parameters on the underlying DB API connection. 
+ """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + metadata = MetaData(bind=self._engine) + + self._table = Table( + "execution_options", + metadata, + Column("opt_id", Integer, primary_key=True), + Column("opt_name", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + time.sleep(1) + + def test_staleness(self): + with self._engine.connect().execution_options( + read_only=True, staleness={"exact_staleness": datetime.timedelta(seconds=1)} + ) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + assert connection.connection.staleness == { + "exact_staleness": datetime.timedelta(seconds=1) + } + + with self._engine.connect() as connection: + assert connection.connection.staleness == {} + + engine = create_engine("sqlite:///database") + with engine.connect() as connection: + pass + + def test_request_priority(self): + PRIORITY = RequestOptions.Priority.PRIORITY_MEDIUM + with self._engine.connect().execution_options( + request_priority=PRIORITY + ) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + + with self._engine.connect() as connection: + assert connection.connection.request_priority is None + + engine = create_engine("sqlite:///database") + with engine.connect() as connection: + pass + + +class TemporaryTableTest(fixtures.TestBase): + """ + Check that temporary tables raise an error on creation. 
+ """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + self._metadata = MetaData(bind=self._engine) + + def test_temporary_prefix(self): + with pytest.raises(NotImplementedError): + Table( + "users", + self._metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + prefixes=["TEMPORARY"], + ).create() + + +class ComputedReflectionFixtureTest(_ComputedReflectionFixtureTest): + @classmethod + def define_tables(cls, metadata): + """SPANNER OVERRIDE: + + Avoid using default values for computed columns. + """ + Table( + "computed_default_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_col", Integer, Computed("normal + 42")), + Column("with_default", Integer), + ) + + t = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal + 42")), + ) + + if testing.requires.schemas.enabled: + t2 = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal / 42")), + schema=config.test_schema, + ) + + if testing.requires.computed_columns_virtual.enabled: + t.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal + 2", persisted=False), + ) + ) + if testing.requires.schemas.enabled: + t2.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal / 2", persisted=False), + ) + ) + if testing.requires.computed_columns_stored.enabled: + t.append_column( + Column( + "computed_stored", + Integer, + Computed("normal - 42", persisted=True), + ) + ) + if testing.requires.schemas.enabled: + t2.append_column( + Column( + "computed_stored", + Integer, + Computed("normal * 42", persisted=True), + ) + ) + + +class ComputedReflectionTest(_ComputedReflectionTest, 
ComputedReflectionFixtureTest): + @pytest.mark.skip("Default values are not supported.") + def test_computed_col_default_not_set(self): + pass + + def test_get_column_returns_computed(self): + """ + SPANNER OVERRIDE: + + In Spanner all the generated columns are STORED, + meaning there are no persisted and not persisted + (in the terms of the SQLAlchemy) columns. The + method override omits the persistence reflection checks. + """ + insp = inspect(config.db) + + cols = insp.get_columns("computed_default_table") + data = {c["name"]: c for c in cols} + for key in ("id", "normal", "with_default"): + is_true("computed" not in data[key]) + compData = data["computed_col"] + is_true("computed" in compData) + is_true("sqltext" in compData["computed"]) + eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42") + + def test_create_not_null_computed_column(self): + """ + SPANNER TEST: + + Check that on creating a computed column with a NOT NULL + clause the clause is set in front of the computed column + statement definition and doesn't cause failures. 
+ """ + engine = create_engine(get_db_url()) + metadata = MetaData(bind=engine) + + Table( + "Singers", + metadata, + Column("SingerId", String(36), primary_key=True, nullable=False), + Column("FirstName", String(200)), + Column("LastName", String(200), nullable=False), + Column( + "FullName", + String(400), + Computed("COALESCE(FirstName || ' ', '') || LastName"), + nullable=False, + ), + ) + + metadata.create_all(engine) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class JSONTest(_JSONTest): + @pytest.mark.skip("Values without keys are not supported.") + def test_single_element_round_trip(self, element): + pass + + def _test_round_trip(self, data_element): + data_table = self.tables.data_table + + config.db.execute( + data_table.insert(), + {"id": random.randint(1, 100000000), "name": "row1", "data": data_element}, + ) + + row = config.db.execute(select([data_table.c.data])).first() + + eq_(row, (data_element,)) + + def test_unicode_round_trip(self): + # note we include Unicode supplementary characters as well + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "r1", + "data": { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + }, + ) + + eq_( + conn.scalar(select([self.tables.data_table.c.data])), + { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + ) + + @pytest.mark.skip("Parameterized types are not supported.") + def test_eval_none_flag_orm(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." + ) + def test_index_typed_comparison(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." 
+ ) + def test_path_typed_comparison(self): + pass + + @pytest.mark.skip("Custom JSON de-/serializers are not supported.") + def test_round_trip_custom_json(self): + pass + + def _index_fixtures(fn): + fn = testing.combinations( + ("boolean", True), + ("boolean", False), + ("boolean", None), + ("string", "some string"), + ("string", None), + ("integer", 15), + ("integer", 1), + ("integer", 0), + ("integer", None), + ("float", 28.5), + ("float", None), + id_="sa", + )(fn) + return fn + + @_index_fixtures + def test_index_typed_access(self, datatype, value): + data_table = self.tables.data_table + data_element = {"key1": value} + with config.db.connect() as conn: + conn.execute( + data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + expr = data_table.c.data["key1"] + expr = getattr(expr, "as_%s" % datatype)() + + roundtrip = conn.scalar(select([expr])) + if roundtrip in ("true", "false", None): + roundtrip = str(roundtrip).capitalize() + + eq_(str(roundtrip), str(value)) + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_json_null_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_none_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_none_as_sql_null(self): + pass + + +class CreateEngineWithClientObjectTest(fixtures.TestBase): + def test_create_engine_w_valid_client_object(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + by passing custom Client object. 
+ """ + client = Client(project=get_project()) + engine = create_engine(get_db_url(), connect_args={"client": client}) + with engine.connect() as connection: + assert connection.connection.instance._client == client + + def test_create_engine_w_invalid_client_object(self): + """ + SPANNER TEST: + + Check that if project id in url and custom Client + Object passed to enginer mismatch, error is thrown. + """ + client = Client(project="project_id") + engine = create_engine(get_db_url(), connect_args={"client": client}) + + with pytest.raises(ValueError): + engine.connect() + + +class CreateEngineWithoutDatabaseTest(fixtures.TestBase): + def test_create_engine_wo_database(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + without passing database id in the + connection URL. + """ + engine = create_engine(get_db_url().split("/database")[0]) + with engine.connect() as connection: + assert connection.connection.database is None + + +class ReturningTest(fixtures.TestBase): + def setUp(self): + self._engine = create_engine(get_db_url()) + metadata = MetaData() + + self._table = Table( + "returning_test", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + + def test_returning_for_insert_and_update(self): + random_id = random.randint(1, 1000) + with self._engine.begin() as connection: + stmt = ( + self._table.insert() + .values(id=random_id, data="some % value") + .returning(self._table.c.id) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(random_id,)], + ) + + with self._engine.begin() as connection: + update_text = "some + value" + stmt = ( + self._table.update() + .values(data=update_text) + .where(self._table.c.id == random_id) + .returning(self._table.c.data) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(update_text,)], + ) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on 
emulator" +) +class SequenceTest(_SequenceTest): + @classmethod + def define_tables(cls, metadata): + Table( + "seq_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_opt_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq_opt", data_type=Integer, optional=True), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_no_returning", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("noret_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + implicit_returning=False, + ) + + def test_insert_lastrowid(self, connection): + r = connection.execute(self.tables.seq_pk.insert(), dict(data="some data")) + assert len(r.inserted_primary_key) == 1 + is_instance_of(r.inserted_primary_key[0], int) + + def test_nextval_direct(self, connection): + r = connection.execute(self.tables.seq_pk.c.id.default) + is_instance_of(r, int) + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + id, name = row + is_instance_of(id, int) + eq_(name, "some data") + + @testing.combinations((True,), (False,), argnames="implicit_returning") + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_insert_roundtrip_translate(self, connection, implicit_returning): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_nextval_direct_schema_translate(self, connection): + pass + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class HasSequenceTest(_HasSequenceTest): + @classmethod + def define_tables(cls, metadata): + sqlalchemy.Sequence("user_id_seq", metadata=metadata) + sqlalchemy.Sequence( + "other_seq", metadata=metadata, nomaxvalue=True, nominvalue=True + ) + Table( + "user_id_table", + metadata, + Column("id", Integer, primary_key=True), + ) 
+ + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_cache(self, connection, metadata): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_schemas_neg(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_default_not_in_remote(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_remote_not_in_default(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_get_sequence_names_no_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_get_sequence_names_sequences_schema(self, connection): + pass diff --git a/packages/sqlalchemy-spanner/test/test_suite_14.py b/packages/sqlalchemy-spanner/test/test_suite_14.py new file mode 100644 index 000000000000..5a083ce26b6f --- /dev/null +++ b/packages/sqlalchemy-spanner/test/test_suite_14.py @@ -0,0 +1,2558 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import timezone +import decimal +import operator +import os +import pytest +import random +import time +from unittest import mock + +from google.cloud.spanner_v1 import RequestOptions, Client + +import sqlalchemy +from sqlalchemy import create_engine, literal, FLOAT +from sqlalchemy import inspect +from sqlalchemy import testing +from sqlalchemy import ForeignKey +from sqlalchemy import MetaData +from sqlalchemy.schema import DDL +from sqlalchemy.schema import Computed +from sqlalchemy.testing import config +from sqlalchemy.testing import engines +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import is_instance_of +from sqlalchemy.testing import provide_metadata, emits_warning +from sqlalchemy.testing import fixtures +from sqlalchemy.testing.provision import temp_table_keyword_args +from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import Table +from sqlalchemy import literal_column +from sqlalchemy import select +from sqlalchemy import util +from sqlalchemy import union +from sqlalchemy import event +from sqlalchemy import exists +from sqlalchemy import Boolean +from sqlalchemy import Float +from sqlalchemy import LargeBinary +from sqlalchemy import String +from sqlalchemy.sql.expression import cast +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relation +from sqlalchemy.orm import Session +from sqlalchemy.types import Integer +from sqlalchemy.types import Numeric +from sqlalchemy.types import Text +from sqlalchemy.testing import requires +from sqlalchemy.testing import is_true +from sqlalchemy import types as sql_types +from sqlalchemy.testing.fixtures import ( + ComputedReflectionFixtureTest as _ComputedReflectionFixtureTest, +) + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +from google.cloud import spanner_dbapi + +from sqlalchemy.testing.suite.test_cte import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_ddl import * # noqa: 
F401, F403 +from sqlalchemy.testing.suite.test_dialect import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_insert import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_reflection import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_results import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_select import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_sequence import ( + SequenceTest as _SequenceTest, + HasSequenceTest as _HasSequenceTest, + HasSequenceTestEmpty as _HasSequenceTestEmpty, +) # noqa: F401, F403 +from sqlalchemy.testing.suite.test_update_delete import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_cte import CTETest as _CTETest +from sqlalchemy.testing.suite.test_ddl import TableDDLTest as _TableDDLTest +from sqlalchemy.testing.suite.test_ddl import ( + FutureTableDDLTest as _FutureTableDDLTest, + LongNameBlowoutTest as _LongNameBlowoutTest, +) +from sqlalchemy.testing.suite.test_update_delete import ( + SimpleUpdateDeleteTest as _SimpleUpdateDeleteTest, +) +from sqlalchemy.testing.suite.test_dialect import ( + DifficultParametersTest as _DifficultParametersTest, + EscapingTest as _EscapingTest, +) +from sqlalchemy.testing.suite.test_insert import ( + InsertBehaviorTest as _InsertBehaviorTest, +) +from sqlalchemy.testing.suite.test_select import ( # noqa: F401, F403 + CompoundSelectTest as _CompoundSelectTest, + ExistsTest as _ExistsTest, + FetchLimitOffsetTest as _FetchLimitOffsetTest, + IdentityAutoincrementTest as _IdentityAutoincrementTest, + IsOrIsNotDistinctFromTest as _IsOrIsNotDistinctFromTest, + LikeFunctionsTest as _LikeFunctionsTest, + OrderByLabelTest as _OrderByLabelTest, + PostCompileParamsTest as _PostCompileParamsTest, +) +from sqlalchemy.testing.suite.test_reflection import ( + ComponentReflectionTestExtra as _ComponentReflectionTestExtra, + QuotedNameArgumentTest as _QuotedNameArgumentTest, + ComponentReflectionTest as _ComponentReflectionTest, + 
CompositeKeyReflectionTest as _CompositeKeyReflectionTest, + ComputedReflectionTest as _ComputedReflectionTest, + HasIndexTest as _HasIndexTest, + HasTableTest as _HasTableTest, +) +from sqlalchemy.testing.suite.test_results import RowFetchTest as _RowFetchTest +from sqlalchemy.testing.suite.test_types import ( # noqa: F401, F403 + BooleanTest as _BooleanTest, + DateTest as _DateTest, + _DateFixture as __DateFixture, + DateTimeHistoricTest, + DateTimeCoercedToDateTimeTest as _DateTimeCoercedToDateTimeTest, + DateTimeMicrosecondsTest as _DateTimeMicrosecondsTest, + DateTimeTest as _DateTimeTest, + IntegerTest as _IntegerTest, + JSONTest as _JSONTest, + _LiteralRoundTripFixture, + NumericTest as _NumericTest, + StringTest as _StringTest, + TextTest as _TextTest, + TimeTest as _TimeTest, + TimeMicrosecondsTest as _TimeMicrosecondsTest, + TimestampMicrosecondsTest, + UnicodeVarcharTest as _UnicodeVarcharTest, + UnicodeTextTest as _UnicodeTextTest, + _UnicodeFixture as __UnicodeFixture, +) +from test._helpers import get_db_url, get_project + +from google.cloud.sqlalchemy_spanner import version as sqlalchemy_spanner_version + + +config.test_schema = "" + + +class BooleanTest(_BooleanTest): + @pytest.mark.skip( + "The original test case was split into 2 parts: " + "test_render_literal_bool_true and test_render_literal_bool_false" + ) + def test_render_literal_bool(self): + pass + + def test_render_literal_bool_true(self, literal_round_trip): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + literal_round_trip(Boolean(), [True], [True]) + + def test_render_literal_bool_false(self, literal_round_trip): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip(Boolean(), [False], [False]) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_whereclause(self): + pass + + +class ComponentReflectionTestExtra(_ComponentReflectionTestExtra): + @testing.requires.table_reflection + def test_nullable_reflection(self, connection, metadata): + t = Table( + "t", + metadata, + Column("a", Integer, nullable=True), + Column("b", Integer, nullable=False), + ) + t.create(connection) + connection.connection.commit() + eq_( + dict( + (col["name"], col["nullable"]) + for col in inspect(connection).get_columns("t") + ), + {"a": True, "b": False}, + ) + + def _type_round_trip(self, connection, metadata, *types): + t = Table( + "t", metadata, *[Column("t%d" % i, type_) for i, type_ in enumerate(types)] + ) + t.create(connection) + connection.connection.commit() + + return [c["type"] for c in inspect(connection).get_columns("t")] + + @testing.requires.table_reflection + def test_numeric_reflection(self, connection, metadata): + """ + SPANNER OVERRIDE: + + Spanner defines NUMERIC type with the constant precision=38 + and scale=9. Overriding the test to check if the NUMERIC + column is successfully created and has dimensions + correct for Cloud Spanner. + """ + for typ in self._type_round_trip(connection, metadata, Numeric(18, 5)): + assert isinstance(typ, Numeric) + eq_(typ.precision, 38) + eq_(typ.scale, 9) + + @testing.requires.table_reflection + def test_binary_reflection(self, connection, metadata): + """ + Check that a BYTES column with an explicitly + set size is correctly reflected. 
+ """ + for typ in self._type_round_trip(connection, metadata, LargeBinary(20)): + assert isinstance(typ, LargeBinary) + eq_(typ.length, 20) + + +class ComponentReflectionTest(_ComponentReflectionTest): + @classmethod + def define_views(cls, metadata, schema): + table_info = { + "users": ["user_id", "test1", "test2"], + "email_addresses": ["address_id", "remote_user_id", "email_address"], + } + if testing.requires.self_referential_foreign_keys.enabled: + table_info["users"] = table_info["users"] + ["parent_user_id"] + for table_name in ("users", "email_addresses"): + fullname = table_name + if schema: + fullname = "%s.%s" % (schema, table_name) + view_name = fullname + "_v" + columns = "" + for column in table_info[table_name]: + stmt = table_name + "." + column + " AS " + column + if columns: + columns = columns + ", " + stmt + else: + columns = stmt + query = f"""CREATE VIEW {view_name} + SQL SECURITY INVOKER + AS SELECT {columns} + FROM {fullname}""" + + event.listen(metadata, "after_create", DDL(query)) + event.listen(metadata, "before_drop", DDL("DROP VIEW %s" % view_name)) + + @classmethod + def define_tables(cls, metadata): + cls.define_reflected_tables(metadata, None) + + @classmethod + def define_reflected_tables(cls, metadata, schema): + if schema: + schema_prefix = schema + "." 
+ else: + schema_prefix = "" + + if testing.requires.self_referential_foreign_keys.enabled: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + Column( + "parent_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey( + "%susers.user_id" % schema_prefix, name="user_id_fk" + ), + ), + schema=schema, + test_needs_fk=True, + ) + else: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + schema=schema, + test_needs_fk=True, + ) + + Table( + "dingalings", + metadata, + Column("dingaling_id", sqlalchemy.Integer, primary_key=True), + Column( + "address_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("%semail_addresses.address_id" % schema_prefix), + ), + Column("data", sqlalchemy.String(30)), + schema=schema, + test_needs_fk=True, + ) + Table( + "email_addresses", + metadata, + Column("address_id", sqlalchemy.Integer, primary_key=True), + Column( + "remote_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey(users.c.user_id), + ), + Column("email_address", sqlalchemy.String(20)), + sqlalchemy.PrimaryKeyConstraint("address_id", name="email_ad_pk"), + schema=schema, + test_needs_fk=True, + ) + Table( + "comment_test", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True, comment="id comment"), + Column("data", sqlalchemy.String(20), comment="data % comment"), + Column( + "d2", + sqlalchemy.String(20), + comment=r"""Comment types type speedily ' " \ '' Fun!""", + ), + schema=schema, + comment=r"""the test % ' " \ table comment""", + ) + + if testing.requires.cross_schema_fk_reflection.enabled: + if schema is None: + Table( + "local_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + 
Column( + "remote_id", + ForeignKey("%s.remote_table_2.id" % testing.config.test_schema), + ), + test_needs_fk=True, + schema=config.db.dialect.default_schema_name, + ) + else: + Table( + "remote_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column( + "local_id", + ForeignKey( + "%s.local_table.id" % config.db.dialect.default_schema_name + ), + ), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + Table( + "remote_table_2", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + + if testing.requires.index_reflection.enabled: + cls.define_index(metadata, users) + + if not schema: + # test_needs_fk is at the moment to force MySQL InnoDB + noncol_idx_test_nopk = Table( + "noncol_idx_test_nopk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + extend_existing=True, + ) + + noncol_idx_test_pk = Table( + "noncol_idx_test_pk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + extend_existing=True, + ) + + if testing.requires.indexes_with_ascdesc.enabled: + sqlalchemy.Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc()) + sqlalchemy.Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc()) + + if testing.requires.view_column_reflection.enabled and not bool( + os.environ.get("SPANNER_EMULATOR_HOST") + ): + cls.define_views(metadata, schema) + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + @testing.requires.view_reflection + @testing.combinations( + (False,), (True, testing.requires.schemas), argnames="use_schema" + ) + def test_get_view_definition(self, connection, use_schema): + if use_schema: + schema = config.test_schema + else: + schema = None + view_name1 = "users_v" + view_name2 = 
"email_addresses_v" + insp = inspect(connection) + v1 = insp.get_view_definition(view_name1, schema=schema) + self.assert_(v1) + v2 = insp.get_view_definition(view_name2, schema=schema) + self.assert_(v2) + + @testing.combinations( + (False, False), + (False, True, testing.requires.schemas), + (True, False, testing.requires.view_reflection), + ( + True, + True, + testing.requires.schemas + testing.requires.view_reflection, + ), + argnames="use_views,use_schema", + ) + def test_get_columns(self, connection, use_views, use_schema): + if use_views and bool(os.environ.get("SPANNER_EMULATOR_HOST")): + pytest.skip("Skipped on emulator") + + schema = None + users, addresses = (self.tables.users, self.tables.email_addresses) + if use_views: + table_names = ["users_v", "email_addresses_v"] + else: + table_names = ["users", "email_addresses"] + + insp = inspect(connection) + for table_name, table in zip(table_names, (users, addresses)): + schema_name = schema + cols = insp.get_columns(table_name, schema=schema_name) + self.assert_(len(cols) > 0, len(cols)) + + # should be in order + + for i, col in enumerate(table.columns): + eq_(col.name, cols[i]["name"]) + ctype = cols[i]["type"].__class__ + ctype_def = col.type + if isinstance(ctype_def, sqlalchemy.types.TypeEngine): + ctype_def = ctype_def.__class__ + + # Oracle returns Date for DateTime. + + if testing.against("oracle") and ctype_def in ( + sql_types.Date, + sql_types.DateTime, + ): + ctype_def = sql_types.Date + + # assert that the desired type and return type share + # a base within one of the generic types. 
+ + self.assert_( + len( + set(ctype.__mro__) + .intersection(ctype_def.__mro__) + .intersection( + [ + sql_types.Integer, + sql_types.Numeric, + sql_types.DateTime, + sql_types.Date, + sql_types.Time, + sql_types.String, + sql_types._Binary, + ] + ) + ) + > 0, + "%s(%s), %s(%s)" % (col.name, col.type, cols[i]["name"], ctype), + ) + + if not col.primary_key: + assert cols[i]["default"] is None + + @testing.combinations((False,), argnames="use_schema") + @testing.requires.foreign_key_constraint_reflection + def test_get_foreign_keys(self, connection, use_schema): + if use_schema: + schema = config.test_schema + else: + schema = None + + users, addresses = (self.tables.users, self.tables.email_addresses) + insp = inspect(connection) + expected_schema = schema + # users + + if testing.requires.self_referential_foreign_keys.enabled: + users_fkeys = insp.get_foreign_keys(users.name, schema=schema) + fkey1 = users_fkeys[0] + + with testing.requires.named_constraints.fail_if(): + eq_(fkey1["name"], "user_id_fk") + + eq_(fkey1["referred_schema"], expected_schema) + eq_(fkey1["referred_table"], users.name) + eq_(fkey1["referred_columns"], ["user_id"]) + if testing.requires.self_referential_foreign_keys.enabled: + eq_(fkey1["constrained_columns"], ["parent_user_id"]) + + # addresses + addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema) + fkey1 = addr_fkeys[0] + + with testing.requires.implicitly_named_constraints.fail_if(): + self.assert_(fkey1["name"] is not None) + + eq_(fkey1["referred_schema"], expected_schema) + eq_(fkey1["referred_table"], users.name) + eq_(fkey1["referred_columns"], ["user_id"]) + eq_(fkey1["constrained_columns"], ["remote_user_id"]) + + @testing.requires.foreign_key_constraint_reflection + @testing.combinations( + (None, True, False, False), + (None, True, False, True, testing.requires.schemas), + ("foreign_key", True, False, False), + (None, False, False, False), + (None, False, False, True, testing.requires.schemas), + (None, True, 
False, False), + (None, True, False, True, testing.requires.schemas), + argnames="order_by,include_plain,include_views,use_schema", + ) + def test_get_table_names( + self, connection, order_by, include_plain, include_views, use_schema + ): + if use_schema: + schema = config.test_schema + else: + schema = None + + _ignore_tables = [ + "account", + "alembic_version", + "bytes_table", + "comment_test", + "date_table", + "noncol_idx_test_pk", + "noncol_idx_test_nopk", + "local_table", + "remote_table", + "remote_table_2", + "text_table", + "user_tmp", + ] + + insp = inspect(connection) + + if include_views: + table_names = insp.get_view_names(schema) + table_names.sort() + answer = ["email_addresses_v", "users_v"] + eq_(sorted(table_names), answer) + + if include_plain: + if order_by: + tables = [ + rec[0] + for rec in insp.get_sorted_table_and_fkc_names(schema) + if rec[0] + ] + else: + tables = insp.get_table_names(schema) + table_names = [t for t in tables if t not in _ignore_tables] + + if order_by == "foreign_key": + answer = ["users", "email_addresses", "dingalings"] + eq_(table_names, answer) + else: + answer = ["dingalings", "email_addresses", "users"] + eq_(sorted(table_names), answer) + + @classmethod + def define_temp_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + In Cloud Spanner unique indexes are used instead of directly + creating unique constraints. Overriding the test to replace + constraints with indexes in testing data. 
+ """ + kw = temp_table_keyword_args(config, config.db) + user_tmp = Table( + "user_tmp", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + Column("foo", sqlalchemy.INT), + sqlalchemy.Index("user_tmp_uq", "name", unique=True), + sqlalchemy.Index("user_tmp_ix", "foo"), + extend_existing=True, + **kw, + ) + if ( + testing.requires.view_reflection.enabled + and testing.requires.temporary_views.enabled + ): + event.listen( + user_tmp, + "after_create", + DDL("create temporary view user_tmp_v as " "select * from user_tmp"), + ) + event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v")) + + @testing.provide_metadata + def test_reflect_string_column_max_len(self): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the STRING type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. + """ + metadata = MetaData(self.bind) + Table("text_table", metadata, Column("TestColumn", Text, nullable=False)) + metadata.create_all() + + Table("text_table", metadata, autoload=True) + + def test_reflect_bytes_column_max_len(self): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the BYTES type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. + """ + metadata = MetaData(self.bind) + Table( + "bytes_table", + metadata, + Column("TestColumn", LargeBinary, nullable=False), + ) + metadata.create_all() + + Table("bytes_table", metadata, autoload=True) + + @testing.combinations( + (True, testing.requires.schemas), (False,), argnames="use_schema" + ) + @testing.requires.unique_constraint_reflection + def test_get_unique_constraints(self, metadata, connection, use_schema): + # SQLite dialect needs to parse the names of the constraints + # separately from what it gets from PRAGMA index_list(), and + # then matches them up. so same set of column_names in two + # constraints will confuse it. 
Perhaps we should no longer + # bother with index_list() here since we have the whole + # CREATE TABLE? + + if use_schema: + schema = config.test_schema + else: + schema = None + uniques = sorted( + [ + {"name": "unique_a", "column_names": ["a"]}, + {"name": "unique_a_b_c", "column_names": ["a", "b", "c"]}, + {"name": "unique_c_a_b", "column_names": ["c", "a", "b"]}, + {"name": "unique_asc_key", "column_names": ["asc", "key"]}, + {"name": "i.have.dots", "column_names": ["b"]}, + {"name": "i have spaces", "column_names": ["c"]}, + ], + key=operator.itemgetter("name"), + ) + table = Table( + "testtbl", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("a", String(20)), + Column("b", String(30)), + Column("c", Integer), + # reserved identifiers + Column("asc", String(30)), + Column("key", String(30)), + sqlalchemy.Index("unique_a", "a", unique=True), + sqlalchemy.Index("unique_a_b_c", "a", "b", "c", unique=True), + sqlalchemy.Index("unique_c_a_b", "c", "a", "b", unique=True), + sqlalchemy.Index("unique_asc_key", "asc", "key", unique=True), + schema=schema, + ) + table.create(connection) + connection.connection.commit() + + inspector = inspect(connection) + reflected = sorted( + inspector.get_unique_constraints("testtbl", schema=schema), + key=operator.itemgetter("name"), + ) + + names_that_duplicate_index = set() + + for orig, refl in zip(uniques, reflected): + # Different dialects handle duplicate index and constraints + # differently, so ignore this flag + dupe = refl.pop("duplicates_index", None) + if dupe: + names_that_duplicate_index.add(dupe) + eq_(orig, refl) + + reflected_metadata = MetaData() + reflected = Table( + "testtbl", + reflected_metadata, + autoload_with=connection, + schema=schema, + ) + + # test "deduplicates for index" logic. MySQL and Oracle + # "unique constraints" are actually unique indexes (with possible + # exception of a unique that is a dupe of another one in the case + # of Oracle). make sure # they aren't duplicated. 
+ idx_names = set([idx.name for idx in reflected.indexes]) + uq_names = set( + [ + uq.name + for uq in reflected.constraints + if isinstance(uq, sqlalchemy.UniqueConstraint) + ] + ).difference(["unique_c_a_b"]) + + assert not idx_names.intersection(uq_names) + if names_that_duplicate_index: + eq_(names_that_duplicate_index, idx_names) + eq_(uq_names, set()) + + @testing.provide_metadata + def test_unique_constraint_raises(self): + """ + Checking that unique constraint creation + fails due to a ProgrammingError. + """ + metadata = MetaData(self.bind) + Table( + "user_tmp_failure", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + sqlalchemy.UniqueConstraint("name", name="user_tmp_uq"), + ) + + with pytest.raises(spanner_dbapi.exceptions.ProgrammingError): + metadata.create_all() + + @testing.provide_metadata + def _test_get_table_names(self, schema=None, table_type="table", order_by=None): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. 
+ """ + _ignore_tables = [ + "comment_test", + "noncol_idx_test_pk", + "noncol_idx_test_nopk", + "local_table", + "remote_table", + "remote_table_2", + ] + meta = self.metadata + + insp = inspect(meta.bind) + + if table_type == "view" and not bool(os.environ.get("SPANNER_EMULATOR_HOST")): + table_names = insp.get_view_names(schema) + table_names.sort() + answer = ["email_addresses_v", "users_v"] + eq_(sorted(table_names), answer) + else: + if order_by: + tables = [ + rec[0] + for rec in insp.get_sorted_table_and_fkc_names(schema) + if rec[0] + ] + else: + tables = insp.get_table_names(schema) + table_names = [t for t in tables if t not in _ignore_tables] + + if order_by == "foreign_key": + answer = {"dingalings", "email_addresses", "user_tmp", "users"} + eq_(set(table_names), answer) + else: + answer = ["dingalings", "email_addresses", "user_tmp", "users"] + eq_(sorted(table_names), answer) + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_indexes(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_unique_constraints(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_columns(self): + pass + + def _assert_insp_indexes(self, indexes, expected_indexes): + expected_indexes.sort(key=lambda item: item["name"]) + + index_names = [d["name"] for d in indexes] + exp_index_names = [d["name"] for d in expected_indexes] + assert sorted(index_names) == sorted(exp_index_names) + + +class CompositeKeyReflectionTest(_CompositeKeyReflectionTest): + @testing.requires.foreign_key_constraint_reflection + def test_fk_column_order(self): + """ + SPANNER OVERRIDE: + + Spanner column usage reflection doesn't support determenistic + ordering. Overriding the test to check that columns are + reflected correctly, without considering their order. 
+ """ + # test for issue #5661 + insp = inspect(self.bind) + foreign_keys = insp.get_foreign_keys(self.tables.tb2.name) + eq_(len(foreign_keys), 1) + fkey1 = foreign_keys[0] + eq_(set(fkey1.get("referred_columns")), {"name", "id", "attr"}) + eq_(set(fkey1.get("constrained_columns")), {"pname", "pid", "pattr"}) + + +@pytest.mark.skip("Spanner doesn't support quotes in table names.") +class QuotedNameArgumentTest(_QuotedNameArgumentTest): + pass + + +class _DateFixture(__DateFixture): + compare = None + + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + + class Decorated(sqlalchemy.TypeDecorator): + impl = cls.datatype + cache_ok = True + + Table( + "date_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("date_data", cls.datatype), + Column("decorated_date_data", Decorated), + ) + + +class DateTest(_DateTest): + """ + SPANNER OVERRIDE: + + DateTest tests used same class method to create table, so to avoid those failures + and maintain DRY concept just inherit the class to run tests successfully. + """ + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + +class CTETest(_CTETest): + @classmethod + def define_tables(cls, metadata): + """ + The original method creates a foreign key without a name, + which causes troubles on test cleanup. Overriding the + method to explicitly set a foreign key name. 
+ """ + Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", ForeignKey("some_table.id", name="fk_some_table")), + ) + + Table( + "some_other_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", Integer), + ) + + @pytest.mark.skip("INSERT from WITH subquery is not supported") + def test_insert_from_select_round_trip(self): + """ + The test checks if an INSERT can be done from a cte, like: + + WITH some_cte AS (...) + INSERT INTO some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_scalar_subq_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_from_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("UPDATE from WITH subquery is not supported") + def test_update_from_round_trip(self): + """ + The test checks if an UPDATE can be done from a cte, like: + + WITH some_cte AS (...) + UPDATE some_other_table + SET (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. 
+ """ + pass + + @pytest.mark.skip("WITH RECURSIVE subqueries are not supported") + def test_select_recursive_round_trip(self): + pass + + +class DateTimeMicrosecondsTest(_DateTimeMicrosecondsTest, DateTest): + @pytest.mark.skip("Spanner dates are time zone independent") + def test_select_direct(self): + pass + + def test_round_trip(self): + """ + SPANNER OVERRIDE: + + Spanner converts timestamp into `%Y-%m-%dT%H:%M:%S.%fZ` format, so to avoid + assert failures convert datetime input to the desire timestamp format. + """ + date_table = self.tables.date_table + config.db.execute(date_table.insert(), {"date_data": self.data, "id": 250}) + + row = config.db.execute(select([date_table.c.date_data])).first() + compare = self.compare or self.data + compare = compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + eq_(row[0].rfc3339(), compare) + assert isinstance(row[0], DatetimeWithNanoseconds) + + def test_round_trip_decorated(self, connection): + """ + SPANNER OVERRIDE: + + Spanner converts timestamp into `%Y-%m-%dT%H:%M:%S.%fZ` format, so to avoid + assert failures convert datetime input to the desire timestamp format. 
+ """ + date_table = self.tables.date_table + + connection.execute( + date_table.insert(), {"id": 1, "decorated_date_data": self.data} + ) + + row = connection.execute(select(date_table.c.decorated_date_data)).first() + + compare = self.compare or self.data + compare = compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + eq_(row[0].rfc3339(), compare) + assert isinstance(row[0], DatetimeWithNanoseconds) + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + +class DateTimeTest(_DateTimeTest, DateTimeMicrosecondsTest): + """ + SPANNER OVERRIDE: + + DateTimeTest tests have the same failures same as DateTimeMicrosecondsTest tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. 
+ """ + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + @pytest.mark.skip("Spanner dates are time zone independent") + def test_select_direct(self): + pass + + +@pytest.mark.skip("Not supported by Spanner") +class DifficultParametersTest(_DifficultParametersTest): + pass + + +class FetchLimitOffsetTest(_FetchLimitOffsetTest): + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit_simple_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_simple_limit_expr_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_bound_offset(self, connection): + pass + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_limit_render_multiple_times(self, connection): + table = self.tables.some_table + stmt = select(table.c.id).limit(1).scalar_subquery() + + u = union(select(stmt), select(stmt)).subquery().select() + + self._assert_result( + connection, + u, + [(1,)], + ) + + @testing.requires.offset + def test_simple_offset(self, connection): + table = self.tables.some_table + self._assert_result( + connection, + 
select(table).order_by(table.c.id).offset(2), + [(3, 3, 4), (4, 4, 5), (5, 4, 6)], + ) + + +@pytest.mark.skip("Spanner doesn't support autoincrement") +class IdentityAutoincrementTest(_IdentityAutoincrementTest): + pass + + +class EscapingTest(_EscapingTest): + @provide_metadata + def test_percent_sign_round_trip(self): + """Test that the DBAPI accommodates for escaped / nonescaped + percent signs in a way that matches the compiler + + SPANNER OVERRIDE + Cloud Spanner supports tables with empty primary key, but + only single one row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + m = self.metadata + t = Table("t", m, Column("data", String(50))) + t.create(config.db) + with config.db.begin() as conn: + conn.execute(t.insert(), dict(data="some % value")) + + eq_( + conn.scalar( + select([t.c.data]).where( + t.c.data == literal_column("'some % value'") + ) + ), + "some % value", + ) + + conn.execute(t.delete()) + conn.execute(t.insert(), dict(data="some %% other value")) + eq_( + conn.scalar( + select([t.c.data]).where( + t.c.data == literal_column("'some %% other value'") + ) + ), + "some %% other value", + ) + + +class ExistsTest(_ExistsTest): + def test_select_exists(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. + Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) + """ + stuff = self.tables.stuff + eq_( + connection.execute( + select((exists().where(stuff.c.data == "some data"),)) + ).fetchall(), + [(True,)], + ) + + def test_select_exists_false(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. 
+ Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) + """ + stuff = self.tables.stuff + eq_( + connection.execute( + select((exists().where(stuff.c.data == "no data"),)) + ).fetchall(), + [(False,)], + ) + + +class TableDDLTest(_TableDDLTest): + @pytest.mark.skip( + "Spanner table name must start with an uppercase or lowercase letter" + ) + def test_underscore_names(self): + pass + + @pytest.mark.skip("Table names incuding schemas are not supported by Spanner") + def test_create_table_schema(self): + pass + + +class FutureTableDDLTest(_FutureTableDDLTest): + @pytest.mark.skip("Table names incuding schemas are not supported by Spanner") + def test_create_table_schema(self): + pass + + @pytest.mark.skip( + "Spanner table name must start with an uppercase or lowercase letter" + ) + def test_underscore_names(self): + pass + + +@pytest.mark.skip("Max identifier length in Spanner is 128") +class LongNameBlowoutTest(_LongNameBlowoutTest): + pass + + +@pytest.mark.skip("Spanner doesn't support Time data type.") +class TimeTests(_TimeMicrosecondsTest, _TimeTest): + pass + + +@pytest.mark.skip("Spanner doesn't coerce dates from datetime.") +class DateTimeCoercedToDateTimeTest(_DateTimeCoercedToDateTimeTest): + pass + + +class IntegerTest(_IntegerTest): + @provide_metadata + def _round_trip(self, datatype, data): + """ + SPANNER OVERRIDE: + + This is the helper method for integer class tests which creates a table and + performs an insert operation. + Cloud Spanner supports tables with an empty primary key, but only one + row can be inserted into such a table - following insertions will fail with + `400 id must not be NULL in table date_table`. + Overriding the tests and adding a manual primary key value to avoid the same + failures. 
+ """ + metadata = self.metadata + int_table = Table( + "integer_table", + metadata, + Column("id", Integer, primary_key=True, test_needs_autoincrement=True), + Column("integer_data", datatype), + ) + + metadata.create_all(config.db) + + config.db.execute(int_table.insert(), {"id": 1, "integer_data": data}) + + row = config.db.execute(select([int_table.c.integer_data])).first() + + eq_(row, (data,)) + + if util.py3k: + assert isinstance(row[0], int) + else: + assert isinstance(row[0], (long, int)) # noqa + + +class _UnicodeFixture(__UnicodeFixture): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + Table( + "unicode_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("unicode_data", cls.datatype), + ) + + def test_round_trip_executemany(self): + """ + SPANNER OVERRIDE + + Cloud Spanner supports tables with empty primary key, but + only single one row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + unicode_table = self.tables.unicode_table + + config.db.execute( + unicode_table.insert(), + [{"id": i, "unicode_data": self.data} for i in range(3)], + ) + + rows = config.db.execute(select([unicode_table.c.unicode_data])).fetchall() + eq_(rows, [(self.data,) for i in range(3)]) + for row in rows: + assert isinstance(row[0], util.text_type) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal(self): + pass + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class UnicodeVarcharTest(_UnicodeFixture, _UnicodeVarcharTest): + """ + SPANNER OVERRIDE: + + UnicodeVarcharTest class inherits the __UnicodeFixture class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class UnicodeTextTest(_UnicodeFixture, _UnicodeTextTest): + """ + SPANNER OVERRIDE: + + UnicodeTextTest class inherits the __UnicodeFixture class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class RowFetchTest(_RowFetchTest): + def test_row_w_scalar_select(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner returns a DatetimeWithNanoseconds() for date + data types. Overriding the test to use a DatetimeWithNanoseconds + type value as an expected result. + -------------- + + test that a scalar select as a column is returned as such + and that type conversion works OK. + + (this is half a SQLAlchemy Core test and half to catch database + backends that may have unusual behavior with scalar selects.) 
+ """ + datetable = self.tables.has_dates + s = select([datetable.alias("x").c.today]).scalar_subquery() + s2 = select([datetable.c.id, s.label("somelabel")]) + row = config.db.execute(s2).first() + + eq_( + row["somelabel"], + DatetimeWithNanoseconds(2006, 5, 12, 12, 0, 0, tzinfo=timezone.utc), + ) + + +class InsertBehaviorTest(_InsertBehaviorTest): + @pytest.mark.skip("Spanner doesn't support empty inserts") + def test_empty_insert(self): + pass + + @pytest.mark.skip("Spanner doesn't support empty inserts") + def test_empty_insert_multiple(self): + pass + + @pytest.mark.skip("Spanner doesn't support auto increment") + def test_insert_from_select_autoinc(self): + pass + + def test_autoclose_on_insert(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support tables with an auto increment primary key, + following insertions will fail with `400 id must not be NULL in table + autoinc_pk`. + + Overriding the tests and adding a manual primary key value to avoid the same + failures. + """ + if config.requirements.returning.enabled: + engine = engines.testing_engine(options={"implicit_returning": False}) + else: + engine = config.db + + with engine.begin() as conn: + r = conn.execute( + self.tables.autoinc_pk.insert(), dict(id=1, data="some data") + ) + + assert r._soft_closed + assert not r.closed + assert r.is_insert + assert not r.returns_rows + + +class BytesTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + def test_nolength_binary(self): + metadata = MetaData() + foo = Table("foo", metadata, Column("one", LargeBinary)) + + foo.create(config.db) + foo.drop(config.db) + + +class StringTest(_StringTest): + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class TextTest(_TextTest): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. 
Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + Table( + "text_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("text_data", Text), + ) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_roundtrip(self, connection): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_empty_strings(self, connection): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_null_strings(self, connection): + pass + + +class NumericTest(_NumericTest): + @testing.fixture + def do_numeric_test(self, metadata, connection): + @testing.emits_warning(r".*does \*not\* support Decimal objects natively") + def run(type_, input_, output, filter_=None, check_scale=False): + t = Table( + "t", + metadata, + Column("x", type_), + Column("id", Integer, primary_key=True), + ) + t.create(connection) + connection.connection.commit() + connection.execute( + t.insert(), [{"x": x, "id": i} for i, x in enumerate(input_)] + ) + + result = {row[0] for row in connection.execute(t.select())} + output = set(output) + if filter_: + result = set(filter_(x) for x in result) + output = set(filter_(x) for x in output) + eq_(result, output) + if check_scale: + eq_([str(x) for x in result], [str(x) for x in output]) + + return run + + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric(self, literal_round_trip): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + literal_round_trip( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric_asfloat(self, literal_round_trip): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563")], + [15.7563], + ) + + def test_render_literal_float(self, literal_round_trip): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip( + Float(4), + [decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + @requires.precision_generic_float_type + def test_float_custom_scale(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + do_numeric_test( + Float(None, decimal_return_scale=7, asdecimal=True), + [decimal.Decimal("15.7563827"), decimal.Decimal("15.7563827")], + [decimal.Decimal("15.7563827")], + check_scale=True, + ) + + def test_numeric_as_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. 
+ """ + do_numeric_test( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + def test_numeric_as_float(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. + """ + do_numeric_test( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [15.7563], + ) + + @requires.floats_to_four_decimals + def test_float_as_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + do_numeric_test( + Float(precision=8, asdecimal=True), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563"), None], + [decimal.Decimal("15.7563"), None], + filter_=lambda n: n is not None and round(n, 4) or None, + ) + + def test_float_as_float(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + do_numeric_test( + Float(precision=8), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + @testing.requires.literal_float_coercion + def test_float_coerce_round_trip(self, connection): + expr = 15.7563 + + val = connection.scalar(select(cast(literal(expr), FLOAT))) + eq_(val, expr) + + @requires.precision_numerics_general + def test_precision_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + + Remove an extra digits after decimal point as cloud spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. + """ + numbers = set( + [ + decimal.Decimal("54.246451650"), + decimal.Decimal("0.004354"), + decimal.Decimal("900.0"), + ] + ) + do_numeric_test(Numeric(precision=18, scale=9), numbers, numbers) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal_large(self, do_numeric_test): + """test exceedingly large decimals. + + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + numbers = set( + [ + decimal.Decimal("4E+8"), + decimal.Decimal("5748E+15"), + decimal.Decimal("1.521E+15"), + decimal.Decimal("000000000.1E+9"), + ] + ) + do_numeric_test(Numeric(precision=25, scale=2), numbers, numbers) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal(self, do_numeric_test): + """test exceedingly small decimals. + + Decimal reports values with E notation when the exponent + is greater than 6. 
+ + SPANNER OVERRIDE: + + Remove extra digits after decimal point as Cloud Spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. + """ + numbers = set( + [ + decimal.Decimal("1E-2"), + decimal.Decimal("1E-3"), + decimal.Decimal("1E-4"), + decimal.Decimal("1E-5"), + decimal.Decimal("1E-6"), + decimal.Decimal("1E-7"), + decimal.Decimal("1E-8"), + decimal.Decimal("0.105940696"), + decimal.Decimal("0.005940696"), + decimal.Decimal("0.000000696"), + decimal.Decimal("0.700000696"), + decimal.Decimal("696E-9"), + ] + ) + do_numeric_test(Numeric(precision=38, scale=9), numbers, numbers) + + +class LikeFunctionsTest(_LikeFunctionsTest): + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape_escape(self): + pass + + def test_escape_keyword_raises(self): + """Check that ESCAPE keyword causes an exception when used.""" + with pytest.raises(NotImplementedError): + col = self.tables.some_table.c.data + self._test(col.contains("b##cde", escape="#"), {7}) + + +@pytest.mark.skip("Spanner doesn't 
support IS DISTINCT FROM clause") +class IsOrIsNotDistinctFromTest(_IsOrIsNotDistinctFromTest): + pass + + +class OrderByLabelTest(_OrderByLabelTest): + @pytest.mark.skip( + "Spanner requires an alias for the GROUP BY list when specifying derived " + "columns also used in SELECT" + ) + def test_group_by_composed(self): + pass + + +class CompoundSelectTest(_CompoundSelectTest): + """ + See: https://github.com/googleapis/python-spanner/issues/347 + """ + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_limit_offset_selectable_in_unions(self): + pass + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_order_by_selectable_in_unions(self): + pass + + +class TestQueryHints(fixtures.TablesTest): + """ + Compile a complex query with JOIN and check that + the table hint was set into the right place. + """ + + __backend__ = True + + def test_complex_query_table_hints(self): + EXPECTED_QUERY = ( + "SELECT users.id, users.name \nFROM users @{FORCE_INDEX=table_1_by_int_idx}" + " JOIN addresses ON users.id = addresses.user_id " + "\nWHERE users.name IN (__[POSTCOMPILE_name_1])" + ) + + Base = declarative_base() + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + + class User(Base): + __tablename__ = "users" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + addresses = relation("Address", backref="user") + + class Address(Base): + __tablename__ = "addresses" + id = Column(Integer, primary_key=True) + email = Column(String(50)) + user_id = Column(Integer, ForeignKey("users.id")) + + session = Session(engine) + + query = session.query(User) + query = query.with_hint( + selectable=User, text="@{FORCE_INDEX=table_1_by_int_idx}" + ) + + query = query.filter(User.name.in_(["val1", "val2"])) + query = query.join(Address) + + assert str(query.statement.compile(session.bind)) == 
EXPECTED_QUERY + + +class InterleavedTablesTest(fixtures.TestBase): + """ + Check that CREATE TABLE statements for interleaved tables are correctly + generated. + """ + + def setUp(self): + self._engine = create_engine( + "spanner:///projects/appdev-soda-spanner-staging/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + self._metadata = MetaData(bind=self._engine) + + def test_interleave(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + def test_interleave_on_delete_cascade(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team ON DELETE CASCADE\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + spanner_interleave_on_delete_cascade=True, + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + +class UserAgentTest(fixtures.TestBase): + """Check that SQLAlchemy dialect uses correct user agent.""" + + def setUp(self): + self._engine = create_engine( + 
"spanner:///projects/appdev-soda-spanner-staging/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + self._metadata = MetaData(bind=self._engine) + + def test_user_agent(self): + with self._engine.connect() as connection: + assert ( + connection.connection.instance._client._client_info.user_agent + == f"gl-sqlalchemy-spanner/{sqlalchemy_spanner_version.__version__}" + ) + + +class SimpleUpdateDeleteTest(_SimpleUpdateDeleteTest): + """ + SPANNER OVERRIDE: + + Spanner doesn't support `rowcount` property. These + test cases overrides omit `rowcount` checks. + """ + + def test_delete(self, connection): + t = self.tables.plain_pk + r = connection.execute(t.delete().where(t.c.id == 2)) + assert not r.is_insert + assert not r.returns_rows + eq_( + connection.execute(t.select().order_by(t.c.id)).fetchall(), + [(1, "d1"), (3, "d3")], + ) + + def test_update(self, connection): + t = self.tables.plain_pk + r = connection.execute(t.update().where(t.c.id == 2), dict(data="d2_new")) + assert not r.is_insert + assert not r.returns_rows + + eq_( + connection.execute(t.select().order_by(t.c.id)).fetchall(), + [(1, "d1"), (2, "d2_new"), (3, "d3")], + ) + + +class HasIndexTest(_HasIndexTest): + @classmethod + def define_tables(cls, metadata): + tt = Table( + "test_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + ) + sqlalchemy.Index("my_idx", tt.c.data) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_index_schema(self): + pass + + +class HasTableTest(_HasTableTest): + @classmethod + def define_tables(cls, metadata): + Table( + "test_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + ) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_table_schema(self): + pass + + @testing.requires.views + def test_has_table_view(self, connection): + pass + + @testing.requires.views + def test_has_table_view_schema(self, connection): + pass + + 
+class PostCompileParamsTest(_PostCompileParamsTest): + def test_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + table.c.x == sqlalchemy.bindparam("q", literal_execute=True) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=10)) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " "\nWHERE some_table.x = 10", + [] if config.db.dialect.positional else {}, + ) + ) + + def test_execute_expanding_plus_literal_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + table.c.x.in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=[5, 6, 7])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE some_table.x IN (5, 6, 7)", + [] if config.db.dialect.positional else {}, + ) + ) + + @testing.requires.tuple_in + def test_execute_tuple_expanding_plus_literal_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + sqlalchemy.tuple_(table.c.x, table.c.y).in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=[(5, 10), (12, 18)])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE (some_table.x, some_table.y) " + "IN (%s(5, 10), (12, 18))" + % ("VALUES " if config.db.dialect.tuple_in_values else ""), + () if config.db.dialect.positional else {}, + ) + ) + + @testing.requires.tuple_in + def test_execute_tuple_expanding_plus_literal_heterogeneous_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + 
sqlalchemy.tuple_(table.c.x, table.c.z).in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=[(5, "z1"), (12, "z3")])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE (some_table.x, some_table.z) " + "IN (%s(5, 'z1'), (12, 'z3'))" + % ("VALUES " if config.db.dialect.tuple_in_values else ""), + () if config.db.dialect.positional else {}, + ) + ) + + +class ComputedReflectionFixtureTest(_ComputedReflectionFixtureTest): + @classmethod + def define_tables(cls, metadata): + """SPANNER OVERRIDE: + + Avoid using default values for computed columns. + """ + Table( + "computed_default_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_col", Integer, Computed("normal + 42")), + Column("with_default", Integer), + ) + + t = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal + 42")), + ) + + if testing.requires.computed_columns_virtual.enabled: + t.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal + 2", persisted=False), + ) + ) + if testing.requires.computed_columns_stored.enabled: + t.append_column( + Column( + "computed_stored", + Integer, + Computed("normal - 42", persisted=True), + ) + ) + + +class ComputedReflectionTest(_ComputedReflectionTest, ComputedReflectionFixtureTest): + @testing.requires.schemas + def test_get_column_returns_persisted_with_schema(self): + insp = inspect(config.db) + + cols = insp.get_columns("computed_column_table", schema=config.test_schema) + data = {c["name"]: c for c in cols} + + self.check_column( + data, + "computed_no_flag", + "normal+42", + testing.requires.computed_columns_default_persisted.enabled, + ) + if 
testing.requires.computed_columns_virtual.enabled: + self.check_column( + data, + "computed_virtual", + "normal/2", + False, + ) + if testing.requires.computed_columns_stored.enabled: + self.check_column( + data, + "computed_stored", + "normal-42", + True, + ) + + @pytest.mark.skip("Default values are not supported.") + def test_computed_col_default_not_set(self): + pass + + def test_get_column_returns_computed(self): + """ + SPANNER OVERRIDE: + + In Spanner all the generated columns are STORED, + meaning there are no persisted and not persisted + (in the terms of the SQLAlchemy) columns. The + method override omits the persistence reflection checks. + """ + insp = inspect(config.db) + + cols = insp.get_columns("computed_default_table") + data = {c["name"]: c for c in cols} + for key in ("id", "normal", "with_default"): + is_true("computed" not in data[key]) + compData = data["computed_col"] + is_true("computed" in compData) + is_true("sqltext" in compData["computed"]) + eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42") + + def test_create_not_null_computed_column(self): + """ + SPANNER TEST: + + Check that on creating a computed column with a NOT NULL + clause the clause is set in front of the computed column + statement definition and doesn't cause failures. 
+ """ + engine = create_engine(get_db_url()) + metadata = MetaData(bind=engine) + + Table( + "Singers", + metadata, + Column("SingerId", String(36), primary_key=True, nullable=False), + Column("FirstName", String(200)), + Column("LastName", String(200), nullable=False), + Column( + "FullName", + String(400), + Computed("COALESCE(FirstName || ' ', '') || LastName"), + nullable=False, + ), + ) + + metadata.create_all(engine) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class JSONTest(_JSONTest): + @pytest.mark.skip("Values without keys are not supported.") + def test_single_element_round_trip(self, element): + pass + + def _test_round_trip(self, data_element, connection): + data_table = self.tables.data_table + + config.db.execute( + data_table.insert(), + {"id": random.randint(1, 100000000), "name": "row1", "data": data_element}, + ) + + row = config.db.execute(select([data_table.c.data])).first() + + eq_(row, (data_element,)) + + def test_unicode_round_trip(self): + # note we include Unicode supplementary characters as well + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "r1", + "data": { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + }, + ) + + eq_( + conn.scalar(select([self.tables.data_table.c.data])), + { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + ) + + @pytest.mark.skip("Parameterized types are not supported.") + def test_eval_none_flag_orm(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." + ) + def test_index_typed_comparison(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." 
+ ) + def test_path_typed_comparison(self): + pass + + @pytest.mark.skip("Custom JSON de-/serializers are not supported.") + def test_round_trip_custom_json(self): + pass + + def _index_fixtures(fn): + fn = testing.combinations( + ("boolean", True), + ("boolean", False), + ("boolean", None), + ("string", "some string"), + ("string", None), + ("integer", 15), + ("integer", 1), + ("integer", 0), + ("integer", None), + ("float", 28.5), + ("float", None), + id_="sa", + )(fn) + return fn + + @_index_fixtures + def test_index_typed_access(self, datatype, value): + data_table = self.tables.data_table + data_element = {"key1": value} + with config.db.connect() as conn: + conn.execute( + data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + expr = data_table.c.data["key1"] + expr = getattr(expr, "as_%s" % datatype)() + + roundtrip = conn.scalar(select([expr])) + if roundtrip in ("true", "false", None): + roundtrip = str(roundtrip).capitalize() + + eq_(str(roundtrip), str(value)) + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_json_null_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_none_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." 
+ ) + def test_round_trip_none_as_sql_null(self): + pass + + +class ExecutionOptionsRequestPriorotyTest(fixtures.TestBase): + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + metadata = MetaData(bind=self._engine) + + self._table = Table( + "execution_options2", + metadata, + Column("opt_id", Integer, primary_key=True), + Column("opt_name", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + time.sleep(1) + + def test_request_priority(self): + PRIORITY = RequestOptions.Priority.PRIORITY_MEDIUM + with self._engine.connect().execution_options( + request_priority=PRIORITY + ) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + + with self._engine.connect() as connection: + assert connection.connection.request_priority is None + + engine = create_engine("sqlite:///database") + with engine.connect() as connection: + pass + + +class CreateEngineWithClientObjectTest(fixtures.TestBase): + def test_create_engine_w_valid_client_object(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + by passing custom Client object. + """ + client = Client(project=get_project()) + engine = create_engine(get_db_url(), connect_args={"client": client}) + with engine.connect() as connection: + assert connection.connection.instance._client == client + + def test_create_engine_w_invalid_client_object(self): + """ + SPANNER TEST: + + Check that if project id in url and custom Client + Object passed to enginer mismatch, error is thrown. + """ + client = Client(project="project_id") + engine = create_engine(get_db_url(), connect_args={"client": client}) + + with pytest.raises(ValueError): + engine.connect() + + +class CreateEngineWithoutDatabaseTest(fixtures.TestBase): + def test_create_engine_wo_database(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + without passing database id in the + connection URL. 
+ """ + engine = create_engine(get_db_url().split("/database")[0]) + with engine.connect() as connection: + assert connection.connection.database is None + + +class ReturningTest(fixtures.TestBase): + def setUp(self): + self._engine = create_engine(get_db_url()) + metadata = MetaData() + + self._table = Table( + "returning_test", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + + def test_returning_for_insert_and_update(self): + random_id = random.randint(1, 1000) + with self._engine.begin() as connection: + stmt = ( + self._table.insert() + .values(id=random_id, data="some % value") + .returning(self._table.c.id) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(random_id,)], + ) + + with self._engine.begin() as connection: + update_text = "some + value" + stmt = ( + self._table.update() + .values(data=update_text) + .where(self._table.c.id == random_id) + .returning(self._table.c.data) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(update_text,)], + ) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class SequenceTest(_SequenceTest): + @classmethod + def define_tables(cls, metadata): + Table( + "seq_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_opt_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq_opt", data_type=Integer, optional=True), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_no_returning", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("noret_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + implicit_returning=False, + ) + + def test_insert_lastrowid(self, connection): + r = connection.execute(self.tables.seq_pk.insert(), dict(data="some data")) + assert 
len(r.inserted_primary_key) == 1 + is_instance_of(r.inserted_primary_key[0], int) + + def test_nextval_direct(self, connection): + r = connection.execute(self.tables.seq_pk.c.id.default) + is_instance_of(r, int) + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + id, name = row + is_instance_of(id, int) + eq_(name, "some data") + + @testing.combinations((True,), (False,), argnames="implicit_returning") + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_insert_roundtrip_translate(self, connection, implicit_returning): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_nextval_direct_schema_translate(self, connection): + pass + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class HasSequenceTest(_HasSequenceTest): + @classmethod + def define_tables(cls, metadata): + sqlalchemy.Sequence("user_id_seq", metadata=metadata) + sqlalchemy.Sequence( + "other_seq", metadata=metadata, nomaxvalue=True, nominvalue=True + ) + Table( + "user_id_table", + metadata, + Column("id", Integer, primary_key=True), + ) + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_has_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_has_sequence_schemas_neg(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_has_sequence_default_not_in_remote(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_has_sequence_remote_not_in_default(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def 
test_get_sequence_names_no_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Spanner doesn't support user defined schemas") + def test_get_sequence_names_sequences_schema(self, connection): + pass + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class HasSequenceTestEmpty(_HasSequenceTestEmpty): + def test_get_sequence_names_no_sequence(self, connection): + super().test_get_sequence_names_no_sequence(connection) diff --git a/packages/sqlalchemy-spanner/test/test_suite_20.py b/packages/sqlalchemy-spanner/test/test_suite_20.py new file mode 100644 index 000000000000..d74ac9f5e2de --- /dev/null +++ b/packages/sqlalchemy-spanner/test/test_suite_20.py @@ -0,0 +1,3321 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import timezone +import decimal +import operator +import os +import pytest +import random +import time +from unittest import mock + +from google.cloud.spanner_v1 import RequestOptions, Client +import sqlalchemy +from sqlalchemy import create_engine, literal, FLOAT +from sqlalchemy.engine import Inspector +from sqlalchemy import inspect +from sqlalchemy import testing +from sqlalchemy import ForeignKey +from sqlalchemy import MetaData +from sqlalchemy.engine import ObjectKind +from sqlalchemy.engine import ObjectScope +from sqlalchemy.schema import DDL +from sqlalchemy.schema import Computed +from sqlalchemy.testing import config +from sqlalchemy.testing import engines +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import is_instance_of +from sqlalchemy.testing import provide_metadata, emits_warning +from sqlalchemy.testing import is_true +from sqlalchemy.testing import fixtures +from sqlalchemy.testing.provision import temp_table_keyword_args +from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import Table +from sqlalchemy import literal_column +from sqlalchemy import select +from sqlalchemy import util +from sqlalchemy import union +from sqlalchemy import event +from sqlalchemy import exists +from sqlalchemy import Boolean +from sqlalchemy import Float +from sqlalchemy import LargeBinary +from sqlalchemy import String +from sqlalchemy.sql.expression import cast +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship +from sqlalchemy.orm import Session +from sqlalchemy.types import Integer +from sqlalchemy.types import Numeric + +from sqlalchemy.types import Text +from sqlalchemy.testing import requires +from sqlalchemy import Index +from sqlalchemy import types +from sqlalchemy.testing.fixtures import ( + ComputedReflectionFixtureTest as _ComputedReflectionFixtureTest, +) + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +from google.cloud 
import spanner_dbapi + +from sqlalchemy.testing.suite.test_cte import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_ddl import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_dialect import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_insert import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_reflection import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_deprecations import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_results import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_select import ( + BitwiseTest as _BitwiseTest, +) # noqa: F401, F403 +from sqlalchemy.testing.suite.test_sequence import ( + SequenceTest as _SequenceTest, + HasSequenceTest as _HasSequenceTest, + HasSequenceTestEmpty as _HasSequenceTestEmpty, +) # noqa: F401, F403 +from sqlalchemy.testing.suite.test_unicode_ddl import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_update_delete import * # noqa: F401, F403 +from sqlalchemy.testing.suite.test_cte import CTETest as _CTETest +from sqlalchemy.testing.suite.test_ddl import TableDDLTest as _TableDDLTest +from sqlalchemy.testing.suite.test_ddl import ( + FutureTableDDLTest as _FutureTableDDLTest, + LongNameBlowoutTest as _LongNameBlowoutTest, +) +from sqlalchemy.testing.suite.test_update_delete import ( + SimpleUpdateDeleteTest as _SimpleUpdateDeleteTest, +) +from sqlalchemy.testing.suite.test_dialect import ( + DifficultParametersTest as _DifficultParametersTest, + EscapingTest as _EscapingTest, + ReturningGuardsTest as _ReturningGuardsTest, +) +from sqlalchemy.testing.suite.test_insert import ( + InsertBehaviorTest as _InsertBehaviorTest, +) +from sqlalchemy.testing.suite.test_select import ( # noqa: F401, F403 + CompoundSelectTest as _CompoundSelectTest, + ExistsTest as _ExistsTest, + FetchLimitOffsetTest as _FetchLimitOffsetTest, + IdentityAutoincrementTest as _IdentityAutoincrementTest, + IsOrIsNotDistinctFromTest as _IsOrIsNotDistinctFromTest, + 
LikeFunctionsTest as _LikeFunctionsTest, + OrderByLabelTest as _OrderByLabelTest, + PostCompileParamsTest as _PostCompileParamsTest, + SameNamedSchemaTableTest as _SameNamedSchemaTableTest, +) +from sqlalchemy.testing.suite.test_reflection import ( # noqa: F401, F403 + ComponentReflectionTestExtra as _ComponentReflectionTestExtra, + QuotedNameArgumentTest as _QuotedNameArgumentTest, + ComponentReflectionTest as _ComponentReflectionTest, + CompositeKeyReflectionTest as _CompositeKeyReflectionTest, + ComputedReflectionTest as _ComputedReflectionTest, + HasIndexTest as _HasIndexTest, + HasTableTest as _HasTableTest, +) +from sqlalchemy.testing.suite.test_results import ( + RowFetchTest as _RowFetchTest, +) +from sqlalchemy.testing.suite.test_types import ( # noqa: F401, F403 + BooleanTest as _BooleanTest, + DateTest as _DateTest, + _DateFixture as __DateFixture, + DateTimeHistoricTest, + DateTimeCoercedToDateTimeTest as _DateTimeCoercedToDateTimeTest, + DateTimeMicrosecondsTest as _DateTimeMicrosecondsTest, + DateTimeTest as _DateTimeTest, + IntegerTest as _IntegerTest, + JSONTest as _JSONTest, + _LiteralRoundTripFixture, + NumericTest as _NumericTest, + StringTest as _StringTest, + TextTest as _TextTest, + TimeTest as _TimeTest, + TimeMicrosecondsTest as _TimeMicrosecondsTest, + TimestampMicrosecondsTest, + UnicodeVarcharTest as _UnicodeVarcharTest, + UnicodeTextTest as _UnicodeTextTest, + _UnicodeFixture as __UnicodeFixture, +) # noqa: F401, F403 +from test._helpers import ( + get_db_url, + get_project, +) + +from google.cloud.sqlalchemy_spanner import version as sqlalchemy_spanner_version + + +config.test_schema = "" + + +class BooleanTest(_BooleanTest): + @pytest.mark.skip( + "The original test case was split into 2 parts: " + "test_render_literal_bool_true and test_render_literal_bool_false" + ) + def test_render_literal_bool(self): + pass + + def test_render_literal_bool_true(self, literal_round_trip_spanner): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports 
tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip_spanner(Boolean(), [True], [True]) + + def test_render_literal_bool_false(self, literal_round_trip_spanner): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip_spanner(Boolean(), [False], [False]) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_whereclause(self): + pass + + +class BitwiseTest(_BitwiseTest): + @pytest.mark.skip("Causes too many problems with other tests") + def test_bitwise(self, case, expected, connection): + pass + + +class ComponentReflectionTestExtra(_ComponentReflectionTestExtra): + @testing.requires.table_reflection + def test_nullable_reflection(self, connection, metadata): + t = Table( + "t", + metadata, + Column("a", Integer, nullable=True), + Column("b", Integer, nullable=False), + ) + t.create(connection) + connection.connection.commit() + eq_( + dict( + (col["name"], col["nullable"]) + for col in inspect(connection).get_columns("t") + ), + {"a": True, "b": False}, + ) + + def _type_round_trip(self, connection, metadata, *types): + t = Table( + "t", metadata, *[Column("t%d" % i, type_) for i, type_ in enumerate(types)] + ) + t.create(connection) + connection.connection.commit() + + return [c["type"] for c in inspect(connection).get_columns("t")] + + @testing.requires.table_reflection + def test_numeric_reflection(self, connection, metadata): + """ + SPANNER OVERRIDE: + + Spanner defines NUMERIC type with the constant precision=38 + and scale=9. 
Overriding the test to check if the NUMERIC + column is successfully created and has dimensions + correct for Cloud Spanner. + """ + for typ in self._type_round_trip(connection, metadata, Numeric(18, 5)): + assert isinstance(typ, Numeric) + eq_(typ.precision, 38) + eq_(typ.scale, 9) + + @testing.requires.table_reflection + def test_binary_reflection(self, connection, metadata): + """ + Check that a BYTES column with an explicitly + set size is correctly reflected. + """ + for typ in self._type_round_trip(connection, metadata, LargeBinary(20)): + assert isinstance(typ, LargeBinary) + eq_(typ.length, 20) + + @testing.requires.table_reflection + def test_string_length_reflection(self, connection, metadata): + typ = self._type_round_trip(connection, metadata, types.String(52))[0] + assert isinstance(typ, types.String) + + +class ComputedReflectionFixtureTest(_ComputedReflectionFixtureTest): + @classmethod + def define_tables(cls, metadata): + """SPANNER OVERRIDE: + + Avoid using default values for computed columns. 
+ """ + Table( + "computed_default_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_col", Integer, Computed("normal + 42")), + Column("with_default", Integer), + ) + + t = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal + 42")), + ) + + if testing.requires.computed_columns_virtual.enabled: + t.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal + 2", persisted=False), + ) + ) + if testing.requires.computed_columns_stored.enabled: + t.append_column( + Column( + "computed_stored", + Integer, + Computed("normal - 42", persisted=True), + ) + ) + + +class ComputedReflectionTest(_ComputedReflectionTest, ComputedReflectionFixtureTest): + @testing.requires.schemas + def test_get_column_returns_persisted_with_schema(self): + insp = inspect(config.db) + + cols = insp.get_columns("computed_column_table", schema=config.test_schema) + data = {c["name"]: c for c in cols} + + self.check_column( + data, + "computed_no_flag", + "normal+42", + testing.requires.computed_columns_default_persisted.enabled, + ) + if testing.requires.computed_columns_virtual.enabled: + self.check_column( + data, + "computed_virtual", + "normal/2", + False, + ) + if testing.requires.computed_columns_stored.enabled: + self.check_column( + data, + "computed_stored", + "normal-42", + True, + ) + + @pytest.mark.skip("Default values are not supported.") + def test_computed_col_default_not_set(self): + pass + + def test_get_column_returns_computed(self): + """ + SPANNER OVERRIDE: + + In Spanner all the generated columns are STORED, + meaning there are no persisted and not persisted + (in the terms of the SQLAlchemy) columns. The + method override omits the persistence reflection checks. 
+ """ + insp = inspect(config.db) + + cols = insp.get_columns("computed_default_table") + data = {c["name"]: c for c in cols} + for key in ("id", "normal", "with_default"): + is_true("computed" not in data[key]) + compData = data["computed_col"] + is_true("computed" in compData) + is_true("sqltext" in compData["computed"]) + eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42") + + def test_create_not_null_computed_column(self, connection): + """ + SPANNER TEST: + + Check that on creating a computed column with a NOT NULL + clause the clause is set in front of the computed column + statement definition and doesn't cause failures. + """ + metadata = MetaData() + + Table( + "Singers", + metadata, + Column("SingerId", String(36), primary_key=True, nullable=False), + Column("FirstName", String(200)), + Column("LastName", String(200), nullable=False), + Column( + "FullName", + String(400), + Computed("COALESCE(FirstName || ' ', '') || LastName"), + nullable=False, + ), + ) + + metadata.create_all(connection) + + +class ComponentReflectionTest(_ComponentReflectionTest): + @pytest.mark.skip("Skip") + def test_not_existing_table(self, method, connection): + pass + + @classmethod + def define_tables(cls, metadata): + cls.define_reflected_tables(metadata, None) + + @classmethod + def define_views(cls, metadata, schema): + table_info = { + "dingalings": [ + "dingaling_id", + "address_id", + "data", + "id_user", + ], + "users": ["user_id", "test1", "test2"], + "email_addresses": ["address_id", "remote_user_id", "email_address"], + } + if testing.requires.self_referential_foreign_keys.enabled: + table_info["users"] = table_info["users"] + ["parent_user_id"] + if testing.requires.materialized_views.enabled: + materialized = {"dingalings"} + else: + materialized = set() + for table_name in ("users", "email_addresses", "dingalings"): + fullname = table_name + if schema: + fullname = f"{schema}.{table_name}" + view_name = fullname + "_v" + prefix = "MATERIALIZED " if 
table_name in materialized else "" + columns = "" + for column in table_info[table_name]: + stmt = table_name + "." + column + " AS " + column + if columns: + columns = columns + ", " + stmt + else: + columns = stmt + query = f"""CREATE {prefix}VIEW {view_name} + SQL SECURITY INVOKER + AS SELECT {columns} + FROM {fullname}""" + + event.listen(metadata, "after_create", DDL(query)) + if table_name in materialized: + index_name = "mat_index" + if schema and testing.against("oracle"): + index_name = f"{schema}.{index_name}" + idx = f"CREATE INDEX {index_name} ON {view_name}(data)" + event.listen(metadata, "after_create", DDL(idx)) + event.listen(metadata, "before_drop", DDL(f"DROP {prefix}VIEW {view_name}")) + + @classmethod + def define_reflected_tables(cls, metadata, schema): + if schema: + schema_prefix = schema + "." + else: + schema_prefix = "" + + if testing.requires.self_referential_foreign_keys.enabled: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + Column( + "parent_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey( + "%susers.user_id" % schema_prefix, name="user_id_fk" + ), + ), + schema=schema, + test_needs_fk=True, + ) + else: + users = Table( + "users", + metadata, + Column("user_id", sqlalchemy.INT, primary_key=True), + Column("test1", sqlalchemy.CHAR(5), nullable=False), + Column("test2", sqlalchemy.Float(5), nullable=False), + schema=schema, + test_needs_fk=True, + ) + + Table( + "dingalings", + metadata, + Column("dingaling_id", sqlalchemy.Integer, primary_key=True), + Column( + "address_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("%semail_addresses.address_id" % schema_prefix), + ), + Column( + "id_user", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("%susers.user_id" % schema_prefix), + ), + Column("data", sqlalchemy.String(30)), + schema=schema, + test_needs_fk=True, + ) + 
Table( + "email_addresses", + metadata, + Column("address_id", sqlalchemy.Integer, primary_key=True), + Column( + "remote_user_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey(users.c.user_id), + ), + Column("email_address", sqlalchemy.String(20)), + sqlalchemy.PrimaryKeyConstraint("address_id", name="email_ad_pk"), + schema=schema, + test_needs_fk=True, + ) + Table( + "comment_test", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True, comment="id comment"), + Column("data", sqlalchemy.String(20), comment="data % comment"), + Column( + "d2", + sqlalchemy.String(20), + comment=r"""Comment types type speedily ' " \ '' Fun!""", + ), + schema=schema, + comment=r"""the test % ' " \ table comment""", + ) + Table( + "no_constraints", + metadata, + Column("data", sqlalchemy.String(20)), + schema=schema, + ) + + if testing.requires.cross_schema_fk_reflection.enabled: + if schema is None: + Table( + "local_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + Column( + "remote_id", + ForeignKey("%s.remote_table_2.id" % testing.config.test_schema), + ), + test_needs_fk=True, + schema=config.db.dialect.default_schema_name, + ) + else: + Table( + "remote_table", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column( + "local_id", + ForeignKey( + "%s.local_table.id" % config.db.dialect.default_schema_name + ), + ), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + Table( + "remote_table_2", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("data", sqlalchemy.String(20)), + schema=schema, + test_needs_fk=True, + ) + + if testing.requires.index_reflection.enabled: + sqlalchemy.Index("users_t_idx", users.c.test1, users.c.test2, unique=True) + sqlalchemy.Index( + "users_all_idx", users.c.user_id, users.c.test2, users.c.test1 + ) + + if not schema: + # test_needs_fk is at the moment to force MySQL InnoDB + noncol_idx_test_nopk 
= Table( + "noncol_idx_test_nopk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + extend_existing=True, + ) + + noncol_idx_test_pk = Table( + "noncol_idx_test_pk", + metadata, + Column("id", sqlalchemy.Integer, primary_key=True), + Column("q", sqlalchemy.String(5)), + test_needs_fk=True, + extend_existing=True, + ) + + if testing.requires.indexes_with_ascdesc.enabled: + sqlalchemy.Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc()) + sqlalchemy.Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc()) + + if testing.requires.view_column_reflection.enabled and not bool( + os.environ.get("SPANNER_EMULATOR_HOST") + ): + cls.define_views(metadata, schema) + + @testing.combinations( + (False, False), + (False, True, testing.requires.schemas), + (True, False, testing.requires.view_reflection), + ( + True, + True, + testing.requires.schemas + testing.requires.view_reflection, + ), + argnames="use_views,use_schema", + ) + def test_get_columns(self, connection, use_views, use_schema): + if use_views and bool(os.environ.get("SPANNER_EMULATOR_HOST")): + pytest.skip("Skipped on emulator") + + schema = None + + users, addresses = (self.tables.users, self.tables.email_addresses) + if use_views: + table_names = ["users_v", "email_addresses_v", "dingalings_v"] + else: + table_names = ["users", "email_addresses"] + + insp = inspect(connection) + for table_name, table in zip(table_names, (users, addresses)): + schema_name = schema + cols = insp.get_columns(table_name, schema=schema_name) + is_true(len(cols) > 0, len(cols)) + + # should be in order + + for i, col in enumerate(table.columns): + eq_(col.name, cols[i]["name"]) + ctype = cols[i]["type"].__class__ + ctype_def = col.type + if isinstance(ctype_def, sqlalchemy.types.TypeEngine): + ctype_def = ctype_def.__class__ + + # Oracle returns Date for DateTime. 
+ + if testing.against("oracle") and ctype_def in ( + types.Date, + types.DateTime, + ): + ctype_def = types.Date + + # assert that the desired type and return type share + # a base within one of the generic types. + + is_true( + len( + set(ctype.__mro__) + .intersection(ctype_def.__mro__) + .intersection( + [ + types.Integer, + types.Numeric, + types.DateTime, + types.Date, + types.Time, + types.String, + types._Binary, + ] + ) + ) + > 0, + "%s(%s), %s(%s)" % (col.name, col.type, cols[i]["name"], ctype), + ) + + if not col.primary_key: + assert cols[i]["default"] is None + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + @testing.requires.view_reflection + def test_get_view_definition( + self, + connection, + ): + schema = None + insp = inspect(connection) + for view in ["users_v", "email_addresses_v", "dingalings_v"]: + v = insp.get_view_definition(view, schema=schema) + is_true(bool(v)) + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + @testing.requires.view_reflection + def test_get_view_definition_does_not_exist(self, connection): + super().test_get_view_definition_does_not_exist(connection) + + def filter_name_values(): + return testing.combinations(True, False, argnames="use_filter") + + @filter_name_values() + @testing.requires.index_reflection + def test_get_multi_indexes( + self, + get_multi_exp, + use_filter, + schema=None, + scope=ObjectScope.DEFAULT, + kind=ObjectKind.TABLE, + ): + """ + SPANNER OVERRIDE: + + Spanner doesn't support indexes on views and + doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. 
+ """ + insp, kws, exp = get_multi_exp( + schema, + scope, + kind, + use_filter, + Inspector.get_indexes, + self.exp_indexes, + ) + _ignore_tables = [ + (None, "comment_test"), + (None, "dingalings"), + (None, "email_addresses"), + (None, "no_constraints"), + ] + exp = {k: v for k, v in exp.items() if k not in _ignore_tables} + + for kw in kws: + insp.clear_cache() + result = insp.get_multi_indexes(**kw) + self._check_table_dict(result, exp, self._required_index_keys) + + def exp_pks( + self, + schema=None, + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + filter_names=None, + ): + def pk(*cols, name=mock.ANY, comment=None): + return { + "constrained_columns": list(cols), + "name": name, + "comment": comment, + } + + empty = pk(name=None) + if testing.requires.materialized_views_reflect_pk.enabled: + materialized = {(schema, "dingalings_v"): pk("dingaling_id")} + else: + materialized = {(schema, "dingalings_v"): empty} + views = { + (schema, "email_addresses_v"): empty, + (schema, "users_v"): empty, + (schema, "user_tmp_v"): empty, + } + self._resolve_views(views, materialized) + tables = { + (schema, "users"): pk("user_id"), + (schema, "dingalings"): pk("dingaling_id"), + (schema, "email_addresses"): pk( + "address_id", name="email_ad_pk", comment="ea pk comment" + ), + (schema, "comment_test"): pk("id"), + (schema, "no_constraints"): empty, + (schema, "local_table"): pk("id"), + (schema, "remote_table"): pk("id"), + (schema, "remote_table_2"): pk("id"), + (schema, "noncol_idx_test_nopk"): pk("id"), + (schema, "noncol_idx_test_pk"): pk("id"), + (schema, self.temp_table_name()): pk("id"), + } + if not testing.requires.reflects_pk_names.enabled: + for val in tables.values(): + if val["name"] is not None: + val["name"] = mock.ANY + res = self._resolve_kind(kind, tables, views, materialized) + res = self._resolve_names(schema, scope, filter_names, res) + return res + + @filter_name_values() + @testing.requires.primary_key_constraint_reflection + def 
test_get_multi_pk_constraint( + self, + get_multi_exp, + use_filter, + schema=None, + scope=ObjectScope.DEFAULT, + kind=ObjectKind.TABLE, + ): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. + """ + insp, kws, exp = get_multi_exp( + schema, + scope, + kind, + use_filter, + Inspector.get_pk_constraint, + self.exp_pks, + ) + _ignore_tables = [(None, "no_constraints")] + exp = {k: v for k, v in exp.items() if k not in _ignore_tables} + + for kw in kws: + insp.clear_cache() + result = insp.get_multi_pk_constraint(**kw) + self._check_table_dict(result, exp, self._required_pk_keys, make_lists=True) + + def exp_fks( + self, + schema=None, + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + filter_names=None, + ): + class tt: + def __eq__(self, other): + return other is None or config.db.dialect.default_schema_name == other + + def fk( + cols, + ref_col, + ref_table, + ref_schema=schema, + name=mock.ANY, + comment=None, + ): + return { + "constrained_columns": cols, + "referred_columns": ref_col, + "name": name, + "options": mock.ANY, + "referred_schema": ref_schema if ref_schema is not None else tt(), + "referred_table": ref_table, + "comment": comment, + } + + materialized = {} + views = {} + self._resolve_views(views, materialized) + tables = { + (schema, "users"): [ + fk(["parent_user_id"], ["user_id"], "users", name="user_id_fk") + ], + (schema, "dingalings"): [ + fk(["address_id"], ["address_id"], "email_addresses"), + fk(["id_user"], ["user_id"], "users"), + ], + (schema, "email_addresses"): [fk(["remote_user_id"], ["user_id"], "users")], + (schema, "local_table"): [ + fk( + ["remote_id"], + ["id"], + "remote_table_2", + ref_schema=config.test_schema, + ) + ], + (schema, "remote_table"): [ + fk(["local_id"], ["id"], "local_table", ref_schema=None) + ], + } + if not 
testing.requires.self_referential_foreign_keys.enabled: + tables[(schema, "users")].clear() + if not testing.requires.named_constraints.enabled: + for vals in tables.values(): + for val in vals: + if val["name"] is not mock.ANY: + val["name"] = mock.ANY + + res = self._resolve_kind(kind, tables, views, materialized) + res = self._resolve_names(schema, scope, filter_names, res) + return res + + @filter_name_values() + @testing.requires.foreign_key_constraint_reflection + def test_get_multi_foreign_keys( + self, + get_multi_exp, + use_filter, + schema=None, + scope=ObjectScope.DEFAULT, + kind=ObjectKind.TABLE, + ): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. + """ + insp, kws, exp = get_multi_exp( + schema, + scope, + kind, + use_filter, + Inspector.get_foreign_keys, + self.exp_fks, + ) + for kw in kws: + insp.clear_cache() + result = insp.get_multi_foreign_keys(**kw) + self._adjust_sort(result, exp, lambda d: tuple(d["constrained_columns"])) + self._check_table_dict( + { + key: sorted(value, key=lambda x: x["constrained_columns"]) + for key, value in result.items() + }, + { + key: sorted(value, key=lambda x: x["constrained_columns"]) + for key, value in exp.items() + }, + self._required_fk_keys, + ) + + def test_get_foreign_keys_quoted_name(self, connection, metadata): + pass + + def test_get_indexes_quoted_name(self, connection, metadata): + pass + + def test_get_unique_constraints_quoted_name(self, connection, metadata): + pass + + def exp_columns( + self, + schema=None, + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + filter_names=None, + ): + def col(name, auto=False, default=mock.ANY, comment=None, nullable=True): + res = { + "name": name, + "autoincrement": auto, + "type": mock.ANY, + "default": default, + "comment": comment, + "nullable": nullable, + } + 
if auto == "omit": + res.pop("autoincrement") + return res + + def pk(name, **kw): + kw = {"auto": True, "default": mock.ANY, "nullable": False, **kw} + return col(name, **kw) + + materialized = { + (schema, "dingalings_v"): [ + col("dingaling_id", auto="omit", nullable=mock.ANY), + col("address_id"), + col("id_user"), + col("data"), + ] + } + views = { + (schema, "email_addresses_v"): [ + col("address_id", auto="omit", nullable=mock.ANY), + col("remote_user_id"), + col("email_address"), + ], + (schema, "users_v"): [ + col("user_id", auto="omit", nullable=mock.ANY), + col("test1", nullable=mock.ANY), + col("test2", nullable=mock.ANY), + col("parent_user_id"), + ], + (schema, "user_tmp_v"): [ + col("id", auto="omit", nullable=mock.ANY), + col("name"), + col("foo"), + ], + } + self._resolve_views(views, materialized) + tables = { + (schema, "users"): [ + pk("user_id"), + col("test1", nullable=False), + col("test2", nullable=False), + col("parent_user_id"), + ], + (schema, "dingalings"): [ + pk("dingaling_id"), + col("address_id"), + col("id_user"), + col("data"), + ], + (schema, "email_addresses"): [ + pk("address_id"), + col("remote_user_id"), + col("email_address"), + ], + (schema, "comment_test"): [ + pk("id", comment="id comment"), + col("data", comment="data % comment"), + col( + "d2", + comment=r"""Comment types type speedily ' " \ '' Fun!""", + ), + ], + (schema, "no_constraints"): [col("data")], + (schema, "local_table"): [pk("id"), col("data"), col("remote_id")], + (schema, "remote_table"): [pk("id"), col("local_id"), col("data")], + (schema, "remote_table_2"): [pk("id"), col("data")], + (schema, "noncol_idx_test_nopk"): [pk("id"), col("q")], + (schema, "noncol_idx_test_pk"): [pk("id"), col("q")], + (schema, self.temp_table_name()): [ + pk("id"), + col("name"), + col("foo"), + ], + } + res = self._resolve_kind(kind, tables, views, materialized) + res = self._resolve_names(schema, scope, filter_names, res) + return res + + @filter_name_values() + def 
test_get_multi_columns( + self, + get_multi_exp, + use_filter, + schema=None, + scope=ObjectScope.DEFAULT, + kind=ObjectKind.TABLE, + ): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. + """ + _ignore_tables = [ + "bitwise", + ] + + insp, kws, exp = get_multi_exp( + schema, + scope, + kind, + use_filter, + Inspector.get_columns, + self.exp_columns, + ) + + for kw in kws: + insp.clear_cache() + result = insp.get_multi_columns(**kw) + for t in _ignore_tables: + result.pop((schema, t), None) + self._check_table_dict(result, exp, self._required_column_keys) + + @pytest.mark.skip( + "Requires an introspection method to be implemented in SQLAlchemy first" + ) + def test_get_multi_unique_constraints(self): + pass + + @pytest.mark.skip( + "Requires an introspection method to be implemented in SQLAlchemy first" + ) + def test_get_multi_check_constraints(self): + pass + + @testing.combinations((False,), argnames="use_schema") + @testing.requires.foreign_key_constraint_reflection + def test_get_foreign_keys(self, connection, use_schema): + if use_schema: + schema = config.test_schema + else: + schema = None + + users, addresses = (self.tables.users, self.tables.email_addresses) + insp = inspect(connection) + expected_schema = schema + # users + + if testing.requires.self_referential_foreign_keys.enabled: + users_fkeys = insp.get_foreign_keys(users.name, schema=schema) + fkey1 = users_fkeys[0] + + with testing.requires.named_constraints.fail_if(): + eq_(fkey1["name"], "user_id_fk") + + eq_(fkey1["referred_schema"], expected_schema) + eq_(fkey1["referred_table"], users.name) + eq_(fkey1["referred_columns"], ["user_id"]) + if testing.requires.self_referential_foreign_keys.enabled: + eq_(fkey1["constrained_columns"], ["parent_user_id"]) + + # addresses + addr_fkeys = 
insp.get_foreign_keys(addresses.name, schema=schema) + fkey1 = addr_fkeys[0] + + with testing.requires.implicitly_named_constraints.fail_if(): + self.assert_(fkey1["name"] is not None) + + eq_(fkey1["referred_schema"], expected_schema) + eq_(fkey1["referred_table"], users.name) + eq_(fkey1["referred_columns"], ["user_id"]) + eq_(fkey1["constrained_columns"], ["remote_user_id"]) + + @testing.combinations( + None, + ("foreign_key", testing.requires.foreign_key_constraint_reflection), + argnames="order_by", + ) + @testing.combinations( + (True, testing.requires.schemas), False, argnames="use_schema" + ) + def test_get_table_names(self, connection, order_by, use_schema): + schema = None + + _ignore_tables = [ + "account", + "alembic_version", + "bitwise", + "bytes_table", + "comment_test", + "date_table", + "noncol_idx_test_pk", + "noncol_idx_test_nopk", + "local_table", + "remote_table", + "remote_table_2", + "text_table", + "user_tmp", + "no_constraints", + ] + + insp = inspect(connection) + + if order_by: + tables = [ + rec[0] for rec in insp.get_sorted_table_and_fkc_names(schema) if rec[0] + ] + else: + tables = insp.get_table_names(schema) + table_names = [t for t in tables if t not in _ignore_tables] + + if order_by == "foreign_key": + answer = ["users", "email_addresses", "dingalings"] + eq_(table_names, answer) + else: + answer = ["dingalings", "email_addresses", "users"] + eq_(sorted(table_names), answer) + + @classmethod + def define_temp_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + In Cloud Spanner unique indexes are used instead of directly + creating unique constraints. Overriding the test to replace + constraints with indexes in testing data. 
+ """ + kw = temp_table_keyword_args(config, config.db) + user_tmp = Table( + "user_tmp", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + Column("foo", sqlalchemy.INT), + sqlalchemy.Index("user_tmp_uq", "name", unique=True), + sqlalchemy.Index("user_tmp_ix", "foo"), + extend_existing=True, + **kw, + ) + if ( + testing.requires.view_reflection.enabled + and testing.requires.temporary_views.enabled + ): + event.listen( + user_tmp, + "after_create", + DDL("create temporary view user_tmp_v as " "select * from user_tmp"), + ) + event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v")) + + @testing.provide_metadata + def test_reflect_string_column_max_len(self, connection): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the STRING type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. + """ + metadata = MetaData() + Table("text_table", metadata, Column("TestColumn", Text, nullable=False)) + metadata.create_all(connection) + + Table("text_table", metadata, autoload=True) + + def test_reflect_bytes_column_max_len(self, connection): + """ + SPANNER SPECIFIC TEST: + + In Spanner column of the BYTES type can be + created with size defined as MAX. The test + checks that such a column is correctly reflected. + """ + metadata = MetaData() + Table( + "bytes_table", + metadata, + Column("TestColumn", LargeBinary, nullable=False), + ) + metadata.create_all(connection) + + Table("bytes_table", metadata, autoload=True) + + @testing.requires.unique_constraint_reflection + def test_get_unique_constraints(self, metadata, connection, use_schema=False): + # SQLite dialect needs to parse the names of the constraints + # separately from what it gets from PRAGMA index_list(), and + # then matches them up. so same set of column_names in two + # constraints will confuse it. 
Perhaps we should no longer + # bother with index_list() here since we have the whole + # CREATE TABLE? + + if use_schema: + schema = config.test_schema + else: + schema = None + uniques = sorted( + [ + {"name": "unique_a", "column_names": ["a"]}, + {"name": "unique_a_b_c", "column_names": ["a", "b", "c"]}, + {"name": "unique_c_a_b", "column_names": ["c", "a", "b"]}, + {"name": "unique_asc_key", "column_names": ["asc", "key"]}, + {"name": "i.have.dots", "column_names": ["b"]}, + {"name": "i have spaces", "column_names": ["c"]}, + ], + key=operator.itemgetter("name"), + ) + table = Table( + "testtbl", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("a", String(20)), + Column("b", String(30)), + Column("c", Integer), + # reserved identifiers + Column("asc", String(30)), + Column("key", String(30)), + sqlalchemy.Index("unique_a", "a", unique=True), + sqlalchemy.Index("unique_a_b_c", "a", "b", "c", unique=True), + sqlalchemy.Index("unique_c_a_b", "c", "a", "b", unique=True), + sqlalchemy.Index("unique_asc_key", "asc", "key", unique=True), + schema=schema, + ) + table.create(connection) + connection.connection.commit() + + inspector = inspect(connection) + reflected = sorted( + inspector.get_unique_constraints("testtbl", schema=schema), + key=operator.itemgetter("name"), + ) + + names_that_duplicate_index = set() + + for orig, refl in zip(uniques, reflected): + # Different dialects handle duplicate index and constraints + # differently, so ignore this flag + dupe = refl.pop("duplicates_index", None) + if dupe: + names_that_duplicate_index.add(dupe) + eq_(orig, refl) + + reflected_metadata = MetaData() + reflected = Table( + "testtbl", + reflected_metadata, + autoload_with=connection, + schema=schema, + ) + + # test "deduplicates for index" logic. MySQL and Oracle + # "unique constraints" are actually unique indexes (with possible + # exception of a unique that is a dupe of another one in the case + # of Oracle). make sure # they aren't duplicated. 
+ idx_names = set([idx.name for idx in reflected.indexes]) + uq_names = set( + [ + uq.name + for uq in reflected.constraints + if isinstance(uq, sqlalchemy.UniqueConstraint) + ] + ).difference(["unique_c_a_b"]) + + assert not idx_names.intersection(uq_names) + if names_that_duplicate_index: + eq_(names_that_duplicate_index, idx_names) + eq_(uq_names, set()) + + @testing.provide_metadata + def test_unique_constraint_raises(self, connection): + """ + Checking that unique constraint creation + fails due to a ProgrammingError. + """ + metadata = MetaData() + Table( + "user_tmp_failure", + metadata, + Column("id", sqlalchemy.INT, primary_key=True), + Column("name", sqlalchemy.VARCHAR(50)), + sqlalchemy.UniqueConstraint("name", name="user_tmp_uq"), + ) + + with pytest.raises(spanner_dbapi.exceptions.ProgrammingError): + metadata.create_all(connection) + + @testing.provide_metadata + def _test_get_table_names(self, schema=None, table_type="table", order_by=None): + """ + SPANNER OVERRIDE: + + Spanner doesn't support temporary tables, so real tables are + used for testing. As the original test expects only real + tables to be read, and in Spanner all the tables are real, + expected results override is required. 
+ """ + _ignore_tables = [ + "bitwise", + "comment_test", + "noncol_idx_test_pk", + "noncol_idx_test_nopk", + "local_table", + "remote_table", + "remote_table_2", + "no_constraints", + ] + meta = self.metadata + + insp = inspect(meta.bind) + + if table_type == "view" and not bool(os.environ.get("SPANNER_EMULATOR_HOST")): + table_names = insp.get_view_names(schema) + table_names.sort() + answer = ["email_addresses_v", "users_v"] + eq_(sorted(table_names), answer) + else: + if order_by: + tables = [ + rec[0] + for rec in insp.get_sorted_table_and_fkc_names(schema) + if rec[0] + ] + else: + tables = insp.get_table_names(schema) + table_names = [t for t in tables if t not in _ignore_tables] + + if order_by == "foreign_key": + answer = {"dingalings", "email_addresses", "user_tmp", "users"} + eq_(set(table_names), answer) + else: + answer = ["dingalings", "email_addresses", "user_tmp", "users"] + eq_(sorted(table_names), answer) + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_get_view_names(self, connection, use_schema=False): + insp = inspect(connection) + schema = None + table_names = insp.get_view_names(schema) + if testing.requires.materialized_views.enabled: + eq_(sorted(table_names), ["email_addresses_v", "users_v"]) + eq_(insp.get_materialized_view_names(schema), ["dingalings_v"]) + else: + answer = ["dingalings_v", "email_addresses_v", "users_v"] + eq_(sorted(table_names), answer) + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_indexes(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_unique_constraints(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_get_temp_table_columns(self): + pass + + @pytest.mark.skip("Spanner doesn't support temporary tables") + def test_reflect_table_temp_table(self, connection): + pass + + def exp_indexes( + self, + 
schema=None, + scope=ObjectScope.ANY, + kind=ObjectKind.ANY, + filter_names=None, + ): + def idx( + *cols, + name, + unique=False, + column_sorting=None, + duplicates=False, + fk=False, + ): + fk_req = testing.requires.foreign_keys_reflect_as_index + dup_req = testing.requires.unique_constraints_reflect_as_index + if (fk and not fk_req.enabled) or (duplicates and not dup_req.enabled): + return () + res = { + "unique": unique, + "column_names": list(cols), + "name": name, + "dialect_options": mock.ANY, + "include_columns": [], + } + if column_sorting: + res["column_sorting"] = {"q": "desc"} + if duplicates: + res["duplicates_constraint"] = name + return [res] + + materialized = {(schema, "dingalings_v"): []} + views = { + (schema, "email_addresses_v"): [], + (schema, "users_v"): [], + (schema, "user_tmp_v"): [], + } + self._resolve_views(views, materialized) + if materialized: + materialized[(schema, "dingalings_v")].extend(idx("data", name="mat_index")) + tables = { + (schema, "users"): [ + *idx("parent_user_id", name="user_id_fk", fk=True), + *idx("user_id", "test2", "test1", name="users_all_idx"), + *idx("test1", "test2", name="users_t_idx", unique=True), + ], + (schema, "dingalings"): [ + *idx("data", name=mock.ANY, unique=True, duplicates=True), + *idx("id_user", name=mock.ANY, fk=True), + *idx( + "address_id", + "dingaling_id", + name="zz_dingalings_multiple", + unique=True, + duplicates=True, + ), + ], + (schema, "email_addresses"): [ + *idx("email_address", name=mock.ANY), + *idx("remote_user_id", name=mock.ANY, fk=True), + ], + (schema, "comment_test"): [], + (schema, "no_constraints"): [], + (schema, "local_table"): [*idx("remote_id", name=mock.ANY, fk=True)], + (schema, "remote_table"): [*idx("local_id", name=mock.ANY, fk=True)], + (schema, "remote_table_2"): [], + (schema, "noncol_idx_test_nopk"): [ + *idx( + "q", + name="noncol_idx_nopk", + column_sorting={"q": "desc"}, + ) + ], + (schema, "noncol_idx_test_pk"): [ + *idx("q", name="noncol_idx_pk", 
column_sorting={"q": "desc"}) + ], + (schema, self.temp_table_name()): [ + *idx("foo", name="user_tmp_ix"), + *idx( + "name", + name=f"user_tmp_uq_{config.ident}", + duplicates=True, + unique=True, + ), + ], + } + if ( + not testing.requires.indexes_with_ascdesc.enabled + or not testing.requires.reflect_indexes_with_ascdesc.enabled + ): + tables[(schema, "noncol_idx_test_nopk")].clear() + tables[(schema, "noncol_idx_test_pk")].clear() + res = self._resolve_kind(kind, tables, views, materialized) + res = self._resolve_names(schema, scope, filter_names, res) + return res + + def _check_list(self, result, exp, req_keys=None, msg=None): + if req_keys is None: + eq_(result, exp, msg) + else: + eq_(len(result), len(exp), msg) + for r, e in zip(result, exp): + for k in set(r) | set(e): + if (k in req_keys and (k in r and k in e)) or (k in r and k in e): + if isinstance(r[k], list): + r[k].sort() + e[k].sort() + eq_(r[k], e[k], f"{msg} - {k} - {r}") + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + @testing.combinations(True, False, argnames="use_schema") + @testing.combinations((True, testing.requires.views), False, argnames="views") + def test_metadata(self, connection, use_schema, views): + m = MetaData() + schema = None + m.reflect(connection, schema=schema, views=views, resolve_fks=False) + + insp = inspect(connection) + tables = insp.get_table_names(schema) + if views: + tables += insp.get_view_names(schema) + try: + tables += insp.get_materialized_view_names(schema) + except NotImplementedError: + pass + if schema is not None: + tables = [f"{schema}.{t}" for t in tables] + eq_(sorted(m.tables), sorted(tables)) + + +class CompositeKeyReflectionTest(_CompositeKeyReflectionTest): + @testing.requires.foreign_key_constraint_reflection + def test_fk_column_order(self, connection): + """ + SPANNER OVERRIDE: + + Spanner column usage reflection doesn't support determenistic + ordering. 
Overriding the test to check that columns are + reflected correctly, without considering their order. + """ + # test for issue #5661 + insp = inspect(connection) + foreign_keys = insp.get_foreign_keys(self.tables.tb2.name) + eq_(len(foreign_keys), 1) + fkey1 = foreign_keys[0] + eq_(set(fkey1.get("referred_columns")), {"name", "id", "attr"}) + eq_(set(fkey1.get("constrained_columns")), {"pname", "pid", "pattr"}) + + +@pytest.mark.skip("Spanner doesn't support quotes in table names.") +class QuotedNameArgumentTest(_QuotedNameArgumentTest): + pass + + +class _DateFixture(__DateFixture): + compare = None + + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + + class Decorated(sqlalchemy.TypeDecorator): + impl = cls.datatype + cache_ok = True + + Table( + "date_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("date_data", cls.datatype), + Column("decorated_date_data", Decorated), + ) + + +class DateTest(_DateTest): + """ + SPANNER OVERRIDE: + + DateTest tests used same class method to create table, so to avoid those failures + and maintain DRY concept just inherit the class to run tests successfully. + """ + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + +class CTETest(_CTETest): + @classmethod + def define_tables(cls, metadata): + """ + The original method creates a foreign key without a name, + which causes troubles on test cleanup. 
Overriding the + method to explicitly set a foreign key name. + """ + Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", ForeignKey("some_table.id", name="fk_some_table")), + ) + + Table( + "some_other_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("parent_id", Integer), + ) + + @pytest.mark.skip("INSERT from WITH subquery is not supported") + def test_insert_from_select_round_trip(self): + """ + The test checks if an INSERT can be done from a cte, like: + + WITH some_cte AS (...) + INSERT INTO some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_scalar_subq_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("DELETE from WITH subquery is not supported") + def test_delete_from_round_trip(self): + """ + The test checks if a DELETE can be done from a cte, like: + + WITH some_cte AS (...) + DELETE FROM some_other_table (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. + """ + pass + + @pytest.mark.skip("UPDATE from WITH subquery is not supported") + def test_update_from_round_trip(self): + """ + The test checks if an UPDATE can be done from a cte, like: + + WITH some_cte AS (...) + UPDATE some_other_table + SET (... SELECT * FROM some_cte) + + Such queries are not supported by Spanner. 
+ """ + pass + + @pytest.mark.skip("WITH RECURSIVE subqueries are not supported") + def test_select_recursive_round_trip(self): + pass + + +class DateTimeMicrosecondsTest(_DateTimeMicrosecondsTest, DateTest): + @pytest.mark.skip("Spanner dates are time zone independent") + def test_select_direct(self): + pass + + def test_round_trip(self): + """ + SPANNER OVERRIDE: + + Spanner converts timestamp into `%Y-%m-%dT%H:%M:%S.%fZ` format, so to avoid + assert failures convert datetime input to the desire timestamp format. + """ + date_table = self.tables.date_table + + with config.db.connect() as connection: + connection.execute(date_table.insert(), {"date_data": self.data, "id": 250}) + row = connection.execute(select(date_table.c.date_data)).first() + + compare = self.compare or self.data.astimezone(timezone.utc) + compare = compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + eq_(row[0].rfc3339(), compare) + assert isinstance(row[0], DatetimeWithNanoseconds) + + def test_round_trip_decorated(self, connection): + """ + SPANNER OVERRIDE: + + Spanner converts timestamp into `%Y-%m-%dT%H:%M:%S.%fZ` format, so to avoid + assert failures convert datetime input to the desire timestamp format. 
+ """ + date_table = self.tables.date_table + + connection.execute( + date_table.insert(), {"id": 1, "decorated_date_data": self.data} + ) + + row = connection.execute(select(date_table.c.decorated_date_data)).first() + + compare = self.compare or self.data.astimezone(timezone.utc) + compare = compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + eq_(row[0].rfc3339(), compare) + assert isinstance(row[0], DatetimeWithNanoseconds) + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + +class DateTimeTest(_DateTimeTest, DateTimeMicrosecondsTest): + """ + SPANNER OVERRIDE: + + DateTimeTest tests have the same failures same as DateTimeMicrosecondsTest tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. 
+ """ + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null_bound_comparison(self): + super().test_null_bound_comparison() + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_null(self, connection): + super().test_null(connection) + + @pytest.mark.skip("Spanner dates are time zone independent") + def test_select_direct(self): + pass + + +@pytest.mark.skip("Not supported by Spanner") +class DifficultParametersTest(_DifficultParametersTest): + pass + + +class FetchLimitOffsetTest(_FetchLimitOffsetTest): + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_expr_limit_simple_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_simple_limit_expr_offset(self, connection): + pass + + @pytest.mark.skip("Spanner doesn't support composite LIMIT and OFFSET clauses") + def test_bound_offset(self, connection): + pass + + @pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" + ) + def test_limit_render_multiple_times(self, connection): + table = self.tables.some_table + stmt = select(table.c.id).limit(1).scalar_subquery() + + u = union(select(stmt), select(stmt)).subquery().select() + + self._assert_result( + connection, + u, + [(1,)], + ) + + @testing.requires.offset + def test_simple_offset(self, connection): + table = self.tables.some_table + self._assert_result( + connection, + 
select(table).order_by(table.c.id).offset(2), + [(3, 3, 4), (4, 4, 5), (5, 4, 6)], + ) + + +@pytest.mark.skip("Spanner doesn't support autoincrement") +class IdentityAutoincrementTest(_IdentityAutoincrementTest): + pass + + +@pytest.mark.skip("Spanner doesn't support returning") +class ReturningGuardsTest(_ReturningGuardsTest): + pass + + +@pytest.mark.skip("Spanner doesn't support user made schemas") +class SameNamedSchemaTableTest(_SameNamedSchemaTableTest): + pass + + +class EscapingTest(_EscapingTest): + @provide_metadata + def test_percent_sign_round_trip(self): + """Test that the DBAPI accommodates for escaped / nonescaped + percent signs in a way that matches the compiler + + SPANNER OVERRIDE + Cloud Spanner supports tables with empty primary key, but + only single one row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + m = self.metadata + t = Table("t", m, Column("data", String(50))) + t.create(config.db) + with config.db.begin() as conn: + conn.execute(t.insert(), dict(data="some % value")) + + eq_( + conn.scalar( + select(t.c.data).where(t.c.data == literal_column("'some % value'")) + ), + "some % value", + ) + + conn.execute(t.delete()) + conn.execute(t.insert(), dict(data="some %% other value")) + eq_( + conn.scalar( + select(t.c.data).where( + t.c.data == literal_column("'some %% other value'") + ) + ), + "some %% other value", + ) + + +class ExistsTest(_ExistsTest): + def test_select_exists(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. + Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) 
+ """ + stuff = self.tables.stuff + eq_( + connection.execute( + select(exists().where(stuff.c.data == "some data")) + ).fetchall(), + [(True,)], + ) + + def test_select_exists_false(self, connection): + """ + SPANNER OVERRIDE: + + The original test is trying to execute a query like: + + SELECT ... + WHERE EXISTS (SELECT ...) + + SELECT WHERE without FROM clause is not supported by Spanner. + Rewriting the test to force it to generate a query like: + + SELECT EXISTS (SELECT ...) + """ + stuff = self.tables.stuff + eq_( + connection.execute( + select(exists().where(stuff.c.data == "no data")) + ).fetchall(), + [(False,)], + ) + + +class TableDDLTest(_TableDDLTest): + @pytest.mark.skip( + "Spanner table name must start with an uppercase or lowercase letter" + ) + def test_underscore_names(self): + pass + + @pytest.mark.skip("Table names incuding schemas are not supported by Spanner") + def test_create_table_schema(self): + pass + + +class FutureTableDDLTest(_FutureTableDDLTest): + @pytest.mark.skip("Table names incuding schemas are not supported by Spanner") + def test_create_table_schema(self): + pass + + @pytest.mark.skip( + "Spanner table name must start with an uppercase or lowercase letter" + ) + def test_underscore_names(self): + pass + + +@pytest.mark.skip("Max identifier length in Spanner is 128") +class LongNameBlowoutTest(_LongNameBlowoutTest): + pass + + +@pytest.mark.skip("Spanner doesn't support Time data type.") +class TimeTests(_TimeMicrosecondsTest, _TimeTest): + pass + + +@pytest.mark.skip("Spanner doesn't coerce dates from datetime.") +class DateTimeCoercedToDateTimeTest(_DateTimeCoercedToDateTimeTest): + pass + + +class IntegerTest(_IntegerTest): + @provide_metadata + def _round_trip(self, datatype, data): + """ + SPANNER OVERRIDE: + + This is the helper method for integer class tests which creates a table and + performs an insert operation. 
+ Cloud Spanner supports tables with an empty primary key, but only one + row can be inserted into such a table - following insertions will fail with + `400 id must not be NULL in table date_table`. + Overriding the tests and adding a manual primary key value to avoid the same + failures. + """ + metadata = self.metadata + int_table = Table( + "integer_table", + metadata, + Column("id", Integer, primary_key=True, test_needs_autoincrement=True), + Column("integer_data", datatype), + ) + + metadata.create_all(config.db) + + config.db.execute(int_table.insert(), {"id": 1, "integer_data": data}) + + row = config.db.execute(select(int_table.c.integer_data)).first() + + eq_(row, (data,)) + + if util.py3k: + assert isinstance(row[0], int) + else: + assert isinstance(row[0], (long, int)) # noqa + + def _huge_ints(): + return testing.combinations( + 2147483649, # 32 bits + 2147483648, # 32 bits + 2147483647, # 31 bits + 2147483646, # 31 bits + -2147483649, # 32 bits + -2147483648, # 32 interestingly, asyncpg accepts this one as int32 + -2147483647, # 31 + -2147483646, # 31 + 0, + 1376537018368127, + -1376537018368127, + argnames="intvalue", + ) + + @_huge_ints() + def test_huge_int_auto_accommodation(self, connection, intvalue): + """ + Spanner does not allow query to have FROM clause without a WHERE clause + """ + eq_( + connection.scalar(select(intvalue)), + intvalue, + ) + + def test_literal(self, literal_round_trip_spanner): + literal_round_trip_spanner(Integer, [5], [5]) + + +class _UnicodeFixture(__UnicodeFixture): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. 
+ """ + Table( + "unicode_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("unicode_data", cls.datatype), + ) + + def test_round_trip_executemany(self, connection): + """ + SPANNER OVERRIDE + + Cloud Spanner supports tables with empty primary key, but + only single one row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + unicode_table = self.tables.unicode_table + + connection.execute( + unicode_table.insert(), + [{"id": i, "unicode_data": self.data} for i in range(1, 4)], + ) + + rows = connection.execute(select(unicode_table.c.unicode_data)).fetchall() + eq_(rows, [(self.data,) for i in range(1, 4)]) + for row in rows: + assert isinstance(row[0], str) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal(self): + pass + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + +class UnicodeVarcharTest(_UnicodeFixture, _UnicodeVarcharTest): + """ + SPANNER OVERRIDE: + + UnicodeVarcharTest class inherits the __UnicodeFixture class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class UnicodeTextTest(_UnicodeFixture, _UnicodeTextTest): + """ + SPANNER OVERRIDE: + + UnicodeTextTest class inherits the __UnicodeFixture class's tests, + so to avoid those failures and maintain DRY concept just inherit the class to run + tests successfully. + """ + + pass + + +class RowFetchTest(_RowFetchTest): + def test_row_w_scalar_select(self, connection): + """ + SPANNER OVERRIDE: + + Cloud Spanner returns a DatetimeWithNanoseconds() for date + data types. Overriding the test to use a DatetimeWithNanoseconds + type value as an expected result. + -------------- + + test that a scalar select as a column is returned as such + and that type conversion works OK. 
+ + (this is half a SQLAlchemy Core test and half to catch database + backends that may have unusual behavior with scalar selects.) + """ + datetable = self.tables.has_dates + s = select(datetable.alias("x").c.today).scalar_subquery() + s2 = select(datetable.c.id, s.label("somelabel")) + row = connection.execute(s2).first() + + eq_( + row.somelabel, + DatetimeWithNanoseconds(2006, 5, 12, 12, 0, 0).astimezone(timezone.utc), + ) + + +class InsertBehaviorTest(_InsertBehaviorTest): + @pytest.mark.skip("Spanner doesn't support empty inserts") + def test_empty_insert(self): + pass + + @pytest.mark.skip("Spanner doesn't support empty inserts") + def test_empty_insert_multiple(self): + pass + + @pytest.mark.skip("Spanner doesn't support auto increment") + def test_insert_from_select_autoinc(self): + pass + + @pytest.mark.skip("Spanner does not support auto increment") + def test_no_results_for_non_returning_insert(self, connection, style, executemany): + pass + + def test_autoclose_on_insert(self): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support tables with an auto increment primary key, + following insertions will fail with `400 id must not be NULL in table + autoinc_pk`. + + Overriding the tests and adding a manual primary key value to avoid the same + failures. + """ + if ( + hasattr(config.requirements, "returning") + and config.requirements.returning.enabled + ): + engine = engines.testing_engine(options={"implicit_returning": False}) + else: + engine = config.db + + with engine.begin() as conn: + r = conn.execute( + self.tables.autoinc_pk.insert(), dict(id=1, data="some data") + ) + + assert r._soft_closed + assert not r.closed + assert r.is_insert + assert not r.returns_rows + + def test_autoclose_on_insert_implicit_returning(self, connection): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support tables with an auto increment primary key, + following insertions will fail with `400 id must not be NULL in table + autoinc_pk`. 
+ + Overriding the tests and adding a manual primary key value to avoid the same + failures. + """ + r = connection.execute( + # return_defaults() ensures RETURNING will be used, + # new in 2.0 as sqlite/mariadb offer both RETURNING and + # cursor.lastrowid + self.tables.autoinc_pk.insert().return_defaults(), + dict(id=2, data="some data"), + ) + assert r._soft_closed + assert not r.closed + assert r.is_insert + + # Spanner does not return any rows in this case, because the primary key + # is not auto-generated. + assert not r.returns_rows + + +class BytesTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + def test_nolength_binary(self): + metadata = MetaData() + foo = Table("foo", metadata, Column("one", LargeBinary)) + + foo.create(config.db) + foo.drop(config.db) + + +class StringTest(_StringTest): + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + def test_dont_truncate_rightside( + self, metadata, connection, expr=None, expected=None + ): + t = Table( + "t", + metadata, + Column("x", String(2)), + Column("id", Integer, primary_key=True), + ) + t.create(connection) + connection.connection.commit() + connection.execute( + t.insert(), + [{"x": "AB", "id": 1}, {"x": "BC", "id": 2}, {"x": "AC", "id": 3}], + ) + combinations = [("%B%", ["AB", "BC"]), ("A%C", ["AC"]), ("A%C%Z", [])] + + for args in combinations: + eq_( + connection.scalars(select(t.c.x).where(t.c.x.like(args[0]))).all(), + args[1], + ) + + def test_literal(self, literal_round_trip_spanner): + # note that in Python 3, this invokes the Unicode + # datatype for the literal part because all strings are unicode + literal_round_trip_spanner(String(40), ["some text"], ["some text"]) + + def test_literal_quoting(self, literal_round_trip_spanner): + data = """some 'text' hey "hi there" that's text""" + literal_round_trip_spanner(String(40), [data], [data]) + + def test_literal_backslashes(self, literal_round_trip_spanner): 
+ data = r"backslash one \ backslash two \\ end" + literal_round_trip_spanner(String(40), [data], [data]) + + +class TextTest(_TextTest): + @classmethod + def define_tables(cls, metadata): + """ + SPANNER OVERRIDE: + + Cloud Spanner doesn't support auto incrementing ids feature, + which is used by the original test. Overriding the test data + creation method to disable autoincrement and make id column + nullable. + """ + Table( + "text_table", + metadata, + Column("id", Integer, primary_key=True, nullable=True), + Column("text_data", Text), + ) + + @pytest.mark.skip("Spanner doesn't support non-ascii characters") + def test_literal_non_ascii(self): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_roundtrip(self, connection): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_empty_strings(self, connection): + pass + + @pytest.mark.skip("Not supported by Spanner") + def test_text_null_strings(self, connection): + pass + + def test_literal(self, literal_round_trip_spanner): + literal_round_trip_spanner(Text, ["some text"], ["some text"]) + + def test_literal_quoting(self, literal_round_trip_spanner): + data = """some 'text' hey "hi there" that's text""" + literal_round_trip_spanner(Text, [data], [data]) + + def test_literal_backslashes(self, literal_round_trip_spanner): + data = r"backslash one \ backslash two \\ end" + literal_round_trip_spanner(Text, [data], [data]) + + def test_literal_percentsigns(self, literal_round_trip_spanner): + data = r"percent % signs %% percent" + literal_round_trip_spanner(Text, [data], [data]) + + +class NumericTest(_NumericTest): + @testing.fixture + def do_numeric_test(self, metadata, connection): + @testing.emits_warning(r".*does \*not\* support Decimal objects natively") + def run(type_, input_, output, filter_=None, check_scale=False): + t = Table( + "t", + metadata, + Column("x", type_), + Column("id", Integer, primary_key=True), + ) + t.create(connection) + 
connection.connection.commit() + connection.execute( + t.insert(), [{"x": x, "id": i} for i, x in enumerate(input_)] + ) + + result = {row[0] for row in connection.execute(t.select())} + output = set(output) + if filter_: + result = set(filter_(x) for x in result) + output = set(filter_(x) for x in output) + eq_(result, output) + if check_scale: + eq_([str(x) for x in result], [str(x) for x in output]) + + return run + + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric(self, literal_round_trip_spanner): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip_spanner( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + @emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric_asfloat(self, literal_round_trip_spanner): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + literal_round_trip_spanner( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563")], + [15.7563], + ) + + def test_render_literal_float(self, literal_round_trip_spanner): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + literal_round_trip_spanner( + Float(4), + [decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + @requires.precision_generic_float_type + def test_float_custom_scale(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + do_numeric_test( + Float(None, decimal_return_scale=7, asdecimal=True), + [decimal.Decimal("15.7563827"), decimal.Decimal("15.7563827")], + [decimal.Decimal("15.7563827")], + check_scale=True, + ) + + def test_numeric_as_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. + """ + do_numeric_test( + Numeric(precision=8, scale=4), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + def test_numeric_as_float(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Spanner throws an error 400 Value has type FLOAT64 which cannot be + inserted into column x, which has type NUMERIC for value 15.7563. + Overriding the test to remove the failure case. + """ + do_numeric_test( + Numeric(precision=8, scale=4, asdecimal=False), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [15.7563], + ) + + @requires.floats_to_four_decimals + def test_float_as_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. 
+ """ + do_numeric_test( + Float(precision=8, asdecimal=True), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563"), None], + [decimal.Decimal("15.7563"), None], + filter_=lambda n: n is not None and round(n, 4) or None, + ) + + def test_float_as_float(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + """ + do_numeric_test( + Float(precision=8), + [decimal.Decimal("15.7563"), decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None, + ) + + @testing.requires.literal_float_coercion + def test_float_coerce_round_trip(self, connection): + expr = 15.7563 + + val = connection.scalar(select(cast(literal(expr), FLOAT))) + eq_(val, expr) + + @requires.precision_numerics_general + def test_precision_decimal(self, do_numeric_test): + """ + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". + Overriding the test to avoid the same failure. + + Remove an extra digits after decimal point as cloud spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. + """ + numbers = set( + [ + decimal.Decimal("54.246451650"), + decimal.Decimal("0.004354"), + decimal.Decimal("900.0"), + ] + ) + do_numeric_test(Numeric(precision=18, scale=9), numbers, numbers) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal_large(self, do_numeric_test): + """test exceedingly large decimals. + + SPANNER OVERRIDE: + + Cloud Spanner supports tables with an empty primary key, but + only a single row can be inserted into such a table - + following insertions will fail with `Row [] already exists". 
+ Overriding the test to avoid the same failure. + """ + numbers = set( + [ + decimal.Decimal("4E+8"), + decimal.Decimal("5748E+15"), + decimal.Decimal("1.521E+15"), + decimal.Decimal("000000000.1E+9"), + ] + ) + do_numeric_test(Numeric(precision=25, scale=2), numbers, numbers) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal(self, do_numeric_test): + """test exceedingly small decimals. + + Decimal reports values with E notation when the exponent + is greater than 6. + + SPANNER OVERRIDE: + + Remove extra digits after decimal point as Cloud Spanner is + capable of representing an exact numeric value with a precision + of 38 and scale of 9. + """ + numbers = set( + [ + decimal.Decimal("1E-2"), + decimal.Decimal("1E-3"), + decimal.Decimal("1E-4"), + decimal.Decimal("1E-5"), + decimal.Decimal("1E-6"), + decimal.Decimal("1E-7"), + decimal.Decimal("1E-8"), + decimal.Decimal("0.105940696"), + decimal.Decimal("0.005940696"), + decimal.Decimal("0.000000696"), + decimal.Decimal("0.700000696"), + decimal.Decimal("696E-9"), + ] + ) + do_numeric_test(Numeric(precision=38, scale=9), numbers, numbers) + + +class LikeFunctionsTest(_LikeFunctionsTest): + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_contains_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_endswith_autoescape_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape(self): + pass + + 
@pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_escape(self): + pass + + @pytest.mark.skip("Spanner doesn't support LIKE ESCAPE clause") + def test_startswith_autoescape_escape(self): + pass + + def test_escape_keyword_raises(self): + """Check that ESCAPE keyword causes an exception when used.""" + with pytest.raises(NotImplementedError): + col = self.tables.some_table.c.data + self._test(col.contains("b##cde", escape="#"), {7}) + + +@pytest.mark.skip("Spanner doesn't support IS DISTINCT FROM clause") +class IsOrIsNotDistinctFromTest(_IsOrIsNotDistinctFromTest): + pass + + +@pytest.mark.skip("Spanner doesn't bizarre characters in foreign key names") +class BizarroCharacterFKResolutionTest(fixtures.TestBase): + pass + + +class BizarroCharacterTest(fixtures.TestBase): + @pytest.mark.skip("Bizarre characters in foreign key names are not supported") + def test_fk_ref(self, testing_engine): + pass + + +class IsolationLevelTest(fixtures.TestBase): + @pytest.mark.skip("Cloud Spanner does not support different isolation levels") + def test_dialect_user_setting_is_restored(self, testing_engine): + pass + + +class OrderByLabelTest(_OrderByLabelTest): + @pytest.mark.skip( + "Spanner requires an alias for the GROUP BY list when specifying derived " + "columns also used in SELECT" + ) + def test_group_by_composed(self): + pass + + +class CompoundSelectTest(_CompoundSelectTest): + """ + See: https://github.com/googleapis/python-spanner/issues/347 + """ + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_limit_offset_selectable_in_unions(self): + pass + + @pytest.mark.skip( + "Spanner DBAPI incorrectly classify the statement starting with brackets." + ) + def test_order_by_selectable_in_unions(self): + pass + + +class TestQueryHints(fixtures.TablesTest): + """ + Compile a complex query with JOIN and check that + the table hint was set into the right place. 
+ """ + + __backend__ = True + + def test_complex_query_table_hints(self): + EXPECTED_QUERY = ( + "SELECT users.id, users.name \nFROM users @{FORCE_INDEX=table_1_by_int_idx}" + " JOIN addresses ON users.id = addresses.user_id " + "\nWHERE users.name IN (__[POSTCOMPILE_name_1])" + ) + + Base = declarative_base() + engine = create_engine( + "spanner:///projects/project-id/instances/instance-id/databases/database-id" + ) + + class User(Base): + __tablename__ = "users" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + addresses = relationship("Address", backref="user") + + class Address(Base): + __tablename__ = "addresses" + id = Column(Integer, primary_key=True) + email = Column(String(50)) + user_id = Column(Integer, ForeignKey("users.id")) + + session = Session(engine) + + query = session.query(User) + query = query.with_hint( + selectable=User, text="@{FORCE_INDEX=table_1_by_int_idx}" + ) + + query = query.filter(User.name.in_(["val1", "val2"])) + query = query.join(Address) + + assert str(query.statement.compile(session.bind)) == EXPECTED_QUERY + + +class InterleavedTablesTest(fixtures.TestBase): + """ + Check that CREATE TABLE statements for interleaved tables are correctly + generated. 
+ """ + + def setUp(self): + self._engine = create_engine( + "spanner:///projects/appdev-soda-spanner-staging/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + self._metadata = MetaData() + + def test_interleave(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + def test_interleave_on_delete_cascade(self): + EXP_QUERY = ( + "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " + "\n\tclient_id INT64 NOT NULL, " + "\n\tclient_name STRING(16) NOT NULL" + "\n) PRIMARY KEY (team_id, client_id)," + "\nINTERLEAVE IN PARENT team ON DELETE CASCADE\n\n" + ) + client = Table( + "client", + self._metadata, + Column("team_id", Integer, primary_key=True), + Column("client_id", Integer, primary_key=True), + Column("client_name", String(16), nullable=False), + spanner_interleave_in="team", + spanner_interleave_on_delete_cascade=True, + ) + with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: + client.create(self._engine) + execute.assert_called_once_with(EXP_QUERY, []) + + +class UserAgentTest(fixtures.TestBase): + """Check that SQLAlchemy dialect uses correct user agent.""" + + def setUp(self): + self._engine = create_engine( + "spanner:///projects/appdev-soda-spanner-staging/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + self._metadata = MetaData() + + def test_user_agent(self): + with self._engine.connect() as connection: + assert 
( + connection.connection.instance._client._client_info.user_agent + == f"gl-sqlalchemy-spanner/{sqlalchemy_spanner_version.__version__}" + ) + + +class SimpleUpdateDeleteTest(_SimpleUpdateDeleteTest): + """ + SPANNER OVERRIDE: + + Spanner doesn't support `rowcount` property. These + test cases overrides omit `rowcount` checks. + """ + + def test_delete(self, connection): + t = self.tables.plain_pk + r = connection.execute(t.delete().where(t.c.id == 2)) + assert not r.is_insert + assert not r.returns_rows + eq_( + connection.execute(t.select().order_by(t.c.id)).fetchall(), + [(1, "d1"), (3, "d3")], + ) + + def test_update(self, connection): + t = self.tables.plain_pk + r = connection.execute(t.update().where(t.c.id == 2), dict(data="d2_new")) + assert not r.is_insert + assert not r.returns_rows + + eq_( + connection.execute(t.select().order_by(t.c.id)).fetchall(), + [(1, "d1"), (2, "d2_new"), (3, "d3")], + ) + + +class HasIndexTest(_HasIndexTest): + __backend__ = True + kind = testing.combinations("dialect", "inspector", argnames="kind") + + @classmethod + def define_tables(cls, metadata): + tt = Table( + "test_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + Column("data2", String(50)), + ) + sqlalchemy.Index("my_idx", tt.c.data) + + @kind + def test_has_index(self, kind, connection, metadata): + meth = self._has_index(kind, connection) + assert meth("test_table", "my_idx") + assert not meth("test_table", "my_idx_s") + assert not meth("nonexistent_table", "my_idx") + assert not meth("test_table", "nonexistent_idx") + + assert not meth("test_table", "my_idx_2") + assert not meth("test_table_2", "my_idx_3") + idx = Index("my_idx_2", self.tables.test_table.c.data2) + tbl = Table( + "test_table_2", + metadata, + Column("foo", Integer, primary_key=True), + Index("my_idx_3", "foo"), + ) + idx.create(connection) + tbl.create(connection) + + try: + if kind == "inspector": + assert not meth("test_table", "my_idx_2") + assert 
not meth("test_table_2", "my_idx_3") + meth.__self__.clear_cache() + connection.connection.commit() + assert meth("test_table", "my_idx_2") is True + assert meth("test_table_2", "my_idx_3") is True + finally: + tbl.drop(connection) + idx.drop(connection) + connection.connection.commit() + self.tables["test_table"].indexes.remove(idx) + + @pytest.mark.skip("Not supported by Cloud Spanner") + @kind + def test_has_index_schema(self, kind, connection): + pass + + +class HasTableTest(_HasTableTest): + @classmethod + def define_tables(cls, metadata): + Table( + "test_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), + ) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_table_nonexistent_schema(self): + pass + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_table_schema(self): + pass + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_table_cache(self): + pass + + @testing.requires.views + def test_has_table_view(self, connection): + pass + + @testing.requires.views + def test_has_table_view_schema(self, connection): + pass + + +class PostCompileParamsTest(_PostCompileParamsTest): + def test_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + table.c.x == sqlalchemy.bindparam("q", literal_execute=True) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=10)) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " "\nWHERE some_table.x = 10", + [] if config.db.dialect.positional else {}, + ) + ) + + def test_execute_expanding_plus_literal_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + table.c.x.in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, 
dict(q=[5, 6, 7])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE some_table.x IN (5, 6, 7)", + [] if config.db.dialect.positional else {}, + ) + ) + + @testing.requires.tuple_in + def test_execute_tuple_expanding_plus_literal_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + sqlalchemy.tuple_(table.c.x, table.c.y).in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=[(5, 10), (12, 18)])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE (some_table.x, some_table.y) " + "IN (%s(5, 10), (12, 18))" + % ("VALUES " if config.db.dialect.tuple_in_values else ""), + () if config.db.dialect.positional else {}, + ) + ) + + @testing.requires.tuple_in + def test_execute_tuple_expanding_plus_literal_heterogeneous_execute(self): + table = self.tables.some_table + + stmt = select(table.c.id).where( + sqlalchemy.tuple_(table.c.x, table.c.z).in_( + sqlalchemy.bindparam("q", expanding=True, literal_execute=True) + ) + ) + + with self.sql_execution_asserter() as asserter: + with config.db.connect() as conn: + conn.execute(stmt, dict(q=[(5, "z1"), (12, "z3")])) + + asserter.assert_( + sqlalchemy.testing.assertsql.CursorSQL( + "SELECT some_table.id \nFROM some_table " + "\nWHERE (some_table.x, some_table.z) " + "IN (%s(5, 'z1'), (12, 'z3'))" + % ("VALUES " if config.db.dialect.tuple_in_values else ""), + () if config.db.dialect.positional else {}, + ) + ) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class JSONTest(_JSONTest): + @pytest.mark.skip("Values without keys are not supported.") + def test_single_element_round_trip(self, element): + pass + + def _test_round_trip(self, data_element, connection): + 
data_table = self.tables.data_table + + connection.execute( + data_table.insert(), + {"id": random.randint(1, 100000000), "name": "row1", "data": data_element}, + ) + + row = connection.execute(select(data_table.c.data)).first() + + eq_(row, (data_element,)) + + def test_unicode_round_trip(self): + # note we include Unicode supplementary characters as well + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "r1", + "data": { + "réve🐍 illé": "réve🐍 illé", + "data": {"k1": "drôl🐍e"}, + }, + }, + ) + + eq_( + conn.scalar(select(self.tables.data_table.c.data)), + { + "réve🐍 illé": "réve🐍 illé", + "data": {"k1": "drôl🐍e"}, + }, + ) + + @pytest.mark.skip("Parameterized types are not supported.") + def test_eval_none_flag_orm(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." + ) + def test_index_typed_comparison(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." 
+ ) + def test_path_typed_comparison(self): + pass + + @pytest.mark.skip("Custom JSON de-/serializers are not supported.") + def test_round_trip_custom_json(self): + pass + + def _index_fixtures(fn): + fn = testing.combinations( + ("boolean", True), + ("boolean", False), + ("boolean", None), + ("string", "some string"), + ("string", None), + ("integer", 15), + ("integer", 1), + ("integer", 0), + ("integer", None), + ("float", 28.5), + ("float", None), + id_="sa", + )(fn) + return fn + + @_index_fixtures + def test_index_typed_access(self, datatype, value): + data_table = self.tables.data_table + data_element = {"key1": value} + with config.db.connect() as conn: + conn.execute( + data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + expr = data_table.c.data["key1"] + expr = getattr(expr, "as_%s" % datatype)() + + roundtrip = conn.scalar(select(expr)) + if roundtrip in ("true", "false", None): + roundtrip = str(roundtrip).capitalize() + + eq_(str(roundtrip), str(value)) + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_json_null_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_none_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." 
+ ) + def test_round_trip_none_as_sql_null(self): + pass + + +class ExecutionOptionsRequestPriorotyTest(fixtures.TestBase): + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + metadata = MetaData() + + self._table = Table( + "execution_options2", + metadata, + Column("opt_id", Integer, primary_key=True), + Column("opt_name", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + time.sleep(1) + + def test_request_priority(self): + PRIORITY = RequestOptions.Priority.PRIORITY_MEDIUM + with self._engine.connect().execution_options( + request_priority=PRIORITY + ) as connection: + connection.execute(select(self._table)).fetchall() + + with self._engine.connect() as connection: + assert connection.connection.request_priority is None + + engine = create_engine("sqlite:///database") + with engine.connect() as connection: + pass + + +class CreateEngineWithClientObjectTest(fixtures.TestBase): + def test_create_engine_w_valid_client_object(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + by passing custom Client object. + """ + client = Client(project=get_project()) + engine = create_engine(get_db_url(), connect_args={"client": client}) + with engine.connect() as connection: + assert connection.connection.instance._client == client + + def test_create_engine_w_invalid_client_object(self): + """ + SPANNER TEST: + + Check that if project id in url and custom Client + Object passed to enginer mismatch, error is thrown. + """ + client = Client(project="project_id") + engine = create_engine(get_db_url(), connect_args={"client": client}) + + with pytest.raises(ValueError): + engine.connect() + + +class CreateEngineWithoutDatabaseTest(fixtures.TestBase): + def test_create_engine_wo_database(self): + """ + SPANNER TEST: + + Check that we can connect to SqlAlchemy + without passing database id in the + connection URL. 
+ """ + engine = create_engine(get_db_url().split("/database")[0]) + with engine.connect() as connection: + assert connection.connection.database is None + + +class ReturningTest(fixtures.TestBase): + def setUp(self): + self._engine = create_engine(get_db_url()) + metadata = MetaData() + + self._table = Table( + "returning_test", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(16), nullable=False), + ) + + metadata.create_all(self._engine) + + def test_returning_for_insert_and_update(self): + random_id = random.randint(1, 1000) + with self._engine.begin() as connection: + stmt = ( + self._table.insert() + .values(id=random_id, data="some % value") + .returning(self._table.c.id) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(random_id,)], + ) + + with self._engine.begin() as connection: + update_text = "some + value" + stmt = ( + self._table.update() + .values(data=update_text) + .where(self._table.c.id == random_id) + .returning(self._table.c.data) + ) + row = connection.execute(stmt).fetchall() + eq_( + row, + [(update_text,)], + ) + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class SequenceTest(_SequenceTest): + @classmethod + def define_tables(cls, metadata): + Table( + "seq_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_opt_pk", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("tab_id_seq_opt", data_type=Integer, optional=True), + primary_key=True, + ), + Column("data", String(50)), + ) + + Table( + "seq_no_returning", + metadata, + Column( + "id", + Integer, + sqlalchemy.Sequence("noret_id_seq"), + primary_key=True, + ), + Column("data", String(50)), + implicit_returning=False, + ) + + def test_insert_lastrowid(self, connection): + r = connection.execute(self.tables.seq_pk.insert(), dict(data="some data")) + assert 
len(r.inserted_primary_key) == 1 + is_instance_of(r.inserted_primary_key[0], int) + + def test_nextval_direct(self, connection): + r = connection.execute(self.tables.seq_pk.c.id.default) + is_instance_of(r, int) + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + id, name = row + is_instance_of(id, int) + eq_(name, "some data") + + @testing.combinations((True,), (False,), argnames="implicit_returning") + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_insert_roundtrip_translate(self, connection, implicit_returning): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_nextval_direct_schema_translate(self, connection): + pass + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class HasSequenceTest(_HasSequenceTest): + @classmethod + def define_tables(cls, metadata): + sqlalchemy.Sequence("user_id_seq", metadata=metadata) + sqlalchemy.Sequence( + "other_seq", metadata=metadata, nomaxvalue=True, nominvalue=True + ) + Table( + "user_id_table", + metadata, + Column("id", Integer, primary_key=True), + ) + + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_cache(self, connection, metadata): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_schemas_neg(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_default_not_in_remote(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_has_sequence_remote_not_in_default(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud 
Spanner") + def test_get_sequence_names_no_sequence_schema(self, connection): + pass + + @testing.requires.schemas + @pytest.mark.skip("Not supported by Cloud Spanner") + def test_get_sequence_names_sequences_schema(self, connection): + pass + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class HasSequenceTestEmpty(_HasSequenceTestEmpty): + def test_get_sequence_names_no_sequence(self, connection): + super().test_get_sequence_names_no_sequence(connection) diff --git a/packages/sqlalchemy-spanner/test/unit/__init__.py b/packages/sqlalchemy-spanner/test/unit/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/sqlalchemy-spanner/test/unit/test_alembic.py b/packages/sqlalchemy-spanner/test/unit/test_alembic.py new file mode 100644 index 000000000000..75e395618f25 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/unit/test_alembic.py @@ -0,0 +1,97 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from alembic.ddl import base as ddl_base +from google.cloud.sqlalchemy_spanner import sqlalchemy_spanner +from sqlalchemy import String, TextClause +from sqlalchemy.testing import eq_ +from sqlalchemy.testing.plugin.plugin_base import fixtures + + +class TestAlembicTest(fixtures.TestBase): + def test_visit_column_nullable_with_not_null_column(self): + ddl = sqlalchemy_spanner.visit_column_nullable( + ddl_base.ColumnNullable( + name="tbl", column_name="col", nullable=False, existing_type=String(256) + ), + sqlalchemy_spanner.SpannerDDLCompiler( + sqlalchemy_spanner.SpannerDialect(), None + ), + ) + eq_(ddl, "ALTER TABLE tbl ALTER COLUMN col STRING(256) NOT NULL") + + def test_visit_column_nullable_with_nullable_column(self): + ddl = sqlalchemy_spanner.visit_column_nullable( + ddl_base.ColumnNullable( + name="tbl", column_name="col", nullable=True, existing_type=String(256) + ), + sqlalchemy_spanner.SpannerDDLCompiler( + sqlalchemy_spanner.SpannerDialect(), None + ), + ) + eq_(ddl, "ALTER TABLE tbl ALTER COLUMN col STRING(256)") + + def test_visit_column_nullable_with_default(self): + ddl = sqlalchemy_spanner.visit_column_nullable( + ddl_base.ColumnNullable( + name="tbl", + column_name="col", + nullable=False, + existing_type=String(256), + existing_server_default=TextClause("GENERATE_UUID()"), + ), + sqlalchemy_spanner.SpannerDDLCompiler( + sqlalchemy_spanner.SpannerDialect(), None + ), + ) + eq_( + ddl, + "ALTER TABLE tbl " + "ALTER COLUMN col " + "STRING(256) NOT NULL DEFAULT (GENERATE_UUID())", + ) + + def test_visit_column_type(self): + ddl = sqlalchemy_spanner.visit_column_type( + ddl_base.ColumnType( + name="tbl", + column_name="col", + type_=String(256), + existing_nullable=True, + ), + sqlalchemy_spanner.SpannerDDLCompiler( + sqlalchemy_spanner.SpannerDialect(), None + ), + ) + eq_(ddl, "ALTER TABLE tbl ALTER COLUMN col STRING(256)") + + def test_visit_column_type_with_default(self): + ddl = sqlalchemy_spanner.visit_column_type( + ddl_base.ColumnType( + 
name="tbl", + column_name="col", + type_=String(256), + existing_nullable=False, + existing_server_default=TextClause("GENERATE_UUID()"), + ), + sqlalchemy_spanner.SpannerDDLCompiler( + sqlalchemy_spanner.SpannerDialect(), None + ), + ) + eq_( + ddl, + "ALTER TABLE tbl " + "ALTER COLUMN col " + "STRING(256) NOT NULL DEFAULT (GENERATE_UUID())", + ) diff --git a/packages/sqlalchemy-spanner/test/unit/test_opentelemetry_tracing.py b/packages/sqlalchemy-spanner/test/unit/test_opentelemetry_tracing.py new file mode 100644 index 000000000000..21ea1bb78bc2 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/unit/test_opentelemetry_tracing.py @@ -0,0 +1,116 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +import importlib +import mock +import pytest +import sys + +try: + from opentelemetry import trace as trace_api + from opentelemetry.trace.status import StatusCode +except ImportError: + pass + +from google.api_core.exceptions import GoogleAPICallError +from google.cloud.spanner_v1 import SpannerClient +from google.cloud.sqlalchemy_spanner import _opentelemetry_tracing +from test._helpers import OpenTelemetryBase, HAS_OPENTELEMETRY_INSTALLED + + +def _make_rpc_error(error_cls, trailing_metadata=None): + import grpc + + grpc_error = mock.create_autospec(grpc.Call, instance=True) + grpc_error.trailing_metadata.return_value = trailing_metadata + return error_cls("error", errors=(grpc_error,)) + + +# Skip all of these tests if we don't have OpenTelemetry +if HAS_OPENTELEMETRY_INSTALLED: + + class NoTracingTest(OpenTelemetryBase): + def setup_method(self): + self._temp_opentelemetry = sys.modules["opentelemetry"] + + sys.modules["opentelemetry"] = None + importlib.reload(_opentelemetry_tracing) + + def teardown_method(self): + super(NoTracingTest, self).teardown_method() + sys.modules["opentelemetry"] = 
self._temp_opentelemetry + importlib.reload(_opentelemetry_tracing) + + def test_no_trace_call(self): + with _opentelemetry_tracing.trace_call("Test") as no_span: + assert no_span is None + + class TracingTest(OpenTelemetryBase): + def test_trace_call(self): + extra_attributes = { + "attribute1": "value1", + # Since our database is mocked, we have to override the + # db.instance parameter so it is a string. + "db.instance": "database_name", + } + + expected_attributes = { + "db.type": "spanner", + "db.engine": "sqlalchemy_spanner", + "db.url": SpannerClient.DEFAULT_ENDPOINT, + "net.host.name": SpannerClient.DEFAULT_ENDPOINT, + } + expected_attributes.update(extra_attributes) + + with _opentelemetry_tracing.trace_call( + "CloudSpannerSqlAlchemy.Test", extra_attributes + ) as span: + span.set_attribute("after_setup_attribute", 1) + + expected_attributes["after_setup_attribute"] = 1 + + span_list = self.ot_exporter.get_finished_spans() + assert len(span_list) == 1 + + span = span_list[0] + assert span.kind == trace_api.SpanKind.CLIENT + span_attr = dict(span.attributes) + for key in expected_attributes: + assert key in span_attr + assert span_attr[key] == expected_attributes[key] + assert span.name == "CloudSpannerSqlAlchemy.Test" + assert span.status.status_code == StatusCode.OK + + def test_trace_error(self): + extra_attributes = {"db.instance": "database_name"} + + expected_attributes = { + "db.type": "spanner", + "db.engine": "sqlalchemy_spanner", + "db.url": SpannerClient.DEFAULT_ENDPOINT, + "net.host.name": SpannerClient.DEFAULT_ENDPOINT, + } + expected_attributes.update(extra_attributes) + + with pytest.raises(GoogleAPICallError): + with _opentelemetry_tracing.trace_call( + "CloudSpannerSqlAlchemy.Test", + extra_attributes, + ) as span: + from google.api_core.exceptions import InvalidArgument + + raise _make_rpc_error(InvalidArgument) + + span_list = self.ot_exporter.get_finished_spans() + assert len(span_list) == 1 + span = span_list[0] + assert span.kind == 
trace_api.SpanKind.CLIENT + span_attr = dict(span.attributes) + for key in expected_attributes: + assert key in span_attr + assert span_attr[key] == expected_attributes[key] + assert span.name == "CloudSpannerSqlAlchemy.Test" + assert span.status.status_code == StatusCode.ERROR diff --git a/packages/sqlalchemy-spanner/test/unit/test_packaging.py b/packages/sqlalchemy-spanner/test/unit/test_packaging.py new file mode 100644 index 000000000000..c05f60426361 --- /dev/null +++ b/packages/sqlalchemy-spanner/test/unit/test_packaging.py @@ -0,0 +1,37 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys + + +def test_namespace_package_compat(tmp_path): + # The ``google`` namespace package should not be masked + # by the presence of ``sqlalchemy-spanner``. + google = tmp_path / "google" + google.mkdir() + google.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.othermod"] + subprocess.check_call(cmd, env=env) + + # The ``google.cloud`` namespace package should not be masked + # by the presence of ``sqlalchemy-spanner``. 
+ google_cloud = tmp_path / "google" / "cloud" + google_cloud.mkdir() + google_cloud.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.cloud.othermod"] + subprocess.check_call(cmd, env=env) diff --git a/packages/sqlalchemy-spanner/test_migration_env.py b/packages/sqlalchemy-spanner/test_migration_env.py new file mode 100644 index 000000000000..55919c31655b --- /dev/null +++ b/packages/sqlalchemy-spanner/test_migration_env.py @@ -0,0 +1,97 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from logging.config import fileConfig + +from alembic import context +from alembic.ddl.impl import DefaultImpl +from sqlalchemy import engine_from_config, pool + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +target_metadata = None + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. 
+
+
+class SpannerImpl(DefaultImpl):
+    __dialect__ = "spanner+spanner"
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well. By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+        version_table_pk=False,
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+    connectable = engine_from_config(
+        config.get_section(config.config_ini_section),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection,
+            target_metadata=target_metadata,
+            version_table_pk=False,
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/scripts/split_repo_migration/single-library.git-migrate-history.sh b/scripts/split_repo_migration/single-library.git-migrate-history.sh
index 650abac5ca26..b110af51fe97 100755
--- a/scripts/split_repo_migration/single-library.git-migrate-history.sh
+++ b/scripts/split_repo_migration/single-library.git-migrate-history.sh
@@ -70,7 +70,7 @@ echo "Created working directory: ${WORKDIR}"
 pushd "${WORKDIR}" # cd into workdir
 
 echo "Cloning source repository: ${SOURCE_REPO}"
-git clone --recurse-submodules --recurse-submodules "git@github.com:${SOURCE_REPO}.git" source-repo
+git clone --recurse-submodules "git@github.com:${SOURCE_REPO}.git" source-repo
 
 pushd source-repo
 
@@ -124,7 +124,7 @@ git filter-branch \
   --force \
   --prune-empty \
   --tree-filter \
-  "git submodule update --init --recursive; find . -mindepth 2 -name .git -exec rm -rf {} +; git submodule update --init --recursive; find . -mindepth 2 -name .git -exec rm -rf {} +; shopt -s dotglob; mkdir -p ${WORKDIR}/migrated-source; mv * ${WORKDIR}/migrated-source; mkdir -p ${TARGET_PATH}; { mv ${WORKDIR}/migrated-source/* ${TARGET_PATH} || echo 'No files to move' ; }"
+  "git submodule update --init --recursive; find . -mindepth 2 -name .git -exec rm -rf {} +; shopt -s dotglob; mkdir -p ${WORKDIR}/migrated-source; mv * ${WORKDIR}/migrated-source; mkdir -p ${TARGET_PATH}; { mv ${WORKDIR}/migrated-source/* ${TARGET_PATH} || echo 'No files to move' ; }"
 
 # back to workdir
 popd
@@ -142,7 +142,7 @@ echo "Success"
 popd # back to workdir
 
 # Do a diff between source code split repo and migrated code.
-git clone --recurse-submodules --recurse-submodules "git@github.com:${SOURCE_REPO}.git" source-repo-validation # Not ideal to clone again.
+git clone --recurse-submodules "git@github.com:${SOURCE_REPO}.git" source-repo-validation # Not ideal to clone again.
 find source-repo-validation -name .git -exec rm -rf {} + # That folder is not needed for validation.
 
 DIFF_FILE="${WORKDIR}/diff.txt"