From 4d3066d3587b0f44076784685809b8be3274b1af Mon Sep 17 00:00:00 2001 From: azure-sdk Date: Tue, 20 Jan 2026 18:33:11 +0000 Subject: [PATCH] Configurations: 'specification/postgresql/DBforPostgreSQL.Management/tspconfig.yaml', API Version: 2026-01-01-preview, SDK Release Type: beta, and CommitSHA: '74fd6c09ef3546c7997e174cb67002b5b273f381' in SpecRepo: 'https://github.com/Azure/azure-rest-api-specs' Pipeline run: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=5775030 Refer to https://eng.ms/docs/products/azure-developer-experience/develop/sdk-release/sdk-release-prerequisites to prepare for SDK release. --- .../azure-mgmt-postgresql/CHANGELOG.md | 7 + sdk/postgresql/azure-mgmt-postgresql/LICENSE | 21 + .../azure-mgmt-postgresql/MANIFEST.in | 7 + .../azure-mgmt-postgresql/README.md | 61 + .../azure-mgmt-postgresql/_metadata.json | 7 + .../apiview-properties.json | 314 + .../azure-mgmt-postgresql/azure/__init__.py | 1 + .../azure/mgmt/__init__.py | 1 + .../azure/mgmt/postgresql/__init__.py | 32 + .../azure/mgmt/postgresql/_client.py | 265 + .../azure/mgmt/postgresql/_configuration.py | 80 + .../azure/mgmt/postgresql/_patch.py | 21 + .../azure/mgmt/postgresql/_utils/__init__.py | 6 + .../mgmt/postgresql/_utils/model_base.py | 1327 ++ .../mgmt/postgresql/_utils/serialization.py | 2041 +++ .../azure/mgmt/postgresql/_validation.py | 66 + .../azure/mgmt/postgresql/_version.py | 9 + .../azure/mgmt/postgresql/aio/__init__.py | 29 + .../azure/mgmt/postgresql/aio/_client.py | 269 + .../mgmt/postgresql/aio/_configuration.py | 80 + .../azure/mgmt/postgresql/aio/_patch.py | 21 + .../postgresql/aio/operations/__init__.py | 69 + .../postgresql/aio/operations/_operations.py | 9588 +++++++++++++ .../mgmt/postgresql/aio/operations/_patch.py | 21 + .../azure/mgmt/postgresql/models/__init__.py | 378 + .../azure/mgmt/postgresql/models/_enums.py | 674 + .../azure/mgmt/postgresql/models/_models.py | 5941 ++++++++ .../azure/mgmt/postgresql/models/_patch.py | 21 + 
.../mgmt/postgresql/operations/__init__.py | 69 + .../mgmt/postgresql/operations/_operations.py | 11393 ++++++++++++++++ .../mgmt/postgresql/operations/_patch.py | 21 + .../azure/mgmt/postgresql/py.typed | 1 + .../dev_requirements.txt | 5 + .../administrators_microsoft_entra_add.py | 50 + .../administrators_microsoft_entra_delete.py | 42 + .../administrators_microsoft_entra_get.py | 43 + ...strators_microsoft_entra_list_by_server.py | 43 + ...at_protection_settings_create_or_update.py | 44 + ...advanced_threat_protection_settings_get.py | 43 + ...reat_protection_settings_list_by_server.py | 43 + .../backups_automatic_and_on_demand_create.py | 43 + .../backups_automatic_and_on_demand_delete.py | 42 + .../backups_automatic_and_on_demand_get.py | 43 + ..._automatic_and_on_demand_list_by_server.py | 43 + ...long_term_retention_check_prerequisites.py | 43 + .../backups_long_term_retention_get.py | 43 + ...kups_long_term_retention_list_by_server.py | 43 + .../backups_long_term_retention_start.py | 43 + .../capabilities_by_location_list.py | 42 + .../capabilities_by_server_list.py | 43 + .../captured_logs_list_by_server.py | 43 + .../generated_samples/configurations_get.py | 43 + .../configurations_list_by_server.py | 43 + .../configurations_update.py | 44 + .../configurations_update_using_put.py | 44 + .../generated_samples/databases_create.py | 44 + .../generated_samples/databases_delete.py | 42 + .../generated_samples/databases_get.py | 43 + .../databases_list_by_server.py | 43 + .../firewall_rules_create_or_update.py | 44 + .../firewall_rules_delete.py | 42 + .../generated_samples/firewall_rules_get.py | 43 + .../firewall_rules_list_by_server.py | 43 + .../generated_samples/migrations_cancel.py | 43 + .../migrations_check_name_availability.py | 43 + .../generated_samples/migrations_create.py | 59 + ...ate_other_source_types_validate_migrate.py | 61 + .../migrations_create_validate_only.py | 60 + ...create_with_fully_qualified_domain_name.py | 58 + 
.../migrations_create_with_other_users.py | 60 + ...ns_create_with_private_endpoint_servers.py | 60 + .../migrations_create_with_roles.py | 60 + .../generated_samples/migrations_get.py | 43 + ...ith_successful_validation_and_migration.py | 43 + ...essful_validation_but_migration_failure.py | 43 + ...gration_with_successful_validation_only.py | 43 + ..._get_migration_with_validation_failures.py | 43 + .../migrations_list_by_target_server.py | 43 + .../generated_samples/migrations_update.py | 44 + .../name_availability_check_globally.py | 41 + .../name_availability_check_with_location.py | 42 + .../generated_samples/operations_list.py | 40 + .../private_dns_zone_suffix_get.py | 39 + .../private_endpoint_connections_delete.py | 42 + .../private_endpoint_connections_get.py | 43 + .../private_endpoint_connections_list.py | 43 + .../private_endpoint_connections_update.py | 51 + .../private_link_resources_get.py | 43 + .../private_link_resources_list.py | 43 + .../quota_usages_for_flexible_servers.py | 42 + .../replicas_list_by_server.py | 43 + .../servers_cluster_create.py | 57 + ...eo_restore_with_data_encryption_enabled.py | 65 + ...ith_data_encryption_enabled_auto_update.py | 65 + ...k_with_zone_redundant_high_availability.py | 59 + ...etwork_with_same_zone_high_availability.py | 62 + .../servers_create_point_in_time_restore.py | 51 + .../servers_create_replica.py | 64 + .../servers_create_revive_dropped.py | 51 + ...ers_create_with_data_encryption_enabled.py | 74 + ...ith_data_encryption_enabled_auto_update.py | 74 + ...rtual_network_without_high_availability.py | 67 + .../generated_samples/servers_delete.py | 41 + .../generated_samples/servers_get.py | 42 + .../servers_get_with_private_endpoints.py | 42 + .../servers_get_with_vnet.py | 42 + .../servers_list_by_resource_group.py | 42 + .../servers_list_by_subscription.py | 40 + .../servers_migrate_network_mode.py | 42 + ...ote_replica_as_forced_standalone_server.py | 43 + ...rs_promote_replica_as_forced_switchover.py 
| 43 + ...te_replica_as_planned_standalone_server.py | 43 + ...s_promote_replica_as_planned_switchover.py | 43 + .../generated_samples/servers_restart.py | 41 + .../servers_restart_with_failover.py | 41 + .../generated_samples/servers_start.py | 41 + .../generated_samples/servers_stop.py | 41 + .../generated_samples/servers_update.py | 51 + ...s_update_with_custom_maintenance_window.py | 48 + ...ers_update_with_data_encryption_enabled.py | 65 + ...ith_data_encryption_enabled_auto_update.py | 65 + ...rvers_update_with_major_version_upgrade.py | 43 + ...ers_update_with_microsoft_entra_enabled.py | 56 + .../generated_samples/tuning_options_get.py | 43 + .../tuning_options_list_by_server.py | 43 + ...ning_options_list_index_recommendations.py | 44 + ...commendations_filtered_for_create_index.py | 44 + ...ning_options_list_table_recommendations.py | 44 + ...ommendations_filtered_for_analyze_table.py | 44 + .../virtual_endpoint_create.py | 44 + .../virtual_endpoint_delete.py | 42 + .../virtual_endpoint_update.py | 44 + .../virtual_endpoints_get.py | 43 + .../virtual_endpoints_list_by_server.py | 43 + .../virtual_network_subnet_usage_list.py | 45 + .../generated_tests/conftest.py | 39 + ...ministrators_microsoft_entra_operations.py | 67 + ...rators_microsoft_entra_operations_async.py | 72 + ...d_threat_protection_settings_operations.py | 42 + ...at_protection_settings_operations_async.py | 43 + ...kups_automatic_and_on_demand_operations.py | 66 + ...utomatic_and_on_demand_operations_async.py | 71 + ..._backups_long_term_retention_operations.py | 66 + ...ps_long_term_retention_operations_async.py | 69 + ...ent_capabilities_by_location_operations.py | 29 + ...pabilities_by_location_operations_async.py | 30 + ...ement_capabilities_by_server_operations.py | 30 + ...capabilities_by_server_operations_async.py | 31 + ...sql_management_captured_logs_operations.py | 30 + ...nagement_captured_logs_operations_async.py | 31 + ...ql_management_configurations_operations.py | 96 + 
...agement_configurations_operations_async.py | 101 + ...gre_sql_management_databases_operations.py | 80 + ...l_management_databases_operations_async.py | 85 + ...ql_management_firewall_rules_operations.py | 80 + ...agement_firewall_rules_operations_async.py | 85 + ...re_sql_management_migrations_operations.py | 218 + ..._management_migrations_operations_async.py | 219 + ...management_name_availability_operations.py | 40 + ...ment_name_availability_operations_async.py | 41 + .../test_postgre_sql_management_operations.py | 27 + ...postgre_sql_management_operations_async.py | 28 + ...ment_private_dns_zone_suffix_operations.py | 27 + ...rivate_dns_zone_suffix_operations_async.py | 28 + ...private_endpoint_connections_operations.py | 89 + ...e_endpoint_connections_operations_async.py | 94 + ...ement_private_link_resources_operations.py | 42 + ...private_link_resources_operations_async.py | 43 + ..._sql_management_quota_usages_operations.py | 29 + ...anagement_quota_usages_operations_async.py | 30 + ...tgre_sql_management_replicas_operations.py | 30 + ...ql_management_replicas_operations_async.py | 31 + ...r_threat_protection_settings_operations.py | 45 + ...at_protection_settings_operations_async.py | 48 + ...stgre_sql_management_servers_operations.py | 280 + ...sql_management_servers_operations_async.py | 295 + ...ql_management_tuning_options_operations.py | 54 + ...agement_tuning_options_operations_async.py | 55 + ...management_virtual_endpoints_operations.py | 93 + ...ment_virtual_endpoints_operations_async.py | 100 + ...virtual_network_subnet_usage_operations.py | 30 + ...l_network_subnet_usage_operations_async.py | 31 + .../azure-mgmt-postgresql/pyproject.toml | 86 + .../azure-mgmt-postgresql/tsp-location.yaml | 4 + sdk/postgresql/ci.yml | 34 + 185 files changed, 41042 insertions(+) create mode 100644 sdk/postgresql/azure-mgmt-postgresql/CHANGELOG.md create mode 100644 sdk/postgresql/azure-mgmt-postgresql/LICENSE create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/MANIFEST.in create mode 100644 sdk/postgresql/azure-mgmt-postgresql/README.md create mode 100644 sdk/postgresql/azure-mgmt-postgresql/_metadata.json create mode 100644 sdk/postgresql/azure-mgmt-postgresql/apiview-properties.json create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_client.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_configuration.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_patch.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/model_base.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/serialization.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_validation.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_version.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_client.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_configuration.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_patch.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_patch.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_enums.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_models.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_patch.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/__init__.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_patch.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/py.typed create mode 100644 sdk/postgresql/azure-mgmt-postgresql/dev_requirements.txt create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_add.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_create_or_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_create.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_delete.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_check_prerequisites.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_start.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_location_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_server_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/captured_logs_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update_using_put.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_create.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_create_or_update.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_cancel.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_check_name_availability.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_other_source_types_validate_migrate.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_validate_only.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_fully_qualified_domain_name.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_other_users.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_private_endpoint_servers.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_roles.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_and_migration.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_but_migration_failure.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_only.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_validation_failures.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_list_by_target_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_globally.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_with_location.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/operations_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_dns_zone_suffix_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_link_resources_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_link_resources_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/quota_usages_for_flexible_servers.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/replicas_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_cluster_create.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled_auto_update.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_in_microsoft_owned_virtual_network_with_zone_redundant_high_availability.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_in_your_own_virtual_network_with_same_zone_high_availability.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_point_in_time_restore.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_replica.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_revive_dropped.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled_auto_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_microsoft_entra_enabled_in_your_own_virtual_network_without_high_availability.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_private_endpoints.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_vnet.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_resource_group.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_subscription.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_migrate_network_mode.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_standalone_server.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_switchover.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_standalone_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_switchover.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart_with_failover.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_start.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_stop.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_custom_maintenance_window.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled_auto_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_major_version_upgrade.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_microsoft_entra_enabled.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations_filtered_for_create_index.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations_filtered_for_analyze_table.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_endpoint_create.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_endpoint_delete.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_endpoint_update.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_endpoints_get.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_endpoints_list_by_server.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_samples/virtual_network_subnet_usage_list.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/conftest.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_administrators_microsoft_entra_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_administrators_microsoft_entra_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_advanced_threat_protection_settings_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_advanced_threat_protection_settings_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_automatic_and_on_demand_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_automatic_and_on_demand_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_long_term_retention_operations.py 
create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_long_term_retention_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations.py create mode 100644 
sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations_async.py create mode 
100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations_async.py create mode 100644 sdk/postgresql/azure-mgmt-postgresql/pyproject.toml create mode 100644 sdk/postgresql/azure-mgmt-postgresql/tsp-location.yaml create mode 100644 sdk/postgresql/ci.yml diff --git a/sdk/postgresql/azure-mgmt-postgresql/CHANGELOG.md b/sdk/postgresql/azure-mgmt-postgresql/CHANGELOG.md new file mode 100644 index 000000000000..9567247f5196 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 1.0.0b1 (2026-01-20) + +### Other Changes + + - Initial version \ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/LICENSE 
b/sdk/postgresql/azure-mgmt-postgresql/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/MANIFEST.in b/sdk/postgresql/azure-mgmt-postgresql/MANIFEST.in new file mode 100644 index 000000000000..7a94095a1239 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/MANIFEST.in @@ -0,0 +1,7 @@ +include *.md +include LICENSE +include azure/mgmt/postgresql/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/mgmt/__init__.py diff --git a/sdk/postgresql/azure-mgmt-postgresql/README.md b/sdk/postgresql/azure-mgmt-postgresql/README.md new file mode 100644 index 000000000000..9d73fecd0f4d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/README.md @@ -0,0 +1,61 @@ +# Microsoft Azure SDK for Python + +This is the Microsoft Azure Postgresql Management Client Library. +This package has been tested with Python 3.9+. +For a more complete view of Azure libraries, see the [azure sdk python release](https://aka.ms/azsdk/python/all). + +## _Disclaimer_ + +_Azure SDK Python packages support for Python 2.7 has ended 01 January 2022. For more information and questions, please refer to https://github.com/Azure/azure-sdk-for-python/issues/20691_ + +## Getting started + +### Prerequisites + +- Python 3.9+ is required to use this package. +- [Azure subscription](https://azure.microsoft.com/free/) + +### Install the package + +```bash +pip install azure-mgmt-postgresql +pip install azure-identity +``` + +### Authentication + +By default, [Azure Active Directory](https://aka.ms/awps/aad) token authentication depends on correct configuration of the following environment variables. + +- `AZURE_CLIENT_ID` for Azure client ID. +- `AZURE_TENANT_ID` for Azure tenant ID. +- `AZURE_CLIENT_SECRET` for Azure client secret. + +In addition, Azure subscription ID can be configured via environment variable `AZURE_SUBSCRIPTION_ID`. 
+ +With the above configuration, the client can be authenticated with the following code: + +```python +from azure.identity import DefaultAzureCredential +from azure.mgmt.postgresql import PostgreSQLManagementClient +import os + +sub_id = os.getenv("AZURE_SUBSCRIPTION_ID") +client = PostgreSQLManagementClient(credential=DefaultAzureCredential(), subscription_id=sub_id) +``` + +## Examples + +Code samples for this package can be found at: +- [Search Postgresql Management](https://docs.microsoft.com/samples/browse/?languages=python&term=Getting%20started%20-%20Managing&terms=Getting%20started%20-%20Managing) on docs.microsoft.com +- [Azure Python Mgmt SDK Samples Repo](https://aka.ms/azsdk/python/mgmt/samples) + + +## Troubleshooting + +## Next steps + +## Provide Feedback + +If you encounter any bugs or have suggestions, please file an issue in the +[Issues](https://github.com/Azure/azure-sdk-for-python/issues) +section of the project. diff --git a/sdk/postgresql/azure-mgmt-postgresql/_metadata.json b/sdk/postgresql/azure-mgmt-postgresql/_metadata.json new file mode 100644 index 000000000000..07dafd86a259 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/_metadata.json @@ -0,0 +1,7 @@ +{ + "apiVersion": "2026-01-01-preview", + "commit": "74fd6c09ef3546c7997e174cb67002b5b273f381", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/postgresql/DBforPostgreSQL.Management", + "emitterVersion": "0.57.1" +} \ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/apiview-properties.json b/sdk/postgresql/azure-mgmt-postgresql/apiview-properties.json new file mode 100644 index 000000000000..c60998c0a67f --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/apiview-properties.json @@ -0,0 +1,314 @@ +{ + "CrossLanguagePackageId": "Microsoft.DBforPostgreSQL", + "CrossLanguageDefinitionId": { + "azure.mgmt.postgresql.models.AdminCredentials": "Microsoft.DBforPostgreSQL.AdminCredentials", + 
"azure.mgmt.postgresql.models.AdminCredentialsForPatch": "Microsoft.DBforPostgreSQL.AdminCredentialsForPatch", + "azure.mgmt.postgresql.models.Resource": "Azure.ResourceManager.CommonTypes.Resource", + "azure.mgmt.postgresql.models.ProxyResource": "Azure.ResourceManager.CommonTypes.ProxyResource", + "azure.mgmt.postgresql.models.AdministratorMicrosoftEntra": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntra", + "azure.mgmt.postgresql.models.AdministratorMicrosoftEntraAdd": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntraAdd", + "azure.mgmt.postgresql.models.AdministratorMicrosoftEntraProperties": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntraProperties", + "azure.mgmt.postgresql.models.AdministratorMicrosoftEntraPropertiesForAdd": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntraPropertiesForAdd", + "azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModel", + "azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsProperties": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsProperties", + "azure.mgmt.postgresql.models.AuthConfig": "Microsoft.DBforPostgreSQL.AuthConfig", + "azure.mgmt.postgresql.models.AuthConfigForPatch": "Microsoft.DBforPostgreSQL.AuthConfigForPatch", + "azure.mgmt.postgresql.models.Backup": "Microsoft.DBforPostgreSQL.Backup", + "azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemand", + "azure.mgmt.postgresql.models.BackupAutomaticAndOnDemandProperties": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemandProperties", + "azure.mgmt.postgresql.models.BackupForPatch": "Microsoft.DBforPostgreSQL.BackupForPatch", + "azure.mgmt.postgresql.models.BackupRequestBase": "Microsoft.DBforPostgreSQL.BackupRequestBase", + "azure.mgmt.postgresql.models.BackupSettings": "Microsoft.DBforPostgreSQL.BackupSettings", + "azure.mgmt.postgresql.models.BackupsLongTermRetentionOperation": 
"Microsoft.DBforPostgreSQL.BackupsLongTermRetentionOperation", + "azure.mgmt.postgresql.models.BackupsLongTermRetentionRequest": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionRequest", + "azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionResponse", + "azure.mgmt.postgresql.models.BackupsLongTermRetentionResponseProperties": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionResponseProperties", + "azure.mgmt.postgresql.models.BackupStoreDetails": "Microsoft.DBforPostgreSQL.BackupStoreDetails", + "azure.mgmt.postgresql.models.CapabilityBase": "Microsoft.DBforPostgreSQL.CapabilityBase", + "azure.mgmt.postgresql.models.Capability": "Microsoft.DBforPostgreSQL.Capability", + "azure.mgmt.postgresql.models.CapturedLog": "Microsoft.DBforPostgreSQL.CapturedLog", + "azure.mgmt.postgresql.models.CapturedLogProperties": "Microsoft.DBforPostgreSQL.CapturedLogProperties", + "azure.mgmt.postgresql.models.CheckNameAvailabilityRequest": "Azure.ResourceManager.CommonTypes.CheckNameAvailabilityRequest", + "azure.mgmt.postgresql.models.CheckNameAvailabilityResponse": "Azure.ResourceManager.CommonTypes.CheckNameAvailabilityResponse", + "azure.mgmt.postgresql.models.Cluster": "Microsoft.DBforPostgreSQL.Cluster", + "azure.mgmt.postgresql.models.Configuration": "Microsoft.DBforPostgreSQL.Configuration", + "azure.mgmt.postgresql.models.ConfigurationForUpdate": "Microsoft.DBforPostgreSQL.ConfigurationForUpdate", + "azure.mgmt.postgresql.models.ConfigurationProperties": "Microsoft.DBforPostgreSQL.ConfigurationProperties", + "azure.mgmt.postgresql.models.Database": "Microsoft.DBforPostgreSQL.Database", + "azure.mgmt.postgresql.models.DatabaseMigrationState": "Microsoft.DBforPostgreSQL.DatabaseMigrationState", + "azure.mgmt.postgresql.models.DatabaseProperties": "Microsoft.DBforPostgreSQL.DatabaseProperties", + "azure.mgmt.postgresql.models.DataEncryption": "Microsoft.DBforPostgreSQL.DataEncryption", + 
"azure.mgmt.postgresql.models.DbLevelValidationStatus": "Microsoft.DBforPostgreSQL.DbLevelValidationStatus", + "azure.mgmt.postgresql.models.DbServerMetadata": "Microsoft.DBforPostgreSQL.DbServerMetadata", + "azure.mgmt.postgresql.models.DelegatedSubnetUsage": "Microsoft.DBforPostgreSQL.DelegatedSubnetUsage", + "azure.mgmt.postgresql.models.ErrorAdditionalInfo": "Azure.ResourceManager.CommonTypes.ErrorAdditionalInfo", + "azure.mgmt.postgresql.models.ErrorDetail": "Azure.ResourceManager.CommonTypes.ErrorDetail", + "azure.mgmt.postgresql.models.ErrorResponse": "Azure.ResourceManager.CommonTypes.ErrorResponse", + "azure.mgmt.postgresql.models.FastProvisioningEditionCapability": "Microsoft.DBforPostgreSQL.FastProvisioningEditionCapability", + "azure.mgmt.postgresql.models.FirewallRule": "Microsoft.DBforPostgreSQL.FirewallRule", + "azure.mgmt.postgresql.models.FirewallRuleProperties": "Microsoft.DBforPostgreSQL.FirewallRuleProperties", + "azure.mgmt.postgresql.models.HighAvailability": "Microsoft.DBforPostgreSQL.HighAvailability", + "azure.mgmt.postgresql.models.HighAvailabilityForPatch": "Microsoft.DBforPostgreSQL.HighAvailabilityForPatch", + "azure.mgmt.postgresql.models.ImpactRecord": "Microsoft.DBforPostgreSQL.ImpactRecord", + "azure.mgmt.postgresql.models.LtrBackupOperationResponseProperties": "Microsoft.DBforPostgreSQL.LtrBackupOperationResponseProperties", + "azure.mgmt.postgresql.models.LtrPreBackupRequest": "Microsoft.DBforPostgreSQL.LtrPreBackupRequest", + "azure.mgmt.postgresql.models.LtrPreBackupResponse": "Microsoft.DBforPostgreSQL.LtrPreBackupResponse", + "azure.mgmt.postgresql.models.MaintenanceWindow": "Microsoft.DBforPostgreSQL.MaintenanceWindow", + "azure.mgmt.postgresql.models.MaintenanceWindowForPatch": "Microsoft.DBforPostgreSQL.MaintenanceWindowForPatch", + "azure.mgmt.postgresql.models.MigrateNetworkStatus": "Microsoft.DBforPostgreSQL.MigrateNetworkStatus", + "azure.mgmt.postgresql.models.TrackedResource": 
"Azure.ResourceManager.CommonTypes.TrackedResource", + "azure.mgmt.postgresql.models.Migration": "Microsoft.DBforPostgreSQL.Migration", + "azure.mgmt.postgresql.models.MigrationNameAvailability": "Microsoft.DBforPostgreSQL.MigrationNameAvailability", + "azure.mgmt.postgresql.models.MigrationProperties": "Microsoft.DBforPostgreSQL.MigrationProperties", + "azure.mgmt.postgresql.models.MigrationPropertiesForPatch": "Microsoft.DBforPostgreSQL.MigrationPropertiesForPatch", + "azure.mgmt.postgresql.models.MigrationResourceForPatch": "Microsoft.DBforPostgreSQL.MigrationResourceForPatch", + "azure.mgmt.postgresql.models.MigrationSecretParameters": "Microsoft.DBforPostgreSQL.MigrationSecretParameters", + "azure.mgmt.postgresql.models.MigrationSecretParametersForPatch": "Microsoft.DBforPostgreSQL.MigrationSecretParametersForPatch", + "azure.mgmt.postgresql.models.MigrationStatus": "Microsoft.DBforPostgreSQL.MigrationStatus", + "azure.mgmt.postgresql.models.MigrationSubstateDetails": "Microsoft.DBforPostgreSQL.MigrationSubstateDetails", + "azure.mgmt.postgresql.models.NameAvailabilityModel": "Microsoft.DBforPostgreSQL.NameAvailabilityModel", + "azure.mgmt.postgresql.models.NameProperty": "Microsoft.DBforPostgreSQL.NameProperty", + "azure.mgmt.postgresql.models.Network": "Microsoft.DBforPostgreSQL.Network", + "azure.mgmt.postgresql.models.ObjectRecommendation": "Microsoft.DBforPostgreSQL.ObjectRecommendation", + "azure.mgmt.postgresql.models.ObjectRecommendationDetails": "Microsoft.DBforPostgreSQL.ObjectRecommendationDetails", + "azure.mgmt.postgresql.models.ObjectRecommendationProperties": "Microsoft.DBforPostgreSQL.ObjectRecommendationProperties", + "azure.mgmt.postgresql.models.ObjectRecommendationPropertiesAnalyzedWorkload": "Microsoft.DBforPostgreSQL.ObjectRecommendationPropertiesAnalyzedWorkload", + "azure.mgmt.postgresql.models.ObjectRecommendationPropertiesImplementationDetails": "Microsoft.DBforPostgreSQL.ObjectRecommendationPropertiesImplementationDetails", + 
"azure.mgmt.postgresql.models.Operation": "Microsoft.DBforPostgreSQL.Operation", + "azure.mgmt.postgresql.models.OperationDisplay": "Microsoft.DBforPostgreSQL.OperationDisplay", + "azure.mgmt.postgresql.models.PrivateEndpoint": "Azure.ResourceManager.CommonTypes.PrivateEndpoint", + "azure.mgmt.postgresql.models.PrivateEndpointConnection": "Microsoft.DBforPostgreSQL.PrivateEndpointConnection", + "azure.mgmt.postgresql.models.PrivateEndpointConnectionProperties": "Azure.ResourceManager.CommonTypes.PrivateEndpointConnectionProperties", + "azure.mgmt.postgresql.models.PrivateLinkResource": "Microsoft.DBforPostgreSQL.PrivateLinkResource", + "azure.mgmt.postgresql.models.PrivateLinkResourceProperties": "Azure.ResourceManager.CommonTypes.PrivateLinkResourceProperties", + "azure.mgmt.postgresql.models.PrivateLinkServiceConnectionState": "Azure.ResourceManager.CommonTypes.PrivateLinkServiceConnectionState", + "azure.mgmt.postgresql.models.QuotaUsage": "Microsoft.DBforPostgreSQL.QuotaUsage", + "azure.mgmt.postgresql.models.Replica": "Microsoft.DBforPostgreSQL.Replica", + "azure.mgmt.postgresql.models.RestartParameter": "Microsoft.DBforPostgreSQL.RestartParameter", + "azure.mgmt.postgresql.models.Server": "Microsoft.DBforPostgreSQL.Server", + "azure.mgmt.postgresql.models.ServerEditionCapability": "Microsoft.DBforPostgreSQL.ServerEditionCapability", + "azure.mgmt.postgresql.models.ServerForPatch": "Microsoft.DBforPostgreSQL.ServerForPatch", + "azure.mgmt.postgresql.models.ServerProperties": "Microsoft.DBforPostgreSQL.ServerProperties", + "azure.mgmt.postgresql.models.ServerPropertiesForPatch": "Microsoft.DBforPostgreSQL.ServerPropertiesForPatch", + "azure.mgmt.postgresql.models.ServerSku": "Microsoft.DBforPostgreSQL.ServerSku", + "azure.mgmt.postgresql.models.ServerSkuCapability": "Microsoft.DBforPostgreSQL.ServerSkuCapability", + "azure.mgmt.postgresql.models.ServerVersionCapability": "Microsoft.DBforPostgreSQL.ServerVersionCapability", + "azure.mgmt.postgresql.models.Sku": 
"Microsoft.DBforPostgreSQL.Sku", + "azure.mgmt.postgresql.models.SkuForPatch": "Microsoft.DBforPostgreSQL.SkuForPatch", + "azure.mgmt.postgresql.models.Storage": "Microsoft.DBforPostgreSQL.Storage", + "azure.mgmt.postgresql.models.StorageEditionCapability": "Microsoft.DBforPostgreSQL.StorageEditionCapability", + "azure.mgmt.postgresql.models.StorageMbCapability": "Microsoft.DBforPostgreSQL.StorageMbCapability", + "azure.mgmt.postgresql.models.StorageTierCapability": "Microsoft.DBforPostgreSQL.StorageTierCapability", + "azure.mgmt.postgresql.models.SupportedFeature": "Microsoft.DBforPostgreSQL.SupportedFeature", + "azure.mgmt.postgresql.models.SystemData": "Azure.ResourceManager.CommonTypes.SystemData", + "azure.mgmt.postgresql.models.TuningOptions": "Microsoft.DBforPostgreSQL.TuningOptions", + "azure.mgmt.postgresql.models.UserAssignedIdentity": "Microsoft.DBforPostgreSQL.UserAssignedIdentity", + "azure.mgmt.postgresql.models.UserIdentity": "Microsoft.DBforPostgreSQL.UserIdentity", + "azure.mgmt.postgresql.models.ValidationDetails": "Microsoft.DBforPostgreSQL.ValidationDetails", + "azure.mgmt.postgresql.models.ValidationMessage": "Microsoft.DBforPostgreSQL.ValidationMessage", + "azure.mgmt.postgresql.models.ValidationSummaryItem": "Microsoft.DBforPostgreSQL.ValidationSummaryItem", + "azure.mgmt.postgresql.models.VirtualEndpoint": "Microsoft.DBforPostgreSQL.VirtualEndpoint", + "azure.mgmt.postgresql.models.VirtualEndpointResourceForPatch": "Microsoft.DBforPostgreSQL.VirtualEndpointResourceForPatch", + "azure.mgmt.postgresql.models.VirtualEndpointResourceProperties": "Microsoft.DBforPostgreSQL.VirtualEndpointResourceProperties", + "azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel": "Microsoft.DBforPostgreSQL.VirtualNetworkSubnetUsageModel", + "azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageParameter": "Microsoft.DBforPostgreSQL.VirtualNetworkSubnetUsageParameter", + "azure.mgmt.postgresql.models.OperationOrigin": 
"Microsoft.DBforPostgreSQL.OperationOrigin", + "azure.mgmt.postgresql.models.CreatedByType": "Azure.ResourceManager.CommonTypes.createdByType", + "azure.mgmt.postgresql.models.MigrationState": "Microsoft.DBforPostgreSQL.MigrationState", + "azure.mgmt.postgresql.models.MigrationSubstate": "Microsoft.DBforPostgreSQL.MigrationSubstate", + "azure.mgmt.postgresql.models.MigrationDatabaseState": "Microsoft.DBforPostgreSQL.MigrationDatabaseState", + "azure.mgmt.postgresql.models.ValidationState": "Microsoft.DBforPostgreSQL.ValidationState", + "azure.mgmt.postgresql.models.MigrationMode": "Microsoft.DBforPostgreSQL.MigrationMode", + "azure.mgmt.postgresql.models.MigrationOption": "Microsoft.DBforPostgreSQL.MigrationOption", + "azure.mgmt.postgresql.models.SourceType": "Microsoft.DBforPostgreSQL.SourceType", + "azure.mgmt.postgresql.models.SslMode": "Microsoft.DBforPostgreSQL.SslMode", + "azure.mgmt.postgresql.models.SkuTier": "Microsoft.DBforPostgreSQL.SkuTier", + "azure.mgmt.postgresql.models.LogicalReplicationOnSourceServer": "Microsoft.DBforPostgreSQL.LogicalReplicationOnSourceServer", + "azure.mgmt.postgresql.models.OverwriteDatabasesOnTargetServer": "Microsoft.DBforPostgreSQL.OverwriteDatabasesOnTargetServer", + "azure.mgmt.postgresql.models.MigrateRolesAndPermissions": "Microsoft.DBforPostgreSQL.MigrateRolesAndPermissions", + "azure.mgmt.postgresql.models.StartDataMigration": "Microsoft.DBforPostgreSQL.StartDataMigration", + "azure.mgmt.postgresql.models.TriggerCutover": "Microsoft.DBforPostgreSQL.TriggerCutover", + "azure.mgmt.postgresql.models.Cancel": "Microsoft.DBforPostgreSQL.Cancel", + "azure.mgmt.postgresql.models.MigrationListFilter": "Microsoft.DBforPostgreSQL.MigrationListFilter", + "azure.mgmt.postgresql.models.MigrationNameAvailabilityReason": "Microsoft.DBforPostgreSQL.MigrationNameAvailabilityReason", + "azure.mgmt.postgresql.models.PostgresMajorVersion": "Microsoft.DBforPostgreSQL.PostgresMajorVersion", + "azure.mgmt.postgresql.models.ServerState": 
"Microsoft.DBforPostgreSQL.ServerState", + "azure.mgmt.postgresql.models.StorageAutoGrow": "Microsoft.DBforPostgreSQL.StorageAutoGrow", + "azure.mgmt.postgresql.models.AzureManagedDiskPerformanceTier": "Microsoft.DBforPostgreSQL.AzureManagedDiskPerformanceTier", + "azure.mgmt.postgresql.models.StorageType": "Microsoft.DBforPostgreSQL.StorageType", + "azure.mgmt.postgresql.models.MicrosoftEntraAuth": "Microsoft.DBforPostgreSQL.MicrosoftEntraAuth", + "azure.mgmt.postgresql.models.PasswordBasedAuth": "Microsoft.DBforPostgreSQL.PasswordBasedAuth", + "azure.mgmt.postgresql.models.DataEncryptionType": "Microsoft.DBforPostgreSQL.DataEncryptionType", + "azure.mgmt.postgresql.models.EncryptionKeyStatus": "Microsoft.DBforPostgreSQL.EncryptionKeyStatus", + "azure.mgmt.postgresql.models.GeographicallyRedundantBackup": "Microsoft.DBforPostgreSQL.GeographicallyRedundantBackup", + "azure.mgmt.postgresql.models.ServerPublicNetworkAccessState": "Microsoft.DBforPostgreSQL.ServerPublicNetworkAccessState", + "azure.mgmt.postgresql.models.PostgreSqlFlexibleServerHighAvailabilityMode": "Microsoft.DBforPostgreSQL.PostgreSqlFlexibleServerHighAvailabilityMode", + "azure.mgmt.postgresql.models.HighAvailabilityState": "Microsoft.DBforPostgreSQL.HighAvailabilityState", + "azure.mgmt.postgresql.models.ReplicationRole": "Microsoft.DBforPostgreSQL.ReplicationRole", + "azure.mgmt.postgresql.models.ReplicationState": "Microsoft.DBforPostgreSQL.ReplicationState", + "azure.mgmt.postgresql.models.ReadReplicaPromoteMode": "Microsoft.DBforPostgreSQL.ReadReplicaPromoteMode", + "azure.mgmt.postgresql.models.ReadReplicaPromoteOption": "Microsoft.DBforPostgreSQL.ReadReplicaPromoteOption", + "azure.mgmt.postgresql.models.CreateMode": "Microsoft.DBforPostgreSQL.CreateMode", + "azure.mgmt.postgresql.models.PrivateEndpointServiceConnectionStatus": "Azure.ResourceManager.CommonTypes.PrivateEndpointServiceConnectionStatus", + "azure.mgmt.postgresql.models.PrivateEndpointConnectionProvisioningState": 
"Azure.ResourceManager.CommonTypes.PrivateEndpointConnectionProvisioningState", + "azure.mgmt.postgresql.models.IdentityType": "Microsoft.DBforPostgreSQL.IdentityType", + "azure.mgmt.postgresql.models.CreateModeForPatch": "Microsoft.DBforPostgreSQL.CreateModeForPatch", + "azure.mgmt.postgresql.models.FailoverMode": "Microsoft.DBforPostgreSQL.FailoverMode", + "azure.mgmt.postgresql.models.NetworkMigrationState": "Microsoft.DBforPostgreSQL.NetworkMigrationState", + "azure.mgmt.postgresql.models.ConfigurationDataType": "Microsoft.DBforPostgreSQL.ConfigurationDataType", + "azure.mgmt.postgresql.models.VirtualEndpointType": "Microsoft.DBforPostgreSQL.VirtualEndpointType", + "azure.mgmt.postgresql.models.PrincipalType": "Microsoft.DBforPostgreSQL.PrincipalType", + "azure.mgmt.postgresql.models.CapabilityStatus": "Microsoft.DBforPostgreSQL.CapabilityStatus", + "azure.mgmt.postgresql.models.HighAvailabilityMode": "Microsoft.DBforPostgreSQL.HighAvailabilityMode", + "azure.mgmt.postgresql.models.FeatureStatus": "Microsoft.DBforPostgreSQL.FeatureStatus", + "azure.mgmt.postgresql.models.FastProvisioningSupport": "Microsoft.DBforPostgreSQL.FastProvisioningSupport", + "azure.mgmt.postgresql.models.GeographicallyRedundantBackupSupport": "Microsoft.DBforPostgreSQL.GeographicallyRedundantBackupSupport", + "azure.mgmt.postgresql.models.ZoneRedundantHighAvailabilitySupport": "Microsoft.DBforPostgreSQL.ZoneRedundantHighAvailabilitySupport", + "azure.mgmt.postgresql.models.ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport": "Microsoft.DBforPostgreSQL.ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport", + "azure.mgmt.postgresql.models.StorageAutoGrowthSupport": "Microsoft.DBforPostgreSQL.StorageAutoGrowthSupport", + "azure.mgmt.postgresql.models.OnlineStorageResizeSupport": "Microsoft.DBforPostgreSQL.OnlineStorageResizeSupport", + "azure.mgmt.postgresql.models.LocationRestricted": "Microsoft.DBforPostgreSQL.LocationRestricted", + 
"azure.mgmt.postgresql.models.ExecutionStatus": "Microsoft.DBforPostgreSQL.ExecutionStatus", + "azure.mgmt.postgresql.models.ThreatProtectionState": "Microsoft.DBforPostgreSQL.ThreatProtectionState", + "azure.mgmt.postgresql.models.ThreatProtectionName": "Microsoft.DBforPostgreSQL.ThreatProtectionName", + "azure.mgmt.postgresql.models.BackupType": "Microsoft.DBforPostgreSQL.BackupType", + "azure.mgmt.postgresql.models.TuningOptionParameterEnum": "Microsoft.DBforPostgreSQL.TuningOptionParameterEnum", + "azure.mgmt.postgresql.models.RecommendationTypeEnum": "Microsoft.DBforPostgreSQL.RecommendationTypeEnum", + "azure.mgmt.postgresql.models.RecommendationTypeParameterEnum": "Microsoft.DBforPostgreSQL.RecommendationTypeParameterEnum", + "azure.mgmt.postgresql.models.CheckNameAvailabilityReason": "Azure.ResourceManager.CommonTypes.CheckNameAvailabilityReason", + "azure.mgmt.postgresql.operations.Operations.list": "Azure.ResourceManager.Legacy.Operations.list", + "azure.mgmt.postgresql.aio.operations.Operations.list": "Azure.ResourceManager.Legacy.Operations.list", + "azure.mgmt.postgresql.operations.MigrationsOperations.get": "Microsoft.DBforPostgreSQL.Migrations.get", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.get": "Microsoft.DBforPostgreSQL.Migrations.get", + "azure.mgmt.postgresql.operations.MigrationsOperations.create": "Microsoft.DBforPostgreSQL.Migrations.create", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.create": "Microsoft.DBforPostgreSQL.Migrations.create", + "azure.mgmt.postgresql.operations.MigrationsOperations.update": "Microsoft.DBforPostgreSQL.Migrations.update", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.update": "Microsoft.DBforPostgreSQL.Migrations.update", + "azure.mgmt.postgresql.operations.MigrationsOperations.cancel": "Microsoft.DBforPostgreSQL.Migrations.cancel", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.cancel": "Microsoft.DBforPostgreSQL.Migrations.cancel", + 
"azure.mgmt.postgresql.operations.MigrationsOperations.list_by_target_server": "Microsoft.DBforPostgreSQL.Migrations.listByTargetServer", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.list_by_target_server": "Microsoft.DBforPostgreSQL.Migrations.listByTargetServer", + "azure.mgmt.postgresql.operations.MigrationsOperations.check_name_availability": "Microsoft.DBforPostgreSQL.Servers.checkNameAvailability", + "azure.mgmt.postgresql.aio.operations.MigrationsOperations.check_name_availability": "Microsoft.DBforPostgreSQL.Servers.checkNameAvailability", + "azure.mgmt.postgresql.operations.ServersOperations.get": "Microsoft.DBforPostgreSQL.Servers.get", + "azure.mgmt.postgresql.aio.operations.ServersOperations.get": "Microsoft.DBforPostgreSQL.Servers.get", + "azure.mgmt.postgresql.operations.ServersOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.Servers.createOrUpdate", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.Servers.createOrUpdate", + "azure.mgmt.postgresql.operations.ServersOperations.begin_update": "Microsoft.DBforPostgreSQL.Servers.update", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_update": "Microsoft.DBforPostgreSQL.Servers.update", + "azure.mgmt.postgresql.operations.ServersOperations.begin_delete": "Microsoft.DBforPostgreSQL.Servers.delete", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_delete": "Microsoft.DBforPostgreSQL.Servers.delete", + "azure.mgmt.postgresql.operations.ServersOperations.list_by_resource_group": "Microsoft.DBforPostgreSQL.Servers.listByResourceGroup", + "azure.mgmt.postgresql.aio.operations.ServersOperations.list_by_resource_group": "Microsoft.DBforPostgreSQL.Servers.listByResourceGroup", + "azure.mgmt.postgresql.operations.ServersOperations.list_by_subscription": "Microsoft.DBforPostgreSQL.Servers.listBySubscription", + "azure.mgmt.postgresql.aio.operations.ServersOperations.list_by_subscription": 
"Microsoft.DBforPostgreSQL.Servers.listBySubscription", + "azure.mgmt.postgresql.operations.ServersOperations.begin_restart": "Microsoft.DBforPostgreSQL.Servers.restart", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_restart": "Microsoft.DBforPostgreSQL.Servers.restart", + "azure.mgmt.postgresql.operations.ServersOperations.begin_start": "Microsoft.DBforPostgreSQL.Servers.serversStart", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_start": "Microsoft.DBforPostgreSQL.Servers.serversStart", + "azure.mgmt.postgresql.operations.ServersOperations.begin_stop": "Microsoft.DBforPostgreSQL.Servers.stop", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_stop": "Microsoft.DBforPostgreSQL.Servers.stop", + "azure.mgmt.postgresql.operations.ServersOperations.begin_migrate_network_mode": "Microsoft.DBforPostgreSQL.Servers.migrateNetworkMode", + "azure.mgmt.postgresql.aio.operations.ServersOperations.begin_migrate_network_mode": "Microsoft.DBforPostgreSQL.Servers.migrateNetworkMode", + "azure.mgmt.postgresql.operations.ConfigurationsOperations.get": "Microsoft.DBforPostgreSQL.Configurations.get", + "azure.mgmt.postgresql.aio.operations.ConfigurationsOperations.get": "Microsoft.DBforPostgreSQL.Configurations.get", + "azure.mgmt.postgresql.operations.ConfigurationsOperations.begin_put": "Microsoft.DBforPostgreSQL.Configurations.put", + "azure.mgmt.postgresql.aio.operations.ConfigurationsOperations.begin_put": "Microsoft.DBforPostgreSQL.Configurations.put", + "azure.mgmt.postgresql.operations.ConfigurationsOperations.begin_update": "Microsoft.DBforPostgreSQL.Configurations.update", + "azure.mgmt.postgresql.aio.operations.ConfigurationsOperations.begin_update": "Microsoft.DBforPostgreSQL.Configurations.update", + "azure.mgmt.postgresql.operations.ConfigurationsOperations.list_by_server": "Microsoft.DBforPostgreSQL.Configurations.listByServer", + "azure.mgmt.postgresql.aio.operations.ConfigurationsOperations.list_by_server": 
"Microsoft.DBforPostgreSQL.Configurations.listByServer", + "azure.mgmt.postgresql.operations.DatabasesOperations.get": "Microsoft.DBforPostgreSQL.Databases.get", + "azure.mgmt.postgresql.aio.operations.DatabasesOperations.get": "Microsoft.DBforPostgreSQL.Databases.get", + "azure.mgmt.postgresql.operations.DatabasesOperations.begin_create": "Microsoft.DBforPostgreSQL.Databases.create", + "azure.mgmt.postgresql.aio.operations.DatabasesOperations.begin_create": "Microsoft.DBforPostgreSQL.Databases.create", + "azure.mgmt.postgresql.operations.DatabasesOperations.begin_delete": "Microsoft.DBforPostgreSQL.Databases.delete", + "azure.mgmt.postgresql.aio.operations.DatabasesOperations.begin_delete": "Microsoft.DBforPostgreSQL.Databases.delete", + "azure.mgmt.postgresql.operations.DatabasesOperations.list_by_server": "Microsoft.DBforPostgreSQL.Databases.listByServer", + "azure.mgmt.postgresql.aio.operations.DatabasesOperations.list_by_server": "Microsoft.DBforPostgreSQL.Databases.listByServer", + "azure.mgmt.postgresql.operations.FirewallRulesOperations.get": "Microsoft.DBforPostgreSQL.FirewallRules.get", + "azure.mgmt.postgresql.aio.operations.FirewallRulesOperations.get": "Microsoft.DBforPostgreSQL.FirewallRules.get", + "azure.mgmt.postgresql.operations.FirewallRulesOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.FirewallRules.createOrUpdate", + "azure.mgmt.postgresql.aio.operations.FirewallRulesOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.FirewallRules.createOrUpdate", + "azure.mgmt.postgresql.operations.FirewallRulesOperations.begin_delete": "Microsoft.DBforPostgreSQL.FirewallRules.delete", + "azure.mgmt.postgresql.aio.operations.FirewallRulesOperations.begin_delete": "Microsoft.DBforPostgreSQL.FirewallRules.delete", + "azure.mgmt.postgresql.operations.FirewallRulesOperations.list_by_server": "Microsoft.DBforPostgreSQL.FirewallRules.listByServer", + "azure.mgmt.postgresql.aio.operations.FirewallRulesOperations.list_by_server": 
"Microsoft.DBforPostgreSQL.FirewallRules.listByServer", + "azure.mgmt.postgresql.operations.PrivateEndpointConnectionsOperations.get": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.get", + "azure.mgmt.postgresql.aio.operations.PrivateEndpointConnectionsOperations.get": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.get", + "azure.mgmt.postgresql.operations.PrivateEndpointConnectionsOperations.begin_update": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.update", + "azure.mgmt.postgresql.aio.operations.PrivateEndpointConnectionsOperations.begin_update": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.update", + "azure.mgmt.postgresql.operations.PrivateEndpointConnectionsOperations.begin_delete": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.delete", + "azure.mgmt.postgresql.aio.operations.PrivateEndpointConnectionsOperations.begin_delete": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.delete", + "azure.mgmt.postgresql.operations.PrivateEndpointConnectionsOperations.list_by_server": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.listByServer", + "azure.mgmt.postgresql.aio.operations.PrivateEndpointConnectionsOperations.list_by_server": "Microsoft.DBforPostgreSQL.PrivateEndpointConnections.listByServer", + "azure.mgmt.postgresql.operations.PrivateLinkResourcesOperations.get": "Microsoft.DBforPostgreSQL.PrivateLinkResources.get", + "azure.mgmt.postgresql.aio.operations.PrivateLinkResourcesOperations.get": "Microsoft.DBforPostgreSQL.PrivateLinkResources.get", + "azure.mgmt.postgresql.operations.PrivateLinkResourcesOperations.list_by_server": "Microsoft.DBforPostgreSQL.PrivateLinkResources.listByServer", + "azure.mgmt.postgresql.aio.operations.PrivateLinkResourcesOperations.list_by_server": "Microsoft.DBforPostgreSQL.PrivateLinkResources.listByServer", + "azure.mgmt.postgresql.operations.VirtualEndpointsOperations.get": "Microsoft.DBforPostgreSQL.VirtualEndpoints.get", + 
"azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations.get": "Microsoft.DBforPostgreSQL.VirtualEndpoints.get", + "azure.mgmt.postgresql.operations.VirtualEndpointsOperations.begin_create": "Microsoft.DBforPostgreSQL.VirtualEndpoints.create", + "azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations.begin_create": "Microsoft.DBforPostgreSQL.VirtualEndpoints.create", + "azure.mgmt.postgresql.operations.VirtualEndpointsOperations.begin_update": "Microsoft.DBforPostgreSQL.VirtualEndpoints.update", + "azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations.begin_update": "Microsoft.DBforPostgreSQL.VirtualEndpoints.update", + "azure.mgmt.postgresql.operations.VirtualEndpointsOperations.begin_delete": "Microsoft.DBforPostgreSQL.VirtualEndpoints.delete", + "azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations.begin_delete": "Microsoft.DBforPostgreSQL.VirtualEndpoints.delete", + "azure.mgmt.postgresql.operations.VirtualEndpointsOperations.list_by_server": "Microsoft.DBforPostgreSQL.VirtualEndpoints.listByServer", + "azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations.list_by_server": "Microsoft.DBforPostgreSQL.VirtualEndpoints.listByServer", + "azure.mgmt.postgresql.operations.AdministratorsMicrosoftEntraOperations.get": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.get", + "azure.mgmt.postgresql.aio.operations.AdministratorsMicrosoftEntraOperations.get": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.get", + "azure.mgmt.postgresql.operations.AdministratorsMicrosoftEntraOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.createOrUpdate", + "azure.mgmt.postgresql.aio.operations.AdministratorsMicrosoftEntraOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.createOrUpdate", + "azure.mgmt.postgresql.operations.AdministratorsMicrosoftEntraOperations.begin_delete": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.delete", + 
"azure.mgmt.postgresql.aio.operations.AdministratorsMicrosoftEntraOperations.begin_delete": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.delete", + "azure.mgmt.postgresql.operations.AdministratorsMicrosoftEntraOperations.list_by_server": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.listByServer", + "azure.mgmt.postgresql.aio.operations.AdministratorsMicrosoftEntraOperations.list_by_server": "Microsoft.DBforPostgreSQL.AdministratorMicrosoftEntras.listByServer", + "azure.mgmt.postgresql.operations.CapabilitiesByServerOperations.list": "Microsoft.DBforPostgreSQL.Servers.list", + "azure.mgmt.postgresql.aio.operations.CapabilitiesByServerOperations.list": "Microsoft.DBforPostgreSQL.Servers.list", + "azure.mgmt.postgresql.operations.CapturedLogsOperations.list_by_server": "Microsoft.DBforPostgreSQL.Servers.listByServer", + "azure.mgmt.postgresql.aio.operations.CapturedLogsOperations.list_by_server": "Microsoft.DBforPostgreSQL.Servers.listByServer", + "azure.mgmt.postgresql.operations.BackupsLongTermRetentionOperations.check_prerequisites": "Microsoft.DBforPostgreSQL.Servers.checkPrerequisites", + "azure.mgmt.postgresql.aio.operations.BackupsLongTermRetentionOperations.check_prerequisites": "Microsoft.DBforPostgreSQL.Servers.checkPrerequisites", + "azure.mgmt.postgresql.operations.BackupsLongTermRetentionOperations.begin_start": "Microsoft.DBforPostgreSQL.Servers.start", + "azure.mgmt.postgresql.aio.operations.BackupsLongTermRetentionOperations.begin_start": "Microsoft.DBforPostgreSQL.Servers.start", + "azure.mgmt.postgresql.operations.BackupsLongTermRetentionOperations.get": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionOperations.get", + "azure.mgmt.postgresql.aio.operations.BackupsLongTermRetentionOperations.get": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionOperations.get", + "azure.mgmt.postgresql.operations.BackupsLongTermRetentionOperations.list_by_server": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionOperations.listByServer", 
+ "azure.mgmt.postgresql.aio.operations.BackupsLongTermRetentionOperations.list_by_server": "Microsoft.DBforPostgreSQL.BackupsLongTermRetentionOperations.listByServer", + "azure.mgmt.postgresql.operations.ReplicasOperations.list_by_server": "Microsoft.DBforPostgreSQL.Servers.replicasListByServer", + "azure.mgmt.postgresql.aio.operations.ReplicasOperations.list_by_server": "Microsoft.DBforPostgreSQL.Servers.replicasListByServer", + "azure.mgmt.postgresql.operations.AdvancedThreatProtectionSettingsOperations.get": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.get", + "azure.mgmt.postgresql.aio.operations.AdvancedThreatProtectionSettingsOperations.get": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.get", + "azure.mgmt.postgresql.operations.AdvancedThreatProtectionSettingsOperations.list_by_server": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.listByServer", + "azure.mgmt.postgresql.aio.operations.AdvancedThreatProtectionSettingsOperations.list_by_server": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.listByServer", + "azure.mgmt.postgresql.operations.ServerThreatProtectionSettingsOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.createOrUpdate", + "azure.mgmt.postgresql.aio.operations.ServerThreatProtectionSettingsOperations.begin_create_or_update": "Microsoft.DBforPostgreSQL.AdvancedThreatProtectionSettingsModels.createOrUpdate", + "azure.mgmt.postgresql.operations.BackupsAutomaticAndOnDemandOperations.get": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.get", + "azure.mgmt.postgresql.aio.operations.BackupsAutomaticAndOnDemandOperations.get": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.get", + "azure.mgmt.postgresql.operations.BackupsAutomaticAndOnDemandOperations.begin_create": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.create", + 
"azure.mgmt.postgresql.aio.operations.BackupsAutomaticAndOnDemandOperations.begin_create": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.create", + "azure.mgmt.postgresql.operations.BackupsAutomaticAndOnDemandOperations.begin_delete": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.delete", + "azure.mgmt.postgresql.aio.operations.BackupsAutomaticAndOnDemandOperations.begin_delete": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.delete", + "azure.mgmt.postgresql.operations.BackupsAutomaticAndOnDemandOperations.list_by_server": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.listByServer", + "azure.mgmt.postgresql.aio.operations.BackupsAutomaticAndOnDemandOperations.list_by_server": "Microsoft.DBforPostgreSQL.BackupAutomaticAndOnDemands.listByServer", + "azure.mgmt.postgresql.operations.TuningOptionsOperations.get": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.get", + "azure.mgmt.postgresql.aio.operations.TuningOptionsOperations.get": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.get", + "azure.mgmt.postgresql.operations.TuningOptionsOperations.list_by_server": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.listByServer", + "azure.mgmt.postgresql.aio.operations.TuningOptionsOperations.list_by_server": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.listByServer", + "azure.mgmt.postgresql.operations.TuningOptionsOperations.list_recommendations": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.listRecommendations", + "azure.mgmt.postgresql.aio.operations.TuningOptionsOperations.list_recommendations": "Microsoft.DBforPostgreSQL.TuningOptionsOperationGroup.listRecommendations", + "azure.mgmt.postgresql.operations.CapabilitiesByLocationOperations.list": "Microsoft.DBforPostgreSQL.CapabilitiesByLocationOperationGroup.list", + "azure.mgmt.postgresql.aio.operations.CapabilitiesByLocationOperations.list": "Microsoft.DBforPostgreSQL.CapabilitiesByLocationOperationGroup.list", + 
"azure.mgmt.postgresql.operations.NameAvailabilityOperations.check_globally": "Microsoft.DBforPostgreSQL.NameAvailabilityOperationGroup.checkGlobally", + "azure.mgmt.postgresql.aio.operations.NameAvailabilityOperations.check_globally": "Microsoft.DBforPostgreSQL.NameAvailabilityOperationGroup.checkGlobally", + "azure.mgmt.postgresql.operations.NameAvailabilityOperations.check_with_location": "Microsoft.DBforPostgreSQL.NameAvailabilityOperationGroup.checkWithLocation", + "azure.mgmt.postgresql.aio.operations.NameAvailabilityOperations.check_with_location": "Microsoft.DBforPostgreSQL.NameAvailabilityOperationGroup.checkWithLocation", + "azure.mgmt.postgresql.operations.PrivateDnsZoneSuffixOperations.get": "Microsoft.DBforPostgreSQL.PrivateDnsZoneSuffixOperationGroup.get", + "azure.mgmt.postgresql.aio.operations.PrivateDnsZoneSuffixOperations.get": "Microsoft.DBforPostgreSQL.PrivateDnsZoneSuffixOperationGroup.get", + "azure.mgmt.postgresql.operations.QuotaUsagesOperations.list": "Microsoft.DBforPostgreSQL.QuotaUsagesOperationGroup.list", + "azure.mgmt.postgresql.aio.operations.QuotaUsagesOperations.list": "Microsoft.DBforPostgreSQL.QuotaUsagesOperationGroup.list", + "azure.mgmt.postgresql.operations.VirtualNetworkSubnetUsageOperations.list": "Microsoft.DBforPostgreSQL.VirtualNetworkSubnetUsageOperationGroup.list", + "azure.mgmt.postgresql.aio.operations.VirtualNetworkSubnetUsageOperations.list": "Microsoft.DBforPostgreSQL.VirtualNetworkSubnetUsageOperationGroup.list" + } +} \ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/__init__.py 
b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/__init__.py new file mode 100644 index 000000000000..6534a56248e1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import PostgreSQLManagementClient # type: ignore +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "PostgreSQLManagementClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_client.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_client.py new file mode 100644 index 000000000000..1988dff1050d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_client.py @@ -0,0 +1,265 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Optional, TYPE_CHECKING, cast +from typing_extensions import Self + +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.settings import settings +from azure.mgmt.core import ARMPipelineClient +from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy +from azure.mgmt.core.tools import get_arm_endpoints + +from ._configuration import PostgreSQLManagementClientConfiguration +from ._utils.serialization import Deserializer, Serializer +from .operations import ( + AdministratorsMicrosoftEntraOperations, + AdvancedThreatProtectionSettingsOperations, + BackupsAutomaticAndOnDemandOperations, + BackupsLongTermRetentionOperations, + CapabilitiesByLocationOperations, + CapabilitiesByServerOperations, + CapturedLogsOperations, + ConfigurationsOperations, + DatabasesOperations, + FirewallRulesOperations, + MigrationsOperations, + NameAvailabilityOperations, + Operations, + PrivateDnsZoneSuffixOperations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + QuotaUsagesOperations, + ReplicasOperations, + ServerThreatProtectionSettingsOperations, + ServersOperations, + TuningOptionsOperations, + VirtualEndpointsOperations, + VirtualNetworkSubnetUsageOperations, +) + +if TYPE_CHECKING: + from azure.core import AzureClouds + from azure.core.credentials import TokenCredential + + +class PostgreSQLManagementClient: # pylint: disable=too-many-instance-attributes + """The Azure Database for PostgreSQL management API provides create, read, update, and delete + functionality for Azure PostgreSQL resources including servers, databases, firewall rules, + network configuration, security alert policies, log files and configurations with new business + model. 
+ + :ivar operations: Operations operations + :vartype operations: azure.mgmt.postgresql.operations.Operations + :ivar migrations: MigrationsOperations operations + :vartype migrations: azure.mgmt.postgresql.operations.MigrationsOperations + :ivar servers: ServersOperations operations + :vartype servers: azure.mgmt.postgresql.operations.ServersOperations + :ivar configurations: ConfigurationsOperations operations + :vartype configurations: azure.mgmt.postgresql.operations.ConfigurationsOperations + :ivar databases: DatabasesOperations operations + :vartype databases: azure.mgmt.postgresql.operations.DatabasesOperations + :ivar firewall_rules: FirewallRulesOperations operations + :vartype firewall_rules: azure.mgmt.postgresql.operations.FirewallRulesOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.postgresql.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.postgresql.operations.PrivateLinkResourcesOperations + :ivar virtual_endpoints: VirtualEndpointsOperations operations + :vartype virtual_endpoints: azure.mgmt.postgresql.operations.VirtualEndpointsOperations + :ivar administrators_microsoft_entra: AdministratorsMicrosoftEntraOperations operations + :vartype administrators_microsoft_entra: + azure.mgmt.postgresql.operations.AdministratorsMicrosoftEntraOperations + :ivar capabilities_by_server: CapabilitiesByServerOperations operations + :vartype capabilities_by_server: + azure.mgmt.postgresql.operations.CapabilitiesByServerOperations + :ivar captured_logs: CapturedLogsOperations operations + :vartype captured_logs: azure.mgmt.postgresql.operations.CapturedLogsOperations + :ivar backups_long_term_retention: BackupsLongTermRetentionOperations operations + :vartype backups_long_term_retention: + 
azure.mgmt.postgresql.operations.BackupsLongTermRetentionOperations + :ivar replicas: ReplicasOperations operations + :vartype replicas: azure.mgmt.postgresql.operations.ReplicasOperations + :ivar advanced_threat_protection_settings: AdvancedThreatProtectionSettingsOperations + operations + :vartype advanced_threat_protection_settings: + azure.mgmt.postgresql.operations.AdvancedThreatProtectionSettingsOperations + :ivar server_threat_protection_settings: ServerThreatProtectionSettingsOperations operations + :vartype server_threat_protection_settings: + azure.mgmt.postgresql.operations.ServerThreatProtectionSettingsOperations + :ivar backups_automatic_and_on_demand: BackupsAutomaticAndOnDemandOperations operations + :vartype backups_automatic_and_on_demand: + azure.mgmt.postgresql.operations.BackupsAutomaticAndOnDemandOperations + :ivar tuning_options: TuningOptionsOperations operations + :vartype tuning_options: azure.mgmt.postgresql.operations.TuningOptionsOperations + :ivar capabilities_by_location: CapabilitiesByLocationOperations operations + :vartype capabilities_by_location: + azure.mgmt.postgresql.operations.CapabilitiesByLocationOperations + :ivar name_availability: NameAvailabilityOperations operations + :vartype name_availability: azure.mgmt.postgresql.operations.NameAvailabilityOperations + :ivar private_dns_zone_suffix: PrivateDnsZoneSuffixOperations operations + :vartype private_dns_zone_suffix: + azure.mgmt.postgresql.operations.PrivateDnsZoneSuffixOperations + :ivar quota_usages: QuotaUsagesOperations operations + :vartype quota_usages: azure.mgmt.postgresql.operations.QuotaUsagesOperations + :ivar virtual_network_subnet_usage: VirtualNetworkSubnetUsageOperations operations + :vartype virtual_network_subnet_usage: + azure.mgmt.postgresql.operations.VirtualNetworkSubnetUsageOperations + :param credential: Credential used to authenticate requests to the service. Required. 
+ :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service host. Default value is None. + :type base_url: str + :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :paramtype cloud_setting: ~azure.core.AzureClouds + :keyword api_version: The API version to use for this operation. Default value is + "2026-01-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + base_url: Optional[str] = None, + *, + cloud_setting: Optional["AzureClouds"] = None, + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}" + _cloud = cloud_setting or settings.current.azure_cloud # type: ignore + _endpoints = get_arm_endpoints(_cloud) + if not base_url: + base_url = _endpoints["resource_manager"] + credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"]) + self._config = PostgreSQLManagementClientConfiguration( + credential=credential, + subscription_id=subscription_id, + base_url=cast(str, base_url), + cloud_setting=cloud_setting, + credential_scopes=credential_scopes, + **kwargs + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + ARMAutoResourceProviderRegistrationPolicy(), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + 
policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: ARMPipelineClient = ARMPipelineClient(base_url=cast(str, _endpoint), policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) + self.migrations = MigrationsOperations(self._client, self._config, self._serialize, self._deserialize) + self.servers = ServersOperations(self._client, self._config, self._serialize, self._deserialize) + self.configurations = ConfigurationsOperations(self._client, self._config, self._serialize, self._deserialize) + self.databases = DatabasesOperations(self._client, self._config, self._serialize, self._deserialize) + self.firewall_rules = FirewallRulesOperations(self._client, self._config, self._serialize, self._deserialize) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_link_resources = PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.virtual_endpoints = VirtualEndpointsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.administrators_microsoft_entra = AdministratorsMicrosoftEntraOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.capabilities_by_server = CapabilitiesByServerOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.captured_logs = CapturedLogsOperations(self._client, self._config, self._serialize, self._deserialize) + self.backups_long_term_retention = BackupsLongTermRetentionOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.replicas = 
ReplicasOperations(self._client, self._config, self._serialize, self._deserialize) + self.advanced_threat_protection_settings = AdvancedThreatProtectionSettingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.server_threat_protection_settings = ServerThreatProtectionSettingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.backups_automatic_and_on_demand = BackupsAutomaticAndOnDemandOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.tuning_options = TuningOptionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.capabilities_by_location = CapabilitiesByLocationOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.name_availability = NameAvailabilityOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_dns_zone_suffix = PrivateDnsZoneSuffixOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.quota_usages = QuotaUsagesOperations(self._client, self._config, self._serialize, self._deserialize) + self.virtual_network_subnet_usage = VirtualNetworkSubnetUsageOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. 
Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_configuration.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_configuration.py new file mode 100644 index 000000000000..4a58fdb8f3e7 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_configuration.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + from azure.core import AzureClouds + from azure.core.credentials import TokenCredential + + +class PostgreSQLManagementClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for PostgreSQLManagementClient. 
+ + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service host. Default value is "https://management.azure.com". + :type base_url: str + :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :type cloud_setting: ~azure.core.AzureClouds + :keyword api_version: The API version to use for this operation. Default value is + "2026-01-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + cloud_setting: Optional["AzureClouds"] = None, + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2026-01-01-preview") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.base_url = base_url + self.cloud_setting = cloud_setting + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-postgresql/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + 
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_patch.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/model_base.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/model_base.py new file mode 100644 index 000000000000..d962e0e8fceb --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/model_base.py @@ -0,0 +1,1327 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from collections.abc import MutableMapping +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null +from azure.core.rest import HttpResponse + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") +_NONE_TYPE = type(None) + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 
'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = 
o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + +_ARRAY_ENCODE_MAPPING = { + "pipeDelimited": "|", + 
"spaceDelimited": " ", + "commaDelimited": ",", + "newlineDelimited": "\n", +} + + +def _deserialize_array_encoded(delimit: str, attr): + if isinstance(attr, str): + if attr == "": + return [] + return attr.split(delimit) + return attr + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj # type: ignore[no-any-return] + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. 
+ :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) # type: ignore[no-any-return] + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + 
timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if annotation is str and rf and rf._format in _ARRAY_ENCODE_MAPPING: + return functools.partial(_deserialize_array_encoded, _ARRAY_ENCODE_MAPPING[rf._format]) + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): + def __init__(self, data: dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + # If this key has been deserialized (for mutable types), we need to handle serialization + if hasattr(self, 
"_attr_to_rest_field"):
            cache_attr = f"_deserialized_{key}"
            if hasattr(self, cache_attr):
                rf = _get_rest_field(getattr(self, "_attr_to_rest_field"), key)
                if rf:
                    value = self._data.get(key)
                    if isinstance(value, (dict, list, set)):
                        # For mutable types, serialize and return
                        # But also update _data with serialized form and clear flag
                        # so mutations via this returned value affect _data
                        serialized = _serialize(value, rf._format)
                        # If serialized form is same type (no transformation needed),
                        # return _data directly so mutations work
                        if isinstance(serialized, type(value)) and serialized == value:
                            return self._data.get(key)
                        # Otherwise return serialized copy and clear flag
                        try:
                            object.__delattr__(self, cache_attr)
                        except AttributeError:
                            pass
                        # Store serialized form back
                        self._data[key] = serialized
                        return serialized
        return self._data.__getitem__(key)

    def __setitem__(self, key: str, value: typing.Any) -> None:
        # Clear any cached deserialized value when setting through dictionary access
        cache_attr = f"_deserialized_{key}"
        try:
            object.__delattr__(self, cache_attr)
        except AttributeError:
            pass
        self._data.__setitem__(key, value)

    def __delitem__(self, key: str) -> None:
        self._data.__delitem__(key)

    def __iter__(self) -> typing.Iterator[typing.Any]:
        return self._data.__iter__()

    def __len__(self) -> int:
        return self._data.__len__()

    def __ne__(self, other: typing.Any) -> bool:
        # Defined explicitly so inequality stays consistent with the custom __eq__.
        return not self.__eq__(other)

    def keys(self) -> typing.KeysView[str]:
        """
        :returns: a set-like object providing a view on D's keys
        :rtype: ~typing.KeysView
        """
        return self._data.keys()

    def values(self) -> typing.ValuesView[typing.Any]:
        """
        :returns: an object providing a view on D's values
        :rtype: ~typing.ValuesView
        """
        return self._data.values()

    def items(self) -> typing.ItemsView[str, typing.Any]:
        """
        :returns: set-like object providing a view on D's items
        :rtype: ~typing.ItemsView
        """
        return self._data.items()

    def get(self, key: str, default: typing.Any = None) -> typing.Any:
        """
        Get the value for key if key is in the dictionary, else default.
        :param str key: The key to look up.
        :param any default: The value to return if key is not in the dictionary. Defaults to None
        :returns: D[k] if k in D, else d.
        :rtype: any
        """
        try:
            return self[key]
        except KeyError:
            return default

    @typing.overload
    def pop(self, key: str) -> typing.Any: ...  # pylint: disable=arguments-differ

    @typing.overload
    def pop(self, key: str, default: _T) -> _T: ...  # pylint: disable=signature-differs

    @typing.overload
    def pop(self, key: str, default: typing.Any) -> typing.Any: ...  # pylint: disable=signature-differs

    def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any:
        """
        Removes specified key and return the corresponding value.
        :param str key: The key to pop.
        :param any default: The value to return if key is not in the dictionary
        :returns: The value corresponding to the key.
        :rtype: any
        :raises KeyError: If key is not found and default is not given.
        """
        if default is _UNSET:
            return self._data.pop(key)
        return self._data.pop(key, default)

    def popitem(self) -> tuple[str, typing.Any]:
        """
        Removes and returns some (key, value) pair
        :returns: The (key, value) pair.
        :rtype: tuple
        :raises KeyError: if D is empty.
        """
        return self._data.popitem()

    def clear(self) -> None:
        """
        Remove all items from D.
        """
        self._data.clear()

    def update(self, *args: typing.Any, **kwargs: typing.Any) -> None:  # pylint: disable=arguments-differ
        """
        Updates D from mapping/iterable E and F.
        :param any args: Either a mapping object or an iterable of key-value pairs.
        """
        self._data.update(*args, **kwargs)

    @typing.overload
    def setdefault(self, key: str, default: None = None) -> None: ...

    @typing.overload
    def setdefault(self, key: str, default: typing.Any) -> typing.Any: ...  
# pylint: disable=signature-differs + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. + :rtype: any + """ + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + if format in _ARRAY_ENCODE_MAPPING and all(isinstance(x, str) for x in o): + return _ARRAY_ENCODE_MAPPING[format].join(o) + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field(attr_to_rest_field: dict[str, "_RestField"], rest_name: str) -> typing.Optional["_RestField"]: + try: + return 
next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name)
    except StopIteration:
        return None


def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
    """Normalize a value for storage in a model's backing dict for the given field."""
    if not rf:
        # No declared field (additional property): store in plain serialized form.
        return _serialize(value, None)
    if rf._is_multipart_file_input:
        # File inputs must be passed through untouched.
        return value
    if rf._is_model:
        return _deserialize(rf._type, value)
    if isinstance(value, ET.Element):
        # XML nodes are deserialized first, then re-serialized to the wire format.
        value = _deserialize(rf._type, value)
    return _serialize(value, rf._format)


class Model(_MyMutableMapping):
    """Base class for all generated models: a mutable mapping over the wire-format
    dict, with typed attribute access provided by _RestField descriptors."""

    _is_model = True
    # label whether current class's _attr_to_rest_field has been calculated
    # could not see _attr_to_rest_field directly because subclass inherits it from parent class
    _calculated: set[str] = set()

    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        """Accept either a single mapping/ET.Element positional arg or field kwargs."""
        class_name = self.__class__.__name__
        if len(args) > 1:
            raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given")
        # Seed with declared field defaults (keyed by wire name).
        dict_to_pass = {
            rest_field._rest_name: rest_field._default
            for rest_field in self._attr_to_rest_field.values()
            if rest_field._default is not _UNSET
        }
        if args:  # pylint: disable=too-many-nested-blocks
            if isinstance(args[0], ET.Element):
                existed_attr_keys = []
                model_meta = getattr(self, "_xml", {})

                for rf in self._attr_to_rest_field.values():
                    prop_meta = getattr(rf, "_xml", {})
                    xml_name = prop_meta.get("name", rf._rest_name)
                    xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
                    if xml_ns:
                        xml_name = "{" + xml_ns + "}" + xml_name

                    # attribute
                    if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None:
                        existed_attr_keys.append(xml_name)
                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name))
                        continue

                    # unwrapped element is array
                    if prop_meta.get("unwrapped", False):
                        # unwrapped array could either use prop items meta/prop meta
                        if prop_meta.get("itemsName"):
                            xml_name = prop_meta.get("itemsName")
                            xml_ns = prop_meta.get("itemNs")
                            if xml_ns:
                                xml_name = "{" + xml_ns + "}" + xml_name
                        items = args[0].findall(xml_name)  # pyright: ignore
                        if len(items) > 0:
                            existed_attr_keys.append(xml_name)
                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, items)
                        continue

                    # text element is primitive type
                    if prop_meta.get("text", False):
                        if args[0].text is not None:
                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text)
                        continue

                    # wrapped element could be normal property or array, it should only have one element
                    item = args[0].find(xml_name)
                    if item is not None:
                        existed_attr_keys.append(xml_name)
                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, item)

                # rest thing is additional properties
                # NOTE(review): _convert_element is defined elsewhere in this file.
                for e in args[0]:
                    if e.tag not in existed_attr_keys:
                        dict_to_pass[e.tag] = _convert_element(e)
            else:
                # Mapping input: normalize each entry through its declared field.
                dict_to_pass.update(
                    {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
                )
        else:
            non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
            if non_attr_kwargs:
                # actual type errors only throw the first wrong keyword arg they see, so following that.
                raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'")
            # None-valued kwargs are skipped so unset optional fields stay absent.
            dict_to_pass.update(
                {
                    self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v)
                    for k, v in kwargs.items()
                    if v is not None
                }
            )
        super().__init__(dict_to_pass)

    def copy(self) -> "Model":
        # NOTE(review): copies from self.__dict__ (instance attrs), not self._data —
        # looks suspect but is preserved as-is; confirm against upstream behavior.
        return Model(self.__dict__)

    def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self:
        # Lazily compute the attr-name -> _RestField map once per class.
        if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated:
            # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping',
            # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object'
            mros = cls.__mro__[:-9][::-1]  # ignore parents, and reverse the mro order
            attr_to_rest_field: dict[str, _RestField] = {  # map attribute name to rest_field property
                k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type")
            }
            annotations = {
                k: v
                for mro_class in mros
                if hasattr(mro_class, "__annotations__")
                for k, v in mro_class.__annotations__.items()
            }
            # Fill in each field's deserializer and default its wire name to the attr name.
            for attr, rf in attr_to_rest_field.items():
                rf._module = cls.__module__
                if not rf._type:
                    rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None))
                if not rf._rest_name_input:
                    rf._rest_name_input = attr
            cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items())
            cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}")

        return super().__new__(cls)

    def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None:
        # Register this subclass in its polymorphic base's discriminator mapping.
        for base in cls.__bases__:
            if hasattr(base, "__mapping__"):
                base.__mapping__[discriminator or cls.__name__] = cls  # type: ignore

    @classmethod
    def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]:
        """Return this class's discriminator field, skipping ones already consumed."""
        for v in cls.__dict__.values():
            if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators:
                return v
        return None

    @classmethod
    def _deserialize(cls, data, exist_discriminators):
        """Deserialize data into the concrete subclass chosen by its discriminator value.

        Recurses so multi-level discriminator hierarchies resolve to the leaf class.
        """
        if not hasattr(cls, "__mapping__"):
            return cls(data)
        discriminator = cls._get_discriminator(exist_discriminators)
        if discriminator is None:
            return cls(data)
        exist_discriminators.append(discriminator._rest_name)
        if isinstance(data, ET.Element):
            model_meta = getattr(cls, "_xml", {})
            prop_meta = getattr(discriminator, "_xml", {})
            xml_name = prop_meta.get("name", discriminator._rest_name)
            xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
            if xml_ns:
                xml_name = "{" + xml_ns + "}" + xml_name

            # The discriminator may be an XML attribute or a child element's text.
            if data.get(xml_name) is not None:
                discriminator_value = data.get(xml_name)
            else:
                discriminator_value = data.find(xml_name).text  # pyright: ignore
        else:
            discriminator_value = data.get(discriminator._rest_name)
        # Unknown discriminator values fall back to this (base) class.
        mapped_cls = cls.__mapping__.get(discriminator_value, cls)  # pyright: ignore # pylint: disable=no-member
        return mapped_cls._deserialize(data, exist_discriminators)

    def as_dict(self, *, exclude_readonly: bool = False) -> dict[str, typing.Any]:
        """Return a dict that can be turned into json using json.dump.

        :keyword bool exclude_readonly: Whether to remove the readonly properties.
        :returns: A dict JSON compatible object
        :rtype: dict
        """

        result = {}
        readonly_props = []
        if exclude_readonly:
            readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)]
        for k, v in self.items():
            if exclude_readonly and k in readonly_props:  # pyright: ignore
                continue
            # Multipart file inputs are passed through without recursion.
            is_multipart_file_input = False
            try:
                is_multipart_file_input = next(
                    rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
                )._is_multipart_file_input
            except StopIteration:
                pass
            result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
        return result

    @staticmethod
    def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
        """Recursively convert nested models/containers for as_dict()."""
        if v is None or isinstance(v, _Null):
            return None
        if isinstance(v, (list, tuple, set)):
            return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
        if isinstance(v, dict):
            return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
        return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v


def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj):
    """Deserialize obj into a model unless it already is one."""
    if _is_model(obj):
        return obj
    return _deserialize(model_deserializer, obj)


def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj):
    """Deserializer wrapper for Optional[...] annotations: None passes through."""
    if obj is None:
        return obj
    return _deserialize_with_callable(if_obj_deserializer, obj)


def _deserialize_with_union(deserializers, obj):
    """Try each union member's deserializer in order; raise if none succeeds."""
    for deserializer in deserializers:
        try:
            return _deserialize(deserializer, obj)
        except DeserializationError:
            pass
    raise DeserializationError()


def _deserialize_dict(
    value_deserializer: typing.Optional[typing.Callable],
    module: typing.Optional[str],
    obj: dict[typing.Any, typing.Any],
):
    """Deserialize each value of a dict (XML elements become a tag->child dict)."""
    if obj is None:
        return obj
    if isinstance(obj, ET.Element):
        obj = {child.tag: child for child in obj}
    return {k: 
_deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: list[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + try: + if ( + isinstance(obj, str) + and isinstance(deserializer, functools.partial) + and isinstance(deserializer.args[0], functools.partial) + and deserializer.args[0].func == _deserialize_array_encoded # pylint: disable=comparison-with-callable + ): + # encoded string may be deserialized to sequence + return deserializer(obj) + except: # pylint: disable=bare-except + pass + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: list[typing.Any]) -> list[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
    if isinstance(annotation, (str, typing.ForwardRef)):
        try:
            model_name = annotation.__forward_arg__  # type: ignore
        except AttributeError:
            model_name = annotation
        if module is not None:
            annotation = _get_model(module, model_name)  # type: ignore

    try:
        if module and _is_model(annotation):
            if rf:
                rf._is_model = True

            return functools.partial(_deserialize_model, annotation)  # pyright: ignore
    except Exception:
        pass

    # is it a literal?
    try:
        if annotation.__origin__ is typing.Literal:  # pyright: ignore
            return None
    except AttributeError:
        pass

    # is it optional?
    try:
        if any(a is _NONE_TYPE for a in annotation.__args__):  # pyright: ignore
            if len(annotation.__args__) <= 2:  # pyright: ignore
                # Optional[X]: unwrap X and allow None through.
                if_obj_deserializer = _get_deserialize_callable_from_annotation(
                    next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf  # pyright: ignore
                )

                return functools.partial(_deserialize_with_optional, if_obj_deserializer)
            # the type is Optional[Union[...]], we need to remove the None type from the Union
            annotation_copy = copy.copy(annotation)
            annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a is not _NONE_TYPE]  # pyright: ignore
            return _get_deserialize_callable_from_annotation(annotation_copy, module, rf)
    except AttributeError:
        pass

    # is it union?
    if getattr(annotation, "__origin__", None) is typing.Union:
        # initial ordering is we make `string` the last deserialization option, because it is often the most generic
        deserializers = [
            _get_deserialize_callable_from_annotation(arg, module, rf)
            for arg in _sorted_annotations(annotation.__args__)  # pyright: ignore
        ]

        return functools.partial(_deserialize_with_union, deserializers)

    # is it a dict generic?
    try:
        annotation_name = (
            annotation.__name__ if hasattr(annotation, "__name__") else annotation._name  # pyright: ignore
        )
        if annotation_name.lower() == "dict":
            value_deserializer = _get_deserialize_callable_from_annotation(
                annotation.__args__[1], module, rf  # pyright: ignore
            )

            return functools.partial(
                _deserialize_dict,
                value_deserializer,
                module,
            )
    except (AttributeError, IndexError):
        pass
    # is it a sequence generic?
    try:
        annotation_name = (
            annotation.__name__ if hasattr(annotation, "__name__") else annotation._name  # pyright: ignore
        )
        if annotation_name.lower() in ["list", "set", "tuple", "sequence"]:
            if len(annotation.__args__) > 1:  # pyright: ignore
                # Heterogeneous tuple-like annotation: one deserializer per position.
                entry_deserializers = [
                    _get_deserialize_callable_from_annotation(dt, module, rf)
                    for dt in annotation.__args__  # pyright: ignore
                ]
                return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
            deserializer = _get_deserialize_callable_from_annotation(
                annotation.__args__[0], module, rf  # pyright: ignore
            )

            return functools.partial(_deserialize_sequence, deserializer, module)
    except (TypeError, IndexError, AttributeError, SyntaxError):
        pass

    def _deserialize_default(
        deserializer,
        obj,
    ):
        # Best-effort: on any failure, return the raw value unchanged.
        if obj is None:
            return obj
        try:
            return _deserialize_with_callable(deserializer, obj)
        except Exception:
            pass
        return obj

    # Primitive / format-mapped type, or fall back to calling the annotation itself.
    if get_deserializer(annotation, rf):
        return functools.partial(_deserialize_default, get_deserializer(annotation, rf))

    return functools.partial(_deserialize_default, annotation)


def _deserialize_with_callable(
    
deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +def _failsafe_deserialize( + deserializer: typing.Any, + response: HttpResponse, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, response.json(), module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into 
a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + response: HttpResponse, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, response.text()) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + result = getattr(self._type, "args", [None])[0] + # type may be wrapped by nested functools.partial so we need to check for that + if isinstance(result, functools.partial): + return getattr(result, "args", [None])[0] + return result + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + # Use _data.get() directly to avoid triggering __getitem__ which clears the cache + item = obj._data.get(self._rest_name) + if item is 
None: + return item + if self._is_model: + return item + + # For mutable types, we want mutations to directly affect _data + # Check if we've already deserialized this value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + # Return the value from _data directly (it's been deserialized in place) + return obj._data.get(self._rest_name) + + deserialized = _deserialize(self._type, _serialize(item, self._format), rf=self) + + # For mutable types, store the deserialized value back in _data + # so mutations directly affect _data + if isinstance(deserialized, (dict, list, set)): + obj._data[self._rest_name] = deserialized + object.__setattr__(obj, cache_attr, True) # Mark as deserialized + return deserialized + + return deserialized + + def __set__(self, obj: Model, value) -> None: + # Clear the cached deserialized object when setting a new value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + object.__delattr__(obj, cache_attr) + + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, +) -> typing.Any: + return 
_RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[list[str]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, list[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is 
None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + 
exclude_readonly: bool, + meta: typing.Optional[dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element # type: ignore[no-any-return] + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element( + tag: typing.Any, prefix: typing.Optional[str] = None, ns: typing.Optional[str] = None +) -> ET.Element: + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: list[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/serialization.py 
b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/serialization.py new file mode 100644 index 000000000000..81ec1de5922b --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_utils/serialization.py @@ -0,0 +1,2041 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. 
+ + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
+ _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + +TZ_UTC = datetime.timezone.utc + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Optional[dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. 
+ + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node. + + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. 
+ + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. 
+ + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer: # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, 
target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises SerializationError: if serialization fails. + :returns: The serialized data. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized |= target_obj.additional_properties + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. 
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises SerializationError: if serialization fails. 
+ :raises ValueError: if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. + :rtype: str, int, float, bool, dict, list + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + if data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." 
+ raise SerializationError(msg.format(data, data_type)) from err + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param obj data: Object to be serialized. + :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object + :raises TypeError: raise if data_type is not one of str, int, float, bool. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + if data_type == "int": + return int(data) + if data_type == "float": + return float(data) + if data_type == "bool": + return bool(data) + raise TypeError("Unknown basic data type: {}".format(data_type)) + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param str data: Object to be serialized. 
+ :rtype: str + :return: serialized object + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + Defaults to False. + :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + 
node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. 
+ + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. 
+ + :param TimeDelta attr: Object to be serialized. + :rtype: str + :return: serialized duration + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises TypeError: if format invalid. + :return: serialized rfc + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises SerializationError: if format invalid. + :return: serialized iso + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." 
            raise SerializationError(msg) from err
        except AttributeError as err:
            msg = "ISO-8601 object must be valid Datetime object."
            raise TypeError(msg) from err

    @staticmethod
    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize Datetime object into IntTime format.
        This is represented as seconds.

        :param Datetime attr: Object to be serialized.
        :rtype: int
        :raises SerializationError: if format invalid
        :return: serialized unix
        """
        # Already an epoch integer: pass through untouched.
        if isinstance(attr, int):
            return attr
        try:
            if not attr.tzinfo:
                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
            return int(calendar.timegm(attr.utctimetuple()))
        except AttributeError as exc:
            raise TypeError("Unix time object must be valid Datetime object.") from exc


def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract a value from ``data`` by walking the (possibly dotted,
    flattened) JSON path stored in ``attr_desc["key"]``.

    :param str attr: The attribute name (unused; the key path drives extraction)
    :param dict attr_desc: The attribute description (holds the "key" path)
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted value, or None if a step of the path is None
    """
    key = attr_desc["key"]
    working_data = data

    while "." in key:
        # Need the cast, as for some reasons "split" is typed as list[str | Any]
        dict_keys = cast(list[str], _FLATTEN.split(key))
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        # NOTE(review): when an intermediate key is missing this falls back to
        # the ORIGINAL top-level data rather than stopping — presumably
        # intentional for partially-flattened payloads; confirm before changing.
        working_data = working_data.get(working_key, data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    return working_data.get(key)


def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
    attr, attr_desc, data
):
    """Case-insensitive variant of ``rest_key_extractor``.

    Returns None implicitly when the final working data is empty/falsy
    (hence the inconsistent-return-statements suppression).
    """
    key = attr_desc["key"]
    working_data = data

    while "." in key:
        dict_keys = _FLATTEN.split(key)
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    if working_data:
        return attribute_key_case_insensitive_extractor(key, None, working_data)


def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    key = attr_desc["key"]
    dict_keys = _FLATTEN.split(key)
    return attribute_key_extractor(dict_keys[-1], None, data)


def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    This is the case insensitive version of "last_rest_key_extractor"
    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    key = attr_desc["key"]
    dict_keys = _FLATTEN.split(key)
    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)


def attribute_key_extractor(attr, _, data):
    """Extract ``attr`` from ``data`` by direct key lookup (None if absent)."""
    return data.get(attr)


def attribute_key_case_insensitive_extractor(attr, _, data):
    """Extract ``attr`` from ``data`` by case-insensitive key lookup.

    Returns the value of the first key whose lowercase form matches, or
    None when no key matches.
    """
    found_key = None
    lower_attr = attr.lower()
    for key in data:
        if lower_attr == key.lower():
            found_key = key
            break

    return data.get(found_key)


def _extract_name_from_internal_type(internal_type):
    """Given an internal type XML description, extract correct XML name with namespace.

    :param dict internal_type: An model type
    :rtype: str
    :returns: The XML name, prefixed with "{namespace}" when a namespace is declared
    """
    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
    xml_ns = internal_type_xml_map.get("ns", None)
    if xml_ns:
        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
    return xml_name


def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
    """Extract an attribute value from an XML ElementTree node.

    Returns None for non-XML data so the next extractor in the chain can try.
    """
    if isinstance(data, dict):
        return None

    # Test if this model is XML ready first
    if not isinstance(data, ET.Element):
        return None

    xml_desc = attr_desc.get("xml", {})
    xml_name = xml_desc.get("name", attr_desc["key"])

    # Look for a children
    is_iter_type = attr_desc["type"].startswith("[")
    is_wrapped = xml_desc.get("wrapped", False)
    internal_type = attr_desc.get("internalType", None)
    internal_type_xml_map = getattr(internal_type, "_xml_map", {})

    # Integrate namespace if necessary
    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
    if xml_ns:
        xml_name = "{{{}}}{}".format(xml_ns, xml_name)

    # If it's an attribute, that's simple
    if xml_desc.get("attr", False):
        return data.get(xml_name)

    # If it's x-ms-text, that's simple too
    if xml_desc.get("text", False):
        return data.text

    # Scenario where I take the local name:
    # - Wrapped node
    # - Internal type is an enum (considered basic types)
    # - Internal type has no XML/Name node
    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
        children = data.findall(xml_name)
    # If internal type has a local name and it's not a list, I use that name
    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
        xml_name = _extract_name_from_internal_type(internal_type)
        children = data.findall(xml_name)
    # That's an array
    else:
        if internal_type:  # Complex type, ignore itemsName and use the complex type name
            items_name = _extract_name_from_internal_type(internal_type)
        else:
            items_name = xml_desc.get("itemsName", xml_name)
        children = data.findall(items_name)

    if len(children) == 0:
        if is_iter_type:
            if is_wrapped:
                return None  # is_wrapped no node, we want None
            return []  # not wrapped, assume empty list
        return None  # Assume it's not there, maybe an optional node.

    # If is_iter_type and not wrapped, return all found children
    if is_iter_type:
        if not is_wrapped:
            return children
        # Iter and wrapped, should have found one node only (the wrap one)
        if len(children) != 1:
            raise DeserializationError(
                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
                    xml_name
                )
            )
        return list(children[0])  # Might be empty list and that's ok.

    # Here it's not a itertype, we should have found one element only or empty
    if len(children) > 1:
        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
    return children[0]


class Deserializer:
    """Response object model deserializer.
+ + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. + """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. 
+ :param str content_type: Swagger "produces" if available. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... 
+ if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. + :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. 
+ :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. + """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties # type: ignore + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. 
        :rtype: object
        """
        if data is None:
            return data

        try:
            if not data_type:
                return data
            if data_type in self.basic_types.values():
                return self.deserialize_basic(data, data_type)
            if data_type in self.deserialize_type:
                # Already the expected Python type (e.g. a datetime for
                # "iso-8601"): no conversion needed.
                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
                    return data

                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
                    "object",
                    "[]",
                    r"{}",
                ]
                # An empty XML leaf node for a text-parsed type means "no value".
                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
                    return None
                data_val = self.deserialize_type[data_type](data)
                return data_val

            iter_type = data_type[0] + data_type[-1]
            if iter_type in self.deserialize_type:
                return self.deserialize_type[iter_type](data, data_type[1:-1])

            # Unknown data_type raises KeyError here, which deliberately
            # propagates (it is not in the except clause below).
            obj_type = self.dependencies[data_type]
            if issubclass(obj_type, Enum):
                if isinstance(data, ET.Element):
                    data = data.text
                return self.deserialize_enum(data, obj_type)

        except (ValueError, TypeError, AttributeError) as err:
            msg = "Unable to deserialize response data."
            msg += " Data: {}, {}".format(data, data_type)
            raise DeserializationError(msg) from err
        # Only reachable when obj_type was bound above and is a model class.
        return self._deserialize(obj_type, data)

    def deserialize_iter(self, attr, iter_type):
        """Deserialize an iterable.

        :param list attr: Iterable to be deserialized.
        :param str iter_type: The type of object in the iterable.
        :return: Deserialized iterable.
        :rtype: list
        """
        if attr is None:
            return None
        if isinstance(attr, ET.Element):  # If I receive an element here, get the children
            attr = list(attr)
        if not isinstance(attr, (list, set)):
            raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
        return [self.deserialize_data(a, iter_type) for a in attr]

    def deserialize_dict(self, attr, dict_type):
        """Deserialize a dictionary.

        :param dict/list attr: Dictionary to be deserialized. Also accepts
            a list of key, value pairs.
        :param str dict_type: The object type of the items in the dictionary.
        :return: Deserialized dictionary.
        :rtype: dict
        """
        # List-of-pairs form: [{"key": ..., "value": ...}, ...]
        if isinstance(attr, list):
            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}

        if isinstance(attr, ET.Element):
            # Transform value into {"Key": "value"}
            attr = {el.tag: el.text for el in attr}
        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}

    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
        """Deserialize a generic object.
        This will be handled as a dictionary.

        Recurses into dicts and lists; basic types are normalized via
        deserialize_basic; anything else raises TypeError.

        :param dict attr: Dictionary to be deserialized.
        :return: Deserialized object.
        :rtype: dict
        :raises TypeError: if non-builtin datatype encountered.
        """
        if attr is None:
            return None
        if isinstance(attr, ET.Element):
            # Do no recurse on XML, just return the tree as-is
            return attr
        if isinstance(attr, str):
            return self.deserialize_basic(attr, "str")
        obj_type = type(attr)
        if obj_type in self.basic_types:
            return self.deserialize_basic(attr, self.basic_types[obj_type])
        if obj_type is _long_type:
            return self.deserialize_long(attr)

        if obj_type == dict:
            deserialized = {}
            for key, value in attr.items():
                try:
                    deserialized[key] = self.deserialize_object(value, **kwargs)
                except ValueError:
                    # Best-effort: an undeserializable value becomes None.
                    deserialized[key] = None
            return deserialized

        if obj_type == list:
            deserialized = []
            for obj in attr:
                try:
                    deserialized.append(self.deserialize_object(obj, **kwargs))
                except ValueError:
                    # Best-effort: undeserializable list items are dropped.
                    pass
            return deserialized

        error = "Cannot deserialize generic object with type: "
        raise TypeError(error + str(obj_type))

    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
        """Deserialize basic builtin data type from string.
        Will attempt to convert to str, int, float and bool.
        This function will also accept '1', '0', 'true' and 'false' as
        valid bool values.
+ + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + if data_type == "int": + return int(attr) + if data_type == "float": + return float(attr) + raise TypeError("Unknown basic data type: {}".format(data_type)) + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. 
If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. 
+ :return: Deserialized decimal + :raises DeserializationError: if string format invalid. + :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :return: Deserialized duration + :rtype: TimeDelta + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :return: Deserialized date + :rtype: Date + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. 
+ return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :return: Deserialized time + :rtype: datetime.time + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. 
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. + :return: Deserialized datetime + :rtype: Datetime + :raises DeserializationError: if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_validation.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_validation.py new file mode 100644 index 000000000000..f5af3a4eb8a2 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_validation.py @@ -0,0 +1,66 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import functools
+
+
+def api_version_validation(**kwargs):
+    params_added_on = kwargs.pop("params_added_on", {})
+    method_added_on = kwargs.pop("method_added_on", "")
+    api_versions_list = kwargs.pop("api_versions_list", [])
+
+    def _index_with_default(value: str, default: int = -1) -> int:
+        """Get the index of value in api_versions_list, or return default if not found.
+
+        :param value: The value to search for in the api_versions_list.
+        :type value: str
+        :param default: The default value to return if the value is not found.
+        :type default: int
+        :return: The index of the value in the list, or the default value if not found.
+        :rtype: int
+        """
+        try:
+            return api_versions_list.index(value)
+        except ValueError:
+            return default
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                # assumes args[0] exposes _config.api_version; skip validation otherwise
+                client = args[0]
+                client_api_version = client._config.api_version  # pylint: disable=protected-access
+            except AttributeError:
+                return func(*args, **kwargs)
+
+            if _index_with_default(method_added_on) > _index_with_default(client_api_version):
+                raise ValueError(
+                    f"'{func.__name__}' is not available in API version "
+                    f"{client_api_version}. Pass service API version {method_added_on} or newer to your client."
+                )
+
+            unsupported = {
+                parameter: api_version
+                for api_version, parameters in params_added_on.items()
+                for parameter in parameters
+                if parameter in kwargs and _index_with_default(api_version) > _index_with_default(client_api_version)
+            }
+            if unsupported:
+                raise ValueError(
+                    "".join(
+                        [
+                            f"'{param}' is not available in API version {client_api_version}. "
+                            f"Use service API version {version} or newer.\n"
+                            for param, version in unsupported.items()
+                        ]
+                    )
+                )
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_version.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_version.py
new file mode 100644
index 000000000000..be71c81bd282
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/_version.py
@@ -0,0 +1,9 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "1.0.0b1"
diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/__init__.py
new file mode 100644
index 000000000000..56b391f6ff1d
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import PostgreSQLManagementClient # type: ignore + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "PostgreSQLManagementClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_client.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_client.py new file mode 100644 index 000000000000..fbc6abcd8261 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_client.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, Optional, TYPE_CHECKING, cast +from typing_extensions import Self + +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.settings import settings +from azure.mgmt.core import AsyncARMPipelineClient +from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy +from azure.mgmt.core.tools import get_arm_endpoints + +from .._utils.serialization import Deserializer, Serializer +from ._configuration import PostgreSQLManagementClientConfiguration +from .operations import ( + AdministratorsMicrosoftEntraOperations, + AdvancedThreatProtectionSettingsOperations, + BackupsAutomaticAndOnDemandOperations, + BackupsLongTermRetentionOperations, + CapabilitiesByLocationOperations, + CapabilitiesByServerOperations, + CapturedLogsOperations, + ConfigurationsOperations, + DatabasesOperations, + FirewallRulesOperations, + MigrationsOperations, + NameAvailabilityOperations, + Operations, + PrivateDnsZoneSuffixOperations, + PrivateEndpointConnectionsOperations, + PrivateLinkResourcesOperations, + QuotaUsagesOperations, + ReplicasOperations, + ServerThreatProtectionSettingsOperations, + ServersOperations, + TuningOptionsOperations, + VirtualEndpointsOperations, + VirtualNetworkSubnetUsageOperations, +) + +if TYPE_CHECKING: + from azure.core import AzureClouds + from azure.core.credentials_async import AsyncTokenCredential + + +class PostgreSQLManagementClient: # pylint: disable=too-many-instance-attributes + """The Azure Database for PostgreSQL management API provides create, read, update, and delete + functionality for Azure PostgreSQL resources including servers, databases, firewall rules, + network configuration, security alert policies, log files and configurations with new business + model. 
+ + :ivar operations: Operations operations + :vartype operations: azure.mgmt.postgresql.aio.operations.Operations + :ivar migrations: MigrationsOperations operations + :vartype migrations: azure.mgmt.postgresql.aio.operations.MigrationsOperations + :ivar servers: ServersOperations operations + :vartype servers: azure.mgmt.postgresql.aio.operations.ServersOperations + :ivar configurations: ConfigurationsOperations operations + :vartype configurations: azure.mgmt.postgresql.aio.operations.ConfigurationsOperations + :ivar databases: DatabasesOperations operations + :vartype databases: azure.mgmt.postgresql.aio.operations.DatabasesOperations + :ivar firewall_rules: FirewallRulesOperations operations + :vartype firewall_rules: azure.mgmt.postgresql.aio.operations.FirewallRulesOperations + :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations + :vartype private_endpoint_connections: + azure.mgmt.postgresql.aio.operations.PrivateEndpointConnectionsOperations + :ivar private_link_resources: PrivateLinkResourcesOperations operations + :vartype private_link_resources: + azure.mgmt.postgresql.aio.operations.PrivateLinkResourcesOperations + :ivar virtual_endpoints: VirtualEndpointsOperations operations + :vartype virtual_endpoints: azure.mgmt.postgresql.aio.operations.VirtualEndpointsOperations + :ivar administrators_microsoft_entra: AdministratorsMicrosoftEntraOperations operations + :vartype administrators_microsoft_entra: + azure.mgmt.postgresql.aio.operations.AdministratorsMicrosoftEntraOperations + :ivar capabilities_by_server: CapabilitiesByServerOperations operations + :vartype capabilities_by_server: + azure.mgmt.postgresql.aio.operations.CapabilitiesByServerOperations + :ivar captured_logs: CapturedLogsOperations operations + :vartype captured_logs: azure.mgmt.postgresql.aio.operations.CapturedLogsOperations + :ivar backups_long_term_retention: BackupsLongTermRetentionOperations operations + :vartype backups_long_term_retention: + 
azure.mgmt.postgresql.aio.operations.BackupsLongTermRetentionOperations + :ivar replicas: ReplicasOperations operations + :vartype replicas: azure.mgmt.postgresql.aio.operations.ReplicasOperations + :ivar advanced_threat_protection_settings: AdvancedThreatProtectionSettingsOperations + operations + :vartype advanced_threat_protection_settings: + azure.mgmt.postgresql.aio.operations.AdvancedThreatProtectionSettingsOperations + :ivar server_threat_protection_settings: ServerThreatProtectionSettingsOperations operations + :vartype server_threat_protection_settings: + azure.mgmt.postgresql.aio.operations.ServerThreatProtectionSettingsOperations + :ivar backups_automatic_and_on_demand: BackupsAutomaticAndOnDemandOperations operations + :vartype backups_automatic_and_on_demand: + azure.mgmt.postgresql.aio.operations.BackupsAutomaticAndOnDemandOperations + :ivar tuning_options: TuningOptionsOperations operations + :vartype tuning_options: azure.mgmt.postgresql.aio.operations.TuningOptionsOperations + :ivar capabilities_by_location: CapabilitiesByLocationOperations operations + :vartype capabilities_by_location: + azure.mgmt.postgresql.aio.operations.CapabilitiesByLocationOperations + :ivar name_availability: NameAvailabilityOperations operations + :vartype name_availability: azure.mgmt.postgresql.aio.operations.NameAvailabilityOperations + :ivar private_dns_zone_suffix: PrivateDnsZoneSuffixOperations operations + :vartype private_dns_zone_suffix: + azure.mgmt.postgresql.aio.operations.PrivateDnsZoneSuffixOperations + :ivar quota_usages: QuotaUsagesOperations operations + :vartype quota_usages: azure.mgmt.postgresql.aio.operations.QuotaUsagesOperations + :ivar virtual_network_subnet_usage: VirtualNetworkSubnetUsageOperations operations + :vartype virtual_network_subnet_usage: + azure.mgmt.postgresql.aio.operations.VirtualNetworkSubnetUsageOperations + :param credential: Credential used to authenticate requests to the service. Required. 
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service host. Default value is None. + :type base_url: str + :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :paramtype cloud_setting: ~azure.core.AzureClouds + :keyword api_version: The API version to use for this operation. Default value is + "2026-01-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: Optional[str] = None, + *, + cloud_setting: Optional["AzureClouds"] = None, + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}" + _cloud = cloud_setting or settings.current.azure_cloud # type: ignore + _endpoints = get_arm_endpoints(_cloud) + if not base_url: + base_url = _endpoints["resource_manager"] + credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"]) + self._config = PostgreSQLManagementClientConfiguration( + credential=credential, + subscription_id=subscription_id, + base_url=cast(str, base_url), + cloud_setting=cloud_setting, + credential_scopes=credential_scopes, + **kwargs + ) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + AsyncARMAutoResourceProviderRegistrationPolicy(), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + 
policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncARMPipelineClient = AsyncARMPipelineClient( + base_url=cast(str, _endpoint), policies=_policies, **kwargs + ) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) + self.migrations = MigrationsOperations(self._client, self._config, self._serialize, self._deserialize) + self.servers = ServersOperations(self._client, self._config, self._serialize, self._deserialize) + self.configurations = ConfigurationsOperations(self._client, self._config, self._serialize, self._deserialize) + self.databases = DatabasesOperations(self._client, self._config, self._serialize, self._deserialize) + self.firewall_rules = FirewallRulesOperations(self._client, self._config, self._serialize, self._deserialize) + self.private_endpoint_connections = PrivateEndpointConnectionsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_link_resources = PrivateLinkResourcesOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.virtual_endpoints = VirtualEndpointsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.administrators_microsoft_entra = AdministratorsMicrosoftEntraOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.capabilities_by_server = CapabilitiesByServerOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.captured_logs = CapturedLogsOperations(self._client, self._config, self._serialize, self._deserialize) + self.backups_long_term_retention = BackupsLongTermRetentionOperations( + self._client, self._config, self._serialize, self._deserialize + ) + 
self.replicas = ReplicasOperations(self._client, self._config, self._serialize, self._deserialize) + self.advanced_threat_protection_settings = AdvancedThreatProtectionSettingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.server_threat_protection_settings = ServerThreatProtectionSettingsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.backups_automatic_and_on_demand = BackupsAutomaticAndOnDemandOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.tuning_options = TuningOptionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.capabilities_by_location = CapabilitiesByLocationOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.name_availability = NameAvailabilityOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.private_dns_zone_suffix = PrivateDnsZoneSuffixOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.quota_usages = QuotaUsagesOperations(self._client, self._config, self._serialize, self._deserialize) + self.virtual_network_subnet_usage = VirtualNetworkSubnetUsageOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. 
+ :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_configuration.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_configuration.py new file mode 100644 index 000000000000..72696752e6af --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_configuration.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy + +from .._version import VERSION + +if TYPE_CHECKING: + from azure.core import AzureClouds + from azure.core.credentials_async import AsyncTokenCredential + + +class PostgreSQLManagementClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for PostgreSQLManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: The ID of the target subscription. The value must be an UUID. Required. + :type subscription_id: str + :param base_url: Service host. Default value is "https://management.azure.com". + :type base_url: str + :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is + None. + :type cloud_setting: ~azure.core.AzureClouds + :keyword api_version: The API version to use for this operation. Default value is + "2026-01-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + cloud_setting: Optional["AzureClouds"] = None, + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2026-01-01-preview") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.base_url = base_url + self.cloud_setting = cloud_setting + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-postgresql/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_patch.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_patch.py
new file mode 100644
index 000000000000..87676c65a8f0
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+
+
+__all__: list[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/__init__.py
new file mode 100644
index 000000000000..ec5ea57d4214
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/__init__.py
@@ -0,0 +1,69 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:  # expose _patch re-exports to type checkers only
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._operations import Operations  # type: ignore
+from ._operations import MigrationsOperations  # type: ignore
+from ._operations import ServersOperations  # type: ignore
+from ._operations import ConfigurationsOperations  # type: ignore
+from ._operations import DatabasesOperations  # type: ignore
+from ._operations import FirewallRulesOperations  # type: ignore
+from ._operations import PrivateEndpointConnectionsOperations  # type: ignore
+from ._operations import PrivateLinkResourcesOperations  # type: ignore
+from ._operations import VirtualEndpointsOperations  # type: ignore
+from ._operations import AdministratorsMicrosoftEntraOperations  # type: ignore
+from ._operations import CapabilitiesByServerOperations  # type: ignore
+from ._operations import CapturedLogsOperations  # type: ignore
+from ._operations import BackupsLongTermRetentionOperations  # type: ignore
+from ._operations import ReplicasOperations  # type: ignore
+from ._operations import AdvancedThreatProtectionSettingsOperations  # type: ignore
+from ._operations import ServerThreatProtectionSettingsOperations  # type: ignore
+from ._operations import BackupsAutomaticAndOnDemandOperations  # type: ignore
+from ._operations import TuningOptionsOperations  # type: ignore
+from ._operations import CapabilitiesByLocationOperations  # type: ignore
+from ._operations import NameAvailabilityOperations  # type: ignore
+from ._operations import PrivateDnsZoneSuffixOperations  # type: ignore
+from ._operations import QuotaUsagesOperations  # type: ignore
+from ._operations import VirtualNetworkSubnetUsageOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "Operations",
+    "MigrationsOperations",
+    "ServersOperations",
+    "ConfigurationsOperations",
+    "DatabasesOperations",
+    "FirewallRulesOperations",
+    "PrivateEndpointConnectionsOperations",
+    "PrivateLinkResourcesOperations",
+    "VirtualEndpointsOperations",
+    "AdministratorsMicrosoftEntraOperations",
+    "CapabilitiesByServerOperations",
+    "CapturedLogsOperations",
+    "BackupsLongTermRetentionOperations",
+    "ReplicasOperations",
+    "AdvancedThreatProtectionSettingsOperations",
+    "ServerThreatProtectionSettingsOperations",
+    "BackupsAutomaticAndOnDemandOperations",
+    "TuningOptionsOperations",
+    "CapabilitiesByLocationOperations",
+    "NameAvailabilityOperations",
+    "PrivateDnsZoneSuffixOperations",
+    "QuotaUsagesOperations",
+    "VirtualNetworkSubnetUsageOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()  # apply user customizations from _patch.py at import time
diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_operations.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_operations.py
new file mode 100644
index 000000000000..08eb38f2ebcf
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_operations.py
@@ -0,0 +1,9588 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +import json +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core import AsyncPipelineClient +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models as _models +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.serialization import Deserializer, Serializer +from ..._validation import api_version_validation +from ...operations._operations import ( + build_administrators_microsoft_entra_create_or_update_request, + build_administrators_microsoft_entra_delete_request, + build_administrators_microsoft_entra_get_request, + build_administrators_microsoft_entra_list_by_server_request, + build_advanced_threat_protection_settings_get_request, + build_advanced_threat_protection_settings_list_by_server_request, + build_backups_automatic_and_on_demand_create_request, + build_backups_automatic_and_on_demand_delete_request, + build_backups_automatic_and_on_demand_get_request, + build_backups_automatic_and_on_demand_list_by_server_request, + build_backups_long_term_retention_check_prerequisites_request, + build_backups_long_term_retention_get_request, + build_backups_long_term_retention_list_by_server_request, + build_backups_long_term_retention_start_request, + build_capabilities_by_location_list_request, + build_capabilities_by_server_list_request, + build_captured_logs_list_by_server_request, + build_configurations_get_request, + build_configurations_list_by_server_request, + build_configurations_put_request, + build_configurations_update_request, + build_databases_create_request, + build_databases_delete_request, + build_databases_get_request, + build_databases_list_by_server_request, + build_firewall_rules_create_or_update_request, + build_firewall_rules_delete_request, + build_firewall_rules_get_request, + build_firewall_rules_list_by_server_request, + build_migrations_cancel_request, + build_migrations_check_name_availability_request, + build_migrations_create_request, + build_migrations_get_request, + build_migrations_list_by_target_server_request, + build_migrations_update_request, + build_name_availability_check_globally_request, + 
build_name_availability_check_with_location_request, + build_operations_list_request, + build_private_dns_zone_suffix_get_request, + build_private_endpoint_connections_delete_request, + build_private_endpoint_connections_get_request, + build_private_endpoint_connections_list_by_server_request, + build_private_endpoint_connections_update_request, + build_private_link_resources_get_request, + build_private_link_resources_list_by_server_request, + build_quota_usages_list_request, + build_replicas_list_by_server_request, + build_server_threat_protection_settings_create_or_update_request, + build_servers_create_or_update_request, + build_servers_delete_request, + build_servers_get_request, + build_servers_list_by_resource_group_request, + build_servers_list_by_subscription_request, + build_servers_migrate_network_mode_request, + build_servers_restart_request, + build_servers_start_request, + build_servers_stop_request, + build_servers_update_request, + build_tuning_options_get_request, + build_tuning_options_list_by_server_request, + build_tuning_options_list_recommendations_request, + build_virtual_endpoints_create_request, + build_virtual_endpoints_delete_request, + build_virtual_endpoints_get_request, + build_virtual_endpoints_list_by_server_request, + build_virtual_endpoints_update_request, + build_virtual_network_subnet_usage_list_request, +) +from .._configuration import PostgreSQLManagementClientConfiguration + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]] +JSON = MutableMapping[str, Any] +List = list + + +class Operations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`operations` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncItemPaged["_models.Operation"]: + """Lists all available REST API operations. + + :return: An iterator like instance of Operation + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Operation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Operation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_operations_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + 
_request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Operation], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class MigrationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`migrations` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, migration_name: str, **kwargs: Any + ) -> _models.Migration: + """Gets information about a migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :return: Migration. 
The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Migration] = kwargs.pop("cls", None) + + _request = build_migrations_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + migration_name=migration_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Migration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, + 
resource_group_name: str, + server_name: str, + migration_name: str, + parameters: _models.Migration, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Creates a new migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required for creating a migration. Required. + :type parameters: ~azure.mgmt.postgresql.models.Migration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Creates a new migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required for creating a migration. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. 
The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Creates a new migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required for creating a migration. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: Union[_models.Migration, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.Migration: + """Creates a new migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required for creating a migration. Is one of the following types: + Migration, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.Migration or JSON or IO[bytes] + :return: Migration. 
The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Migration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_migrations_create_request( + resource_group_name=resource_group_name, + server_name=server_name, + migration_name=migration_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Migration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: _models.MigrationResourceForPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Updates an existing migration. The request body can contain one to many of the mutable + properties present in the migration definition. Certain property updates initiate migration + state transitions. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required to update an existing migration. Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationResourceForPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Updates an existing migration. The request body can contain one to many of the mutable + properties present in the migration definition. 
Certain property updates initiate migration + state transitions. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required to update an existing migration. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.Migration: + """Updates an existing migration. The request body can contain one to many of the mutable + properties present in the migration definition. Certain property updates initiate migration + state transitions. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required to update an existing migration. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Migration. 
The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update( + self, + resource_group_name: str, + server_name: str, + migration_name: str, + parameters: Union[_models.MigrationResourceForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.Migration: + """Updates an existing migration. The request body can contain one to many of the mutable + properties present in the migration definition. Certain property updates initiate migration + state transitions. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :param parameters: Parameters required to update an existing migration. Is one of the following + types: MigrationResourceForPatch, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationResourceForPatch or JSON or IO[bytes] + :return: Migration. 
The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Migration] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_migrations_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + migration_name=migration_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Migration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel( + self, resource_group_name: str, server_name: str, migration_name: str, **kwargs: Any + ) -> Optional[_models.Migration]: + """Cancels an active migration. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param migration_name: Name of migration. Required. + :type migration_name: str + :return: Migration or None. The Migration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Migration or None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Optional[_models.Migration]] = kwargs.pop("cls", None) + + _request = build_migrations_cancel_request( + resource_group_name=resource_group_name, + server_name=server_name, + migration_name=migration_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + 
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 204]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Migration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_target_server( + self, + resource_group_name: str, + server_name: str, + *, + migration_list_filter: Optional[Union[str, _models.MigrationListFilter]] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.Migration"]: + """Lists all migrations of a target flexible server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :keyword migration_list_filter: Migration list filter. Indicates if the request should retrieve + only active migrations or all migrations. Defaults to Active. Known values are: "Active" and + "All". Default value is None. 
+ :paramtype migration_list_filter: str or ~azure.mgmt.postgresql.models.MigrationListFilter + :return: An iterator like instance of Migration + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Migration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Migration]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_migrations_list_by_target_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + migration_list_filter=migration_list_filter, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + 
+ async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Migration], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: _models.MigrationNameAvailability, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.MigrationNameAvailability, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. Is + one of the following types: MigrationNameAvailability, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationNameAvailability or JSON or IO[bytes] + :return: MigrationNameAvailability. 
The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MigrationNameAvailability] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_migrations_check_name_availability_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.MigrationNameAvailability, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ServersOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`servers` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get(self, resource_group_name: str, server_name: str, **kwargs: Any) -> _models.Server: + """Gets information about an existing server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: Server. 
The Server is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Server + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + + _request = build_servers_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Server, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + 
parameters: Union[_models.Server, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_servers_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + 
response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: _models.Server, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: ~azure.mgmt.postgresql.models.Server + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.Server, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Creates a new server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Is one of the following types: Server, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.Server or JSON or IO[bytes] + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + 
deserialized = _deserialize(_models.Server, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.Server].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.Server]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _update_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.ServerForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_servers_update_request( + resource_group_name=resource_group_name, + 
server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: _models.ServerForPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. + :type parameters: ~azure.mgmt.postgresql.models.ServerForPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. 
The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.ServerForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. 
Is one of the following types: + ServerForPatch, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.ServerForPatch or JSON or IO[bytes] + :return: An instance of AsyncLROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Server, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + 
} + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.Server].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.Server]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Deletes or drops an existing server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncItemPaged["_models.Server"]: + """Lists all servers in a resource group. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :return: An iterator like instance of Server + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_servers_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def 
extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_subscription(self, **kwargs: Any) -> AsyncItemPaged["_models.Server"]: + """Lists all servers in a subscription. 
+ + :return: An iterator like instance of Server + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_servers_list_by_subscription_request( + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = 
cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + async def _restart_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[Union[_models.RestartParameter, JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + content_type = content_type if parameters else None + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" if parameters else None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_servers_restart_request( + resource_group_name=resource_group_name, + server_name=server_name, + 
subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[_models.RestartParameter] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. + :type parameters: ~azure.mgmt.postgresql.models.RestartParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[Union[_models.RestartParameter, JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Is one of the following types: + RestartParameter, JSON, IO[bytes] Default value is None. 
+ :type parameters: ~azure.mgmt.postgresql.models.RestartParameter or JSON or IO[bytes] + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + content_type = content_type if parameters else None + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._restart_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return 
AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _start_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_start_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", 
response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_start(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Starts a stopped server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._start_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) 
+ else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _stop_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_stop_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + 
response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_stop(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Stops a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._stop_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + 
+ if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @api_version_validation( + method_added_on="2026-01-01-preview", + params_added_on={ + "2026-01-01-preview": ["api_version", "subscription_id", "resource_group_name", "server_name", "accept"] + }, + api_versions_list=["2026-01-01-preview"], + ) + async def _migrate_network_mode_initial( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_migrate_network_mode_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2026-01-01-preview", + params_added_on={ + "2026-01-01-preview": ["api_version", "subscription_id", "resource_group_name", "server_name", "accept"] + }, + api_versions_list=["2026-01-01-preview"], + ) + async def begin_migrate_network_mode( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncLROPoller[_models.MigrateNetworkStatus]: + """Migrates the network configuration of a PostgreSQL flexible server from customer owned VNET to + Microsoft owned VNET with support for private endpoints, or from Microsoft owned VNET with no + support for private endpoints to Microsoft owned VNET with 
support for private endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of AsyncLROPoller that returns MigrateNetworkStatus. The + MigrateNetworkStatus is compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.MigrateNetworkStatus] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.MigrateNetworkStatus] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._migrate_network_mode_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = _deserialize(_models.MigrateNetworkStatus, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: 
class ConfigurationsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
    :attr:`configurations` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Accept the pipeline plumbing positionally or by keyword, in this
        # fixed order: client, config, serializer, deserializer.
        positional = list(args)
        self._client: AsyncPipelineClient = positional.pop(0) if positional else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            positional.pop(0) if positional else kwargs.pop("config")
        )
        self._serialize: Serializer = positional.pop(0) if positional else kwargs.pop("serializer")
        self._deserialize: Deserializer = positional.pop(0) if positional else kwargs.pop("deserializer")

    @distributed_trace_async
    async def get(
        self, resource_group_name: str, server_name: str, configuration_name: str, **kwargs: Any
    ) -> _models.Configuration:
        """Gets information about a specific configuration (also known as server parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :return: Configuration. The Configuration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Configuration
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            304: ResourceNotModifiedError,
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.Configuration] = kwargs.pop("cls", None)

        request = build_configurations_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            configuration_name=configuration_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        endpoint_args = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **endpoint_args)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code != 200:
            if _stream:
                # Drain the streamed connection before surfacing the error.
                try:
                    await response.read()
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Streamed requests hand back raw bytes; otherwise deserialize the model.
        deserialized = response.iter_bytes() if _stream else _deserialize(_models.Configuration, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore
+ self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_configurations_put_request( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
    @overload
    async def begin_put(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: _models.ConfigurationForUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known
        as server parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_put(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known
        as server parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_put(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known
        as server parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_put(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known
        as server parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Is one of the following types:
         ConfigurationForUpdate, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate or JSON or IO[bytes]
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Configuration] = kwargs.pop("cls", None)
        # polling: True -> default ARM LRO polling, False -> no polling, else a caller-supplied method.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A continuation token means we are resuming an existing LRO; skip the initial request.
        if cont_token is None:
            raw_result = await self._put_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                configuration_name=configuration_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed initial response so the connection is released before polling.
            await raw_result.http_response.read()  # type: ignore
        # Drop any caller-supplied error_map so it is not forwarded into the polling pipeline calls.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polling response into the Configuration model,
            # surfacing the LRO headers to a caller-supplied `cls` callback.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

            deserialized = _deserialize(_models.Configuration, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        # base_url is a full endpoint URL, so it is substituted unquoted (skip_quote=True).
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.Configuration].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.Configuration](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: _models.ConfigurationForUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates the value assigned to a specific modifiable configuration (also known as server
        parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates the value assigned to a specific modifiable configuration (also known as server
        parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_update(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates the value assigned to a specific modifiable configuration (also known as server
        parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        server_name: str,
        configuration_name: str,
        parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Configuration]:
        """Updates the value assigned to a specific modifiable configuration (also known as server
        parameter) of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param configuration_name: Name of the configuration (also known as server parameter).
         Required.
        :type configuration_name: str
        :param parameters: Parameters required to update the value of a specific modifiable
         configuration (also known as server parameter). Is one of the following types:
         ConfigurationForUpdate, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate or JSON or IO[bytes]
        :return: An instance of AsyncLROPoller that returns Configuration. The Configuration is
         compatible with MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Configuration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Configuration] = kwargs.pop("cls", None)
        # polling: True -> default ARM LRO polling, False -> no polling, else a caller-supplied method.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A continuation token means we are resuming an existing LRO; skip the initial request.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                configuration_name=configuration_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed initial response so the connection is released before polling.
            await raw_result.http_response.read()  # type: ignore
        # Drop any caller-supplied error_map so it is not forwarded into the polling pipeline calls.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polling response into the Configuration model,
            # surfacing the LRO headers to a caller-supplied `cls` callback.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

            deserialized = _deserialize(_models.Configuration, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        # base_url is a full endpoint URL, so it is substituted unquoted (skip_quote=True).
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.Configuration].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.Configuration](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
+ :type server_name: str + :return: An iterator like instance of Configuration + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Configuration]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_configurations_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + 
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, server_name: str, database_name: str, **kwargs: Any
    ) -> _models.Database:
        """Gets information about an existing database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param database_name: Name of the database (case-sensitive). Exact database names can be
         retrieved by getting the list of all existing databases in a server. Required.
        :type database_name: str
        :return: Database. The Database is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Database
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default mapping of HTTP status codes to exceptions; callers may extend/override via error_map.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.Database] = kwargs.pop("cls", None)

        _request = build_databases_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # base_url is a full endpoint URL, so it is substituted unquoted (skip_quote=True).
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        # stream=True lets the caller consume the raw body instead of a deserialized model.
        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.Database, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
    @overload
    async def begin_create(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        parameters: _models.Database,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Database]:
        """Creates a new database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param database_name: Name of the database (case-sensitive). Exact database names can be
         retrieved by getting the list of all existing databases in a server. Required.
        :type database_name: str
        :param parameters: Parameters required to create a new database. Required.
        :type parameters: ~azure.mgmt.postgresql.models.Database
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Database. The Database is compatible with
         MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Database]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_create(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Database]:
        """Creates a new database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param database_name: Name of the database (case-sensitive). Exact database names can be
         retrieved by getting the list of all existing databases in a server. Required.
        :type database_name: str
        :param parameters: Parameters required to create a new database. Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Database. The Database is compatible with
         MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Database]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_create(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Database]:
        """Creates a new database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param database_name: Name of the database (case-sensitive). Exact database names can be
         retrieved by getting the list of all existing databases in a server. Required.
        :type database_name: str
        :param parameters: Parameters required to create a new database. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns Database. The Database is compatible with
         MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Database]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_create(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        parameters: Union[_models.Database, JSON, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.Database]:
        """Creates a new database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param database_name: Name of the database (case-sensitive). Exact database names can be
         retrieved by getting the list of all existing databases in a server. Required.
        :type database_name: str
        :param parameters: Parameters required to create a new database. Is one of the following types:
         Database, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.Database or JSON or IO[bytes]
        :return: An instance of AsyncLROPoller that returns Database. The Database is compatible with
         MutableMapping
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.Database]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Database] = kwargs.pop("cls", None)
        # polling: True -> default ARM LRO polling, False -> no polling, else a caller-supplied method.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A continuation token means we are resuming an existing LRO; skip the initial request.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                database_name=database_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed initial response so the connection is released before polling.
            await raw_result.http_response.read()  # type: ignore
        # Drop any caller-supplied error_map so it is not forwarded into the polling pipeline calls.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polling response into the Database model,
            # surfacing the LRO headers to a caller-supplied `cls` callback.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

            deserialized = _deserialize(_models.Database, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        # base_url is a full endpoint URL, so it is substituted unquoted (skip_quote=True).
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.Database].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.Database](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
_failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, server_name: str, database_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes an existing database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. 
+ :type database_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.Database"]: + """Lists all databases 
in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Database + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Database]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_databases_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Database], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class FirewallRulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`firewall_rules` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> _models.FirewallRule: + """Gets information about a firewall rule in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :return: FirewallRule. 
The FirewallRule is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.FirewallRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FirewallRule] = kwargs.pop("cls", None) + + _request = build_firewall_rules_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FirewallRule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def 
_create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: Union[_models.FirewallRule, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_firewall_rules_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: _models.FirewallRule, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: ~azure.mgmt.postgresql.models.FirewallRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns FirewallRule. 
The FirewallRule is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns FirewallRule. The FirewallRule is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns FirewallRule. The FirewallRule is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: Union[_models.FirewallRule, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Is one of the + following types: FirewallRule, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.FirewallRule or JSON or IO[bytes] + :return: An instance of AsyncLROPoller that returns FirewallRule. 
The FirewallRule is + compatible with MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FirewallRule] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.FirewallRule, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, 
path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.FirewallRule].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.FirewallRule]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_firewall_rules_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. 
+ :type firewall_rule_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.FirewallRule"]: + 
"""Lists information about all firewall rules in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of FirewallRule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.FirewallRule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_firewall_rules_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", 
self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.FirewallRule], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class PrivateEndpointConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`private_endpoint_connections` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :return: PrivateEndpointConnection. 
The PrivateEndpointConnection is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + _request = build_private_endpoint_connections_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PrivateEndpointConnection, response.json()) + + if cls: + return cls(pipeline_response, 
deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _update_initial( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_private_endpoint_connections_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: ~azure.mgmt.postgresql.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns PrivateEndpointConnection. 
The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns PrivateEndpointConnection. The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns PrivateEndpointConnection. The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Is one of the + following types: PrivateEndpointConnection, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.PrivateEndpointConnection or JSON or IO[bytes] + :return: An instance of AsyncLROPoller that returns PrivateEndpointConnection. 
The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.PrivateEndpointConnection, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + 
AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.PrivateEndpointConnection].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.PrivateEndpointConnection]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_private_endpoint_connections_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. 
+ :type private_endpoint_connection_name: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> 
AsyncItemPaged["_models.PrivateEndpointConnection"]: + """Lists all private endpoint connections on a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of PrivateEndpointConnection + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_private_endpoint_connections_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params 
+ ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.PrivateEndpointConnection], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class PrivateLinkResourcesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`private_link_resources` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, group_name: str, **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets a private link resource for PostgreSQL server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param group_name: The name of the private link resource. Required. + :type group_name: str + :return: PrivateLinkResource. 
The PrivateLinkResource is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) + + _request = build_private_link_resources_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + group_name=group_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PrivateLinkResource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + 
@distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.PrivateLinkResource"]: + """Gets the private link resources for PostgreSQL server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of PrivateLinkResource + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.PrivateLinkResource] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PrivateLinkResource]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_private_link_resources_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + 
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.PrivateLinkResource], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class VirtualEndpointsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`virtual_endpoints` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any + ) -> _models.VirtualEndpoint: + """Gets information about a pair of virtual endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :return: VirtualEndpoint. 
The VirtualEndpoint is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualEndpoint + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None) + + _request = build_virtual_endpoints_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VirtualEndpoint, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async 
def _create_initial( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: Union[_models.VirtualEndpoint, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_virtual_endpoints_create_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + 
+            error = _failsafe_deserialize(
+                _models.ErrorResponse,
+                response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        response_headers["Azure-AsyncOperation"] = self._deserialize(
+            "str", response.headers.get("Azure-AsyncOperation")
+        )
+        response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+        response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+        deserialized = response.iter_bytes()
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    # The @overload declarations below only narrow typing for the three accepted
+    # `parameters` shapes; the runtime implementation is the decorated begin_create.
+    @overload
+    async def begin_create(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: _models.VirtualEndpoint,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Creates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to create or update a pair of virtual endpoints.
+         Required.
+        :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpoint
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def begin_create(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: JSON,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Creates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to create or update a pair of virtual endpoints.
+         Required.
+        :type parameters: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def begin_create(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Creates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to create or update a pair of virtual endpoints.
+         Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def begin_create(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: Union[_models.VirtualEndpoint, JSON, IO[bytes]],
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Creates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to create or update a pair of virtual endpoints. Is one
+         of the following types: VirtualEndpoint, JSON, IO[bytes] Required.
+        :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpoint or JSON or IO[bytes]
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        # When resuming from a continuation token the initial request is skipped entirely.
+        if cont_token is None:
+            raw_result = await self._create_initial(
+                resource_group_name=resource_group_name,
+                server_name=server_name,
+                virtual_endpoint_name=virtual_endpoint_name,
+                parameters=parameters,
+                content_type=content_type,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            await raw_result.http_response.read()  # type: ignore
+            kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):
+            response_headers = {}
+            response = pipeline_response.http_response
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+            deserialized = _deserialize(_models.VirtualEndpoint, response.json())
+            if cls:
+                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+            return deserialized
+
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
+        }
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(
+                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[_models.VirtualEndpoint].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[_models.VirtualEndpoint](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
+
+    async def _update_initial(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: Union[_models.VirtualEndpointResourceForPatch, JSON, IO[bytes]],
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # Initial request of the begin_update LRO; returns the raw streamed 202 body.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters
+        else:
+            _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_virtual_endpoints_update_request(
+            resource_group_name=resource_group_name,
+            server_name=server_name,
+            virtual_endpoint_name=virtual_endpoint_name,
+            subscription_id=self._config.subscription_id,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = _failsafe_deserialize(
+                _models.ErrorResponse,
+                response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        response_headers["Azure-AsyncOperation"] = self._deserialize(
+            "str", response.headers.get("Azure-AsyncOperation")
+        )
+        response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+        response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+        deserialized = response.iter_bytes()
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def begin_update(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: _models.VirtualEndpointResourceForPatch,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Updates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to update a pair of virtual endpoints. Required.
+        :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpointResourceForPatch
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    # Typing-only overloads; the runtime implementation is the decorated method below.
+    @overload
+    async def begin_update(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: JSON,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Updates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to update a pair of virtual endpoints. Required.
+        :type parameters: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def begin_update(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Updates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to update a pair of virtual endpoints. Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def begin_update(
+        self,
+        resource_group_name: str,
+        server_name: str,
+        virtual_endpoint_name: str,
+        parameters: Union[_models.VirtualEndpointResourceForPatch, JSON, IO[bytes]],
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.VirtualEndpoint]:
+        """Updates a pair of virtual endpoints for a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :param virtual_endpoint_name: Base name of the virtual endpoints. Required.
+        :type virtual_endpoint_name: str
+        :param parameters: Parameters required to update a pair of virtual endpoints. Is one of the
+         following types: VirtualEndpointResourceForPatch, JSON, IO[bytes] Required.
+        :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpointResourceForPatch or JSON or
+         IO[bytes]
+        :return: An instance of AsyncLROPoller that returns VirtualEndpoint. The VirtualEndpoint is
+         compatible with MutableMapping
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        # When resuming from a continuation token the initial request is skipped entirely.
+        if cont_token is None:
+            raw_result = await self._update_initial(
+                resource_group_name=resource_group_name,
+                server_name=server_name,
+                virtual_endpoint_name=virtual_endpoint_name,
+                parameters=parameters,
+                content_type=content_type,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            await raw_result.http_response.read()  # type: ignore
+            kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):
+            response_headers = {}
+            response = pipeline_response.http_response
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
+
+            deserialized = _deserialize(_models.VirtualEndpoint, response.json())
+            if cls:
+                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+            return deserialized
+
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
+        }
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(
+                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[_models.VirtualEndpoint].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[_models.VirtualEndpoint](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
+
+    async def _delete_initial(
+        self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # Initial request of the begin_delete LRO; returns the raw streamed response.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_virtual_endpoints_delete_request(
+            resource_group_name=resource_group_name,
+            server_name=server_name,
+            virtual_endpoint_name=virtual_endpoint_name,
+            subscription_id=self._config.subscription_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
+        }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a pair of virtual endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. 
+        :type virtual_endpoint_name: str
+        :return: An instance of AsyncLROPoller that returns None
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._delete_initial(
+                resource_group_name=resource_group_name,
+                server_name=server_name,
+                virtual_endpoint_name=virtual_endpoint_name,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            await raw_result.http_response.read()  # type: ignore
+            kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
+        }
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(
+                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace
+    def list_by_server(
+        self, resource_group_name: str, server_name: str, **kwargs: Any
+    ) -> AsyncItemPaged["_models.VirtualEndpoint"]:
+        """Lists pair of virtual endpoints associated to a server.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param server_name: The name of the server. Required.
+        :type server_name: str
+        :return: An iterator like instance of VirtualEndpoint
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.VirtualEndpoint]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[List[_models.VirtualEndpoint]] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_virtual_endpoints_list_by_server_request(
+                    resource_group_name=resource_group_name,
+                    server_name=server_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=self._config.api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                path_format_arguments = {
+                    "endpoint": self._serialize.url(
+                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
+                    ),
+                }
+                _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                path_format_arguments = {
+                    "endpoint": self._serialize.url(
+                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
+                    ),
+                }
+                _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+            return _request
+
+        async def extract_data(pipeline_response):
+            # Each page body carries "value" (the items) and an optional "nextLink".
+            deserialized = pipeline_response.http_response.json()
+            list_of_elem = _deserialize(List[_models.VirtualEndpoint], deserialized.get("value", []))
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = _failsafe_deserialize(
+                    _models.ErrorResponse,
+                    response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
+
+
+class AdministratorsMicrosoftEntraOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
+        :attr:`administrators_microsoft_entra` attribute.
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> _models.AdministratorMicrosoftEntra: + """Gets information about a server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :return: AdministratorMicrosoftEntra. 
The AdministratorMicrosoftEntra is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AdministratorMicrosoftEntra] = kwargs.pop("cls", None) + + _request = build_administrators_microsoft_entra_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AdministratorMicrosoftEntra, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return 
deserialized # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: Union[_models.AdministratorMicrosoftEntraAdd, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_administrators_microsoft_entra_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: _models.AdministratorMicrosoftEntraAdd, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraAdd + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AdministratorMicrosoftEntra. 
The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AdministratorMicrosoftEntra. The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns AdministratorMicrosoftEntra. The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: Union[_models.AdministratorMicrosoftEntraAdd, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Is one of the following types: AdministratorMicrosoftEntraAdd, JSON, + IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraAdd or JSON or + IO[bytes] + :return: An instance of AsyncLROPoller that returns AdministratorMicrosoftEntra. 
The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AdministratorMicrosoftEntra] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.AdministratorMicrosoftEntra, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, 
AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.AdministratorMicrosoftEntra].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.AdministratorMicrosoftEntra]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + async def _delete_initial( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_administrators_microsoft_entra_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes an existing server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. 
+ :type object_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.AdministratorMicrosoftEntra"]: + """List all 
server administrators associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of AdministratorMicrosoftEntra + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AdministratorMicrosoftEntra]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_administrators_microsoft_entra_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + 
"endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.AdministratorMicrosoftEntra], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class CapabilitiesByServerOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`capabilities_by_server` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, resource_group_name: str, server_name: str, **kwargs: Any) -> AsyncItemPaged["_models.Capability"]: + """Lists the capabilities available for a given server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Capability + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Capability] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Capability]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_capabilities_by_server_list_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Capability], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class CapturedLogsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`captured_logs` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.CapturedLog"]: + """Lists all captured logs for download in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :return: An iterator like instance of CapturedLog + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.CapturedLog] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.CapturedLog]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_captured_logs_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + 
list_of_elem = _deserialize(List[_models.CapturedLog], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class BackupsLongTermRetentionOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`backups_long_term_retention` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: _models.LtrPreBackupRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: ~azure.mgmt.postgresql.models.LtrPreBackupRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.LtrPreBackupRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. 
+ Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Is one of the following types: + LtrPreBackupRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.LtrPreBackupRequest or JSON or IO[bytes] + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.LtrPreBackupResponse] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_backups_long_term_retention_check_prerequisites_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LtrPreBackupResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + async def _start_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.BackupsLongTermRetentionRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_backups_long_term_retention_start_request( + resource_group_name=resource_group_name, + 
server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: 
_models.BackupsLongTermRetentionRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns BackupsLongTermRetentionResponse. The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns BackupsLongTermRetentionResponse. 
The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns BackupsLongTermRetentionResponse. The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.BackupsLongTermRetentionRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncLROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. 
Is one of the following types: + BackupsLongTermRetentionRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionRequest or JSON or + IO[bytes] + :return: An instance of AsyncLROPoller that returns BackupsLongTermRetentionResponse. The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.BackupsLongTermRetentionResponse] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._start_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = _deserialize(_models.BackupsLongTermRetentionResponse, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + 
polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.BackupsLongTermRetentionResponse].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.BackupsLongTermRetentionResponse]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @distributed_trace_async + async def get( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> _models.BackupsLongTermRetentionOperation: + """Gets the results of a long term retention backup operation for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param backup_name: The name of the backup. Required. + :type backup_name: str + :return: BackupsLongTermRetentionOperation. 
The BackupsLongTermRetentionOperation is compatible + with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionOperation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BackupsLongTermRetentionOperation] = kwargs.pop("cls", None) + + _request = build_backups_long_term_retention_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BackupsLongTermRetentionOperation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # 
type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.BackupsLongTermRetentionOperation"]: + """Lists the results of the long term retention backup operations for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of BackupsLongTermRetentionOperation + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.BackupsLongTermRetentionOperation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BackupsLongTermRetentionOperation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_backups_long_term_retention_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in 
urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BackupsLongTermRetentionOperation], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class ReplicasOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`replicas` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.Server"]: + """Lists all read replicas of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Server + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_replicas_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class AdvancedThreatProtectionSettingsOperations: # pylint: disable=name-too-long + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`advanced_threat_protection_settings` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + **kwargs: Any + ) -> _models.AdvancedThreatProtectionSettingsModel: + """Gets state of advanced threat protection settings for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. + :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :return: AdvancedThreatProtectionSettingsModel. 
The AdvancedThreatProtectionSettingsModel is + compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AdvancedThreatProtectionSettingsModel] = kwargs.pop("cls", None) + + _request = build_advanced_threat_protection_settings_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + threat_protection_name=threat_protection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AdvancedThreatProtectionSettingsModel, response.json()) + + if cls: + 
return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> AsyncItemPaged["_models.AdvancedThreatProtectionSettingsModel"]: + """Lists state of advanced threat protection settings for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of AdvancedThreatProtectionSettingsModel + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AdvancedThreatProtectionSettingsModel]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_advanced_threat_protection_settings_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: 
[urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.AdvancedThreatProtectionSettingsModel], deserialized.get("value", []) + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class ServerThreatProtectionSettingsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`server_threat_protection_settings` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + async def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: Union[_models.AdvancedThreatProtectionSettingsModel, JSON, IO[bytes]], + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_server_threat_protection_settings_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + threat_protection_name=threat_protection_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", 
self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: _models.AdvancedThreatProtectionSettingsModel, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel]: + """Creates or updates a server's Advanced Threat Protection settings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
        :type server_name: str
        :param threat_protection_name: Name of the advanced threat protection settings. "Default"
         Required.
        :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName
        :param parameters: The Advanced Threat Protection state for the server. Required.
        :type parameters: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns AdvancedThreatProtectionSettingsModel. The
         AdvancedThreatProtectionSettingsModel is compatible with MutableMapping
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        threat_protection_name: Union[str, _models.ThreatProtectionName],
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel]:
        """Creates or updates a server's Advanced Threat Protection settings.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param threat_protection_name: Name of the advanced threat protection settings. "Default"
         Required.
        :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName
        :param parameters: The Advanced Threat Protection state for the server. Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns AdvancedThreatProtectionSettingsModel. The
         AdvancedThreatProtectionSettingsModel is compatible with MutableMapping
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        threat_protection_name: Union[str, _models.ThreatProtectionName],
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel]:
        """Creates or updates a server's Advanced Threat Protection settings.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param threat_protection_name: Name of the advanced threat protection settings. "Default"
         Required.
        :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName
        :param parameters: The Advanced Threat Protection state for the server. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns AdvancedThreatProtectionSettingsModel. The
         AdvancedThreatProtectionSettingsModel is compatible with MutableMapping
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        threat_protection_name: Union[str, _models.ThreatProtectionName],
        parameters: Union[_models.AdvancedThreatProtectionSettingsModel, JSON, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel]:
        """Creates or updates a server's Advanced Threat Protection settings.

        Long-running operation: the initial request is issued by
        ``self._create_or_update_initial`` and the returned poller drives completion.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param threat_protection_name: Name of the advanced threat protection settings. "Default"
         Required.
        :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName
        :param parameters: The Advanced Threat Protection state for the server. Is one of the following
         types: AdvancedThreatProtectionSettingsModel, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel or JSON
         or IO[bytes]
        :return: An instance of AsyncLROPoller that returns AdvancedThreatProtectionSettingsModel. The
         AdvancedThreatProtectionSettingsModel is compatible with MutableMapping
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AdvancedThreatProtectionSettingsModel] = kwargs.pop("cls", None)
        # polling may be True (default AsyncARMPolling), False (no polling), or a caller-supplied
        # AsyncPollingMethod instance — see the dispatch below.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        # A continuation token resumes polling an existing operation instead of issuing a new request.
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                threat_protection_name=threat_protection_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polled response into the typed model, surfacing the
            # standard LRO headers to any user-supplied `cls` callback.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

            deserialized = _deserialize(_models.AdvancedThreatProtectionSettingsModel, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.AdvancedThreatProtectionSettingsModel](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )


class BackupsAutomaticAndOnDemandOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
    :attr:`backups_automatic_and_on_demand` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Wired up by the generated client; positional args are
        # (client, config, serializer, deserializer), falling back to keywords.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            input_args.pop(0) if input_args else kwargs.pop("config")
        )
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def get(
        self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any
    ) -> _models.BackupAutomaticAndOnDemand:
        """Gets information of an on demand backup, given its name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param backup_name: Name of the backup. Required.
        :type backup_name: str
        :return: BackupAutomaticAndOnDemand. The BackupAutomaticAndOnDemand is compatible with
         MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status codes that azure-core maps to typed exceptions before the generic handler runs.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.BackupAutomaticAndOnDemand] = kwargs.pop("cls", None)

        _request = build_backups_automatic_and_on_demand_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            backup_name=backup_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # The built request still contains the {endpoint} placeholder; fill it from config.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            # Caller opted into streaming: hand back the raw byte iterator untouched.
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.BackupAutomaticAndOnDemand, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    async def _create_initial(
        self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any
    ) -> AsyncIterator[bytes]:
        """Issue the initial request of the create operation; used by ``begin_create``.

        Returns the raw streamed response body; LRO headers are passed to any ``cls`` callback.
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        _request = build_backups_automatic_and_on_demand_create_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            backup_name=backup_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = True
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 202 Accepted is the only success status for this initial call.
        if response.status_code not in [202]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        response_headers["Azure-AsyncOperation"] = self._deserialize(
            "str", response.headers.get("Azure-AsyncOperation")
        )
        response_headers["Location"] = self._deserialize("str",
            response.headers.get("Location"))
        response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

        deserialized = response.iter_bytes()

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def begin_create(
        self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any
    ) -> AsyncLROPoller[_models.BackupAutomaticAndOnDemand]:
        """Creates an on demand backup of a server.

        Long-running operation: the initial request is issued by ``self._create_initial``
        and the returned poller drives completion.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param backup_name: Name of the backup. Required.
        :type backup_name: str
        :return: An instance of AsyncLROPoller that returns BackupAutomaticAndOnDemand. The
         BackupAutomaticAndOnDemand is compatible with MutableMapping
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.BackupAutomaticAndOnDemand] = kwargs.pop("cls", None)
        # polling may be True (default AsyncARMPolling), False, or a custom AsyncPollingMethod.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        # A continuation token resumes an existing operation instead of issuing a new request.
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                backup_name=backup_name,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polled response, surfacing LRO headers to `cls`.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

            deserialized = _deserialize(_models.BackupAutomaticAndOnDemand, response.json())
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.BackupAutomaticAndOnDemand].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.BackupAutomaticAndOnDemand](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )

    async def _delete_initial(
        self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any
    ) -> AsyncIterator[bytes]:
        """Issue the initial request of the delete operation; used by ``begin_delete``."""
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        _request = build_backups_automatic_and_on_demand_delete_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            backup_name=backup_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = True
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 202 = deletion accepted and in progress; 204 = nothing to delete / already gone.
        if response.status_code not in [202, 204]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # LRO headers are only present on the 202 (in-progress) path.
        if response.status_code == 202:
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))

        deserialized = response.iter_bytes()

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a specific backup, given its name.

        Long-running operation with no payload on completion (poller resolves to None).

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param backup_name: Name of the backup. Required.
        :type backup_name: str
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                backup_name=backup_name,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no response body; only invoke the user callback, if any.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }

        if polling is True:
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod, AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    @distributed_trace
    def list_by_server(
        self, resource_group_name: str, server_name: str, **kwargs:
        Any
    ) -> AsyncItemPaged["_models.BackupAutomaticAndOnDemand"]:
        """Lists all available backups of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :return: An iterator like instance of BackupAutomaticAndOnDemand
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.BackupAutomaticAndOnDemand]] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages follow nextLink.
            if not next_link:

                _request = build_backups_automatic_and_on_demand_list_by_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        async def extract_data(pipeline_response):
            # Split one page of results into (next-page link, items).
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.BackupAutomaticAndOnDemand], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)


class TuningOptionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
    :attr:`tuning_options` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Wired up by the generated client; positional args are
        # (client, config, serializer, deserializer), falling back to keywords.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            input_args.pop(0) if input_args else kwargs.pop("config")
        )
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        server_name: str,
        tuning_option: Union[str, _models.TuningOptionParameterEnum],
        **kwargs: Any
    ) -> _models.TuningOptions:
        """Gets the tuning options of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param tuning_option: The name of the tuning option. Known values are: "index" and "table".
         Required.
        :type tuning_option: str or ~azure.mgmt.postgresql.models.TuningOptionParameterEnum
        :return: TuningOptions. The TuningOptions is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.TuningOptions
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.TuningOptions] = kwargs.pop("cls", None)

        _request = build_tuning_options_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            tuning_option=tuning_option,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.TuningOptions, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def list_by_server(
        self, resource_group_name: str, server_name: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.TuningOptions"]:
        """Lists the tuning options of a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :return: An iterator like instance of TuningOptions
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.TuningOptions]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.TuningOptions]] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages follow nextLink.
            if not next_link:

                _request = build_tuning_options_list_by_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        async def extract_data(pipeline_response):
            # Split one page of results into (next-page link, items).
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.TuningOptions], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    @distributed_trace
    def list_recommendations(
        self,
        resource_group_name: str,
        server_name: str,
        tuning_option: Union[str, _models.TuningOptionParameterEnum],
        *,
        recommendation_type: Optional[Union[str, _models.RecommendationTypeParameterEnum]] = None,
        **kwargs: Any
    ) -> AsyncItemPaged["_models.ObjectRecommendation"]:
        """Lists available object recommendations.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param tuning_option: The name of the tuning option. Known values are: "index" and "table".
         Required.
        :type tuning_option: str or ~azure.mgmt.postgresql.models.TuningOptionParameterEnum
        :keyword recommendation_type: Recommendations list filter. Retrieves recommendations based on
         type. Known values are: "CreateIndex", "DropIndex", "ReIndex", and "AnalyzeTable". Default
         value is None.
        :paramtype recommendation_type: str or
         ~azure.mgmt.postgresql.models.RecommendationTypeParameterEnum
        :return: An iterator like instance of ObjectRecommendation
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.ObjectRecommendation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.ObjectRecommendation]] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page carries the optional recommendation_type filter; later pages follow nextLink.
            if not next_link:

                _request = build_tuning_options_list_recommendations_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    tuning_option=tuning_option,
                    subscription_id=self._config.subscription_id,
                    recommendation_type=recommendation_type,
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        async def extract_data(pipeline_response):
            # Split one page of results into (next-page link, items).
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.ObjectRecommendation], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)


class CapabilitiesByLocationOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
    :attr:`capabilities_by_location` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Wired up by the generated client; positional args are
        # (client, config, serializer, deserializer), falling back to keywords.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            input_args.pop(0) if input_args else kwargs.pop("config")
        )
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, location_name: str, **kwargs: Any) -> AsyncItemPaged["_models.Capability"]:
        """Lists the capabilities available in a given location for a specific subscription.

        :param location_name: The name of the location. Required.
        :type location_name: str
        :return: An iterator like instance of Capability
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.Capability]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.Capability]] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages follow nextLink.
            if not next_link:

                _request = build_capabilities_by_location_list_request(
                    location_name=location_name,
                    subscription_id=self._config.subscription_id,
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        async def extract_data(pipeline_response):
            # Split one page of results into (next-page link, items).
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.Capability], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)


class NameAvailabilityOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s
    :attr:`name_availability` attribute.
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def check_globally( + self, parameters: _models.CheckNameAvailabilityRequest, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_globally( + self, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_globally( + self, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def check_globally( + self, parameters: Union[_models.CheckNameAvailabilityRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Is one of the following types: + CheckNameAvailabilityRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest or JSON or + IO[bytes] + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NameAvailabilityModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_name_availability_check_globally_request( + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NameAvailabilityModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def check_with_location( + self, + location_name: str, + parameters: _models.CheckNameAvailabilityRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_with_location( + self, location_name: str, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def check_with_location( + self, location_name: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def check_with_location( + self, + location_name: str, + parameters: Union[_models.CheckNameAvailabilityRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Is one of the following types: + CheckNameAvailabilityRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest or JSON or + IO[bytes] + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NameAvailabilityModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_name_availability_check_with_location_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + 
response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NameAvailabilityModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class PrivateDnsZoneSuffixOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`private_dns_zone_suffix` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get(self, **kwargs: Any) -> str: + """Gets the private DNS zone suffix. 
+ + :return: str + :rtype: str + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[str] = kwargs.pop("cls", None) + + _request = build_private_dns_zone_suffix_get_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(str, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class QuotaUsagesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`quota_usages` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, location_name: str, **kwargs: Any) -> AsyncItemPaged["_models.QuotaUsage"]: + """Get quota usages at specified location in a given subscription. + + :param location_name: The name of the location. Required. + :type location_name: str + :return: An iterator like instance of QuotaUsage + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.postgresql.models.QuotaUsage] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.QuotaUsage]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_quota_usages_list_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = 
case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.QuotaUsage], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + +class VirtualNetworkSubnetUsageOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.aio.PostgreSQLManagementClient`'s + :attr:`virtual_network_subnet_usage` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def list( + self, + location_name: str, + parameters: _models.VirtualNetworkSubnetUsageParameter, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def list( + self, location_name: str, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def list( + self, location_name: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def list( + self, + location_name: str, + parameters: Union[_models.VirtualNetworkSubnetUsageParameter, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Is one of the following types: + VirtualNetworkSubnetUsageParameter, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageParameter or JSON or + IO[bytes] + :return: VirtualNetworkSubnetUsageModel. 
The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VirtualNetworkSubnetUsageModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_virtual_network_subnet_usage_list_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VirtualNetworkSubnetUsageModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_patch.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/aio/operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/__init__.py new file mode 100644 index 000000000000..8176a4b73b06 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/__init__.py @@ -0,0 +1,378 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AdminCredentials, + AdminCredentialsForPatch, + AdministratorMicrosoftEntra, + AdministratorMicrosoftEntraAdd, + AdministratorMicrosoftEntraProperties, + AdministratorMicrosoftEntraPropertiesForAdd, + AdvancedThreatProtectionSettingsModel, + AdvancedThreatProtectionSettingsProperties, + AuthConfig, + AuthConfigForPatch, + Backup, + BackupAutomaticAndOnDemand, + BackupAutomaticAndOnDemandProperties, + BackupForPatch, + BackupRequestBase, + BackupSettings, + BackupStoreDetails, + BackupsLongTermRetentionOperation, + BackupsLongTermRetentionRequest, + BackupsLongTermRetentionResponse, + BackupsLongTermRetentionResponseProperties, + Capability, + CapabilityBase, + CapturedLog, + CapturedLogProperties, + CheckNameAvailabilityRequest, + CheckNameAvailabilityResponse, + 
Cluster, + Configuration, + ConfigurationForUpdate, + ConfigurationProperties, + DataEncryption, + Database, + DatabaseMigrationState, + DatabaseProperties, + DbLevelValidationStatus, + DbServerMetadata, + DelegatedSubnetUsage, + ErrorAdditionalInfo, + ErrorDetail, + ErrorResponse, + FastProvisioningEditionCapability, + FirewallRule, + FirewallRuleProperties, + HighAvailability, + HighAvailabilityForPatch, + ImpactRecord, + LtrBackupOperationResponseProperties, + LtrPreBackupRequest, + LtrPreBackupResponse, + MaintenanceWindow, + MaintenanceWindowForPatch, + MigrateNetworkStatus, + Migration, + MigrationNameAvailability, + MigrationProperties, + MigrationPropertiesForPatch, + MigrationResourceForPatch, + MigrationSecretParameters, + MigrationSecretParametersForPatch, + MigrationStatus, + MigrationSubstateDetails, + NameAvailabilityModel, + NameProperty, + Network, + ObjectRecommendation, + ObjectRecommendationDetails, + ObjectRecommendationProperties, + ObjectRecommendationPropertiesAnalyzedWorkload, + ObjectRecommendationPropertiesImplementationDetails, + Operation, + OperationDisplay, + PrivateEndpoint, + PrivateEndpointConnection, + PrivateEndpointConnectionProperties, + PrivateLinkResource, + PrivateLinkResourceProperties, + PrivateLinkServiceConnectionState, + ProxyResource, + QuotaUsage, + Replica, + Resource, + RestartParameter, + Server, + ServerEditionCapability, + ServerForPatch, + ServerProperties, + ServerPropertiesForPatch, + ServerSku, + ServerSkuCapability, + ServerVersionCapability, + Sku, + SkuForPatch, + Storage, + StorageEditionCapability, + StorageMbCapability, + StorageTierCapability, + SupportedFeature, + SystemData, + TrackedResource, + TuningOptions, + UserAssignedIdentity, + UserIdentity, + ValidationDetails, + ValidationMessage, + ValidationSummaryItem, + VirtualEndpoint, + VirtualEndpointResourceForPatch, + VirtualEndpointResourceProperties, + VirtualNetworkSubnetUsageModel, + VirtualNetworkSubnetUsageParameter, +) + +from ._enums import 
( # type: ignore + AzureManagedDiskPerformanceTier, + BackupType, + Cancel, + CapabilityStatus, + CheckNameAvailabilityReason, + ConfigurationDataType, + CreateMode, + CreateModeForPatch, + CreatedByType, + DataEncryptionType, + EncryptionKeyStatus, + ExecutionStatus, + FailoverMode, + FastProvisioningSupport, + FeatureStatus, + GeographicallyRedundantBackup, + GeographicallyRedundantBackupSupport, + HighAvailabilityMode, + HighAvailabilityState, + IdentityType, + LocationRestricted, + LogicalReplicationOnSourceServer, + MicrosoftEntraAuth, + MigrateRolesAndPermissions, + MigrationDatabaseState, + MigrationListFilter, + MigrationMode, + MigrationNameAvailabilityReason, + MigrationOption, + MigrationState, + MigrationSubstate, + NetworkMigrationState, + OnlineStorageResizeSupport, + OperationOrigin, + OverwriteDatabasesOnTargetServer, + PasswordBasedAuth, + PostgreSqlFlexibleServerHighAvailabilityMode, + PostgresMajorVersion, + PrincipalType, + PrivateEndpointConnectionProvisioningState, + PrivateEndpointServiceConnectionStatus, + ReadReplicaPromoteMode, + ReadReplicaPromoteOption, + RecommendationTypeEnum, + RecommendationTypeParameterEnum, + ReplicationRole, + ReplicationState, + ServerPublicNetworkAccessState, + ServerState, + SkuTier, + SourceType, + SslMode, + StartDataMigration, + StorageAutoGrow, + StorageAutoGrowthSupport, + StorageType, + ThreatProtectionName, + ThreatProtectionState, + TriggerCutover, + TuningOptionParameterEnum, + ValidationState, + VirtualEndpointType, + ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport, + ZoneRedundantHighAvailabilitySupport, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AdminCredentials", + "AdminCredentialsForPatch", + "AdministratorMicrosoftEntra", + "AdministratorMicrosoftEntraAdd", + "AdministratorMicrosoftEntraProperties", + "AdministratorMicrosoftEntraPropertiesForAdd", + 
"AdvancedThreatProtectionSettingsModel", + "AdvancedThreatProtectionSettingsProperties", + "AuthConfig", + "AuthConfigForPatch", + "Backup", + "BackupAutomaticAndOnDemand", + "BackupAutomaticAndOnDemandProperties", + "BackupForPatch", + "BackupRequestBase", + "BackupSettings", + "BackupStoreDetails", + "BackupsLongTermRetentionOperation", + "BackupsLongTermRetentionRequest", + "BackupsLongTermRetentionResponse", + "BackupsLongTermRetentionResponseProperties", + "Capability", + "CapabilityBase", + "CapturedLog", + "CapturedLogProperties", + "CheckNameAvailabilityRequest", + "CheckNameAvailabilityResponse", + "Cluster", + "Configuration", + "ConfigurationForUpdate", + "ConfigurationProperties", + "DataEncryption", + "Database", + "DatabaseMigrationState", + "DatabaseProperties", + "DbLevelValidationStatus", + "DbServerMetadata", + "DelegatedSubnetUsage", + "ErrorAdditionalInfo", + "ErrorDetail", + "ErrorResponse", + "FastProvisioningEditionCapability", + "FirewallRule", + "FirewallRuleProperties", + "HighAvailability", + "HighAvailabilityForPatch", + "ImpactRecord", + "LtrBackupOperationResponseProperties", + "LtrPreBackupRequest", + "LtrPreBackupResponse", + "MaintenanceWindow", + "MaintenanceWindowForPatch", + "MigrateNetworkStatus", + "Migration", + "MigrationNameAvailability", + "MigrationProperties", + "MigrationPropertiesForPatch", + "MigrationResourceForPatch", + "MigrationSecretParameters", + "MigrationSecretParametersForPatch", + "MigrationStatus", + "MigrationSubstateDetails", + "NameAvailabilityModel", + "NameProperty", + "Network", + "ObjectRecommendation", + "ObjectRecommendationDetails", + "ObjectRecommendationProperties", + "ObjectRecommendationPropertiesAnalyzedWorkload", + "ObjectRecommendationPropertiesImplementationDetails", + "Operation", + "OperationDisplay", + "PrivateEndpoint", + "PrivateEndpointConnection", + "PrivateEndpointConnectionProperties", + "PrivateLinkResource", + "PrivateLinkResourceProperties", + 
"PrivateLinkServiceConnectionState", + "ProxyResource", + "QuotaUsage", + "Replica", + "Resource", + "RestartParameter", + "Server", + "ServerEditionCapability", + "ServerForPatch", + "ServerProperties", + "ServerPropertiesForPatch", + "ServerSku", + "ServerSkuCapability", + "ServerVersionCapability", + "Sku", + "SkuForPatch", + "Storage", + "StorageEditionCapability", + "StorageMbCapability", + "StorageTierCapability", + "SupportedFeature", + "SystemData", + "TrackedResource", + "TuningOptions", + "UserAssignedIdentity", + "UserIdentity", + "ValidationDetails", + "ValidationMessage", + "ValidationSummaryItem", + "VirtualEndpoint", + "VirtualEndpointResourceForPatch", + "VirtualEndpointResourceProperties", + "VirtualNetworkSubnetUsageModel", + "VirtualNetworkSubnetUsageParameter", + "AzureManagedDiskPerformanceTier", + "BackupType", + "Cancel", + "CapabilityStatus", + "CheckNameAvailabilityReason", + "ConfigurationDataType", + "CreateMode", + "CreateModeForPatch", + "CreatedByType", + "DataEncryptionType", + "EncryptionKeyStatus", + "ExecutionStatus", + "FailoverMode", + "FastProvisioningSupport", + "FeatureStatus", + "GeographicallyRedundantBackup", + "GeographicallyRedundantBackupSupport", + "HighAvailabilityMode", + "HighAvailabilityState", + "IdentityType", + "LocationRestricted", + "LogicalReplicationOnSourceServer", + "MicrosoftEntraAuth", + "MigrateRolesAndPermissions", + "MigrationDatabaseState", + "MigrationListFilter", + "MigrationMode", + "MigrationNameAvailabilityReason", + "MigrationOption", + "MigrationState", + "MigrationSubstate", + "NetworkMigrationState", + "OnlineStorageResizeSupport", + "OperationOrigin", + "OverwriteDatabasesOnTargetServer", + "PasswordBasedAuth", + "PostgreSqlFlexibleServerHighAvailabilityMode", + "PostgresMajorVersion", + "PrincipalType", + "PrivateEndpointConnectionProvisioningState", + "PrivateEndpointServiceConnectionStatus", + "ReadReplicaPromoteMode", + "ReadReplicaPromoteOption", + "RecommendationTypeEnum", + 
"RecommendationTypeParameterEnum", + "ReplicationRole", + "ReplicationState", + "ServerPublicNetworkAccessState", + "ServerState", + "SkuTier", + "SourceType", + "SslMode", + "StartDataMigration", + "StorageAutoGrow", + "StorageAutoGrowthSupport", + "StorageType", + "ThreatProtectionName", + "ThreatProtectionState", + "TriggerCutover", + "TuningOptionParameterEnum", + "ValidationState", + "VirtualEndpointType", + "ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport", + "ZoneRedundantHighAvailabilitySupport", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_enums.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_enums.py new file mode 100644 index 000000000000..34abbe3fc55c --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_enums.py @@ -0,0 +1,674 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
class ConfigurationDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Data type of the configuration (also known as server parameter)."""

    # str mixin: members compare equal to their wire-format string values;
    # CaseInsensitiveEnumMeta makes lookups like ConfigurationDataType("boolean") case-insensitive.
    BOOLEAN = "Boolean"
    NUMERIC = "Numeric"
    INTEGER = "Integer"
    ENUMERATION = "Enumeration"
    STRING = "String"
    # NOTE(review): "Set" presumably denotes a multi-valued parameter — confirm against service docs.
    SET = "Set"
class ExecutionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Service-set extensible enum indicating the status of operation."""

    # Extensible on the service side: responses may carry values not listed here,
    # so callers should not assume this set is exhaustive.
    RUNNING = "Running"
    CANCELLED = "Cancelled"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
class IdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Types of identities associated with a server."""

    NONE = "None"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED = "SystemAssigned"
    # Combined value: server has both a system-assigned and user-assigned identities.
    # Note the wire format is a single comma-separated string, not two values.
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
+ """ + + ENABLED = "Enabled" + DISABLED = "Disabled" + + +class LogicalReplicationOnSourceServer(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates whether to setup logical replication on source server, if needed.""" + + TRUE = "True" + FALSE = "False" + + +class MicrosoftEntraAuth(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates if the server supports Microsoft Entra authentication.""" + + ENABLED = "Enabled" + DISABLED = "Disabled" + + +class MigrateRolesAndPermissions(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates if roles and permissions must be migrated.""" + + TRUE = "True" + FALSE = "False" + + +class MigrationDatabaseState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Migration state of a database.""" + + IN_PROGRESS = "InProgress" + WAITING_FOR_CUTOVER_TRIGGER = "WaitingForCutoverTrigger" + FAILED = "Failed" + CANCELED = "Canceled" + SUCCEEDED = "Succeeded" + CANCELING = "Canceling" + + +class MigrationListFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of MigrationListFilter.""" + + ACTIVE = "Active" + ALL = "All" + + +class MigrationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Mode used to perform the migration: Online or Offline.""" + + OFFLINE = "Offline" + ONLINE = "Online" + + +class MigrationNameAvailabilityReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Migration name availability reason.""" + + INVALID = "Invalid" + ALREADY_EXISTS = "AlreadyExists" + + +class MigrationOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Supported option for a migration.""" + + VALIDATE = "Validate" + MIGRATE = "Migrate" + VALIDATE_AND_MIGRATE = "ValidateAndMigrate" + + +class MigrationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """State of migration.""" + + IN_PROGRESS = "InProgress" + WAITING_FOR_USER_ACTION = "WaitingForUserAction" + CANCELED = "Canceled" + FAILED = "Failed" + SUCCEEDED = "Succeeded" + VALIDATION_FAILED = "ValidationFailed" + CLEANING_UP = 
"CleaningUp" + + +class MigrationSubstate(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Substate of migration.""" + + PERFORMING_PRE_REQUISITE_STEPS = "PerformingPreRequisiteSteps" + WAITING_FOR_LOGICAL_REPLICATION_SETUP_REQUEST_ON_SOURCE_DB = "WaitingForLogicalReplicationSetupRequestOnSourceDB" + WAITING_FOR_DBS_TO_MIGRATE_SPECIFICATION = "WaitingForDBsToMigrateSpecification" + WAITING_FOR_TARGET_DB_OVERWRITE_CONFIRMATION = "WaitingForTargetDBOverwriteConfirmation" + WAITING_FOR_DATA_MIGRATION_SCHEDULING = "WaitingForDataMigrationScheduling" + WAITING_FOR_DATA_MIGRATION_WINDOW = "WaitingForDataMigrationWindow" + MIGRATING_DATA = "MigratingData" + WAITING_FOR_CUTOVER_TRIGGER = "WaitingForCutoverTrigger" + COMPLETING_MIGRATION = "CompletingMigration" + COMPLETED = "Completed" + CANCELING_REQUESTED_DB_MIGRATIONS = "CancelingRequestedDBMigrations" + VALIDATION_IN_PROGRESS = "ValidationInProgress" + + +class NetworkMigrationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The state of the network migration operation.""" + + PENDING = "Pending" + """The network migration is pending.""" + IN_PROGRESS = "InProgress" + """The network migration is in progress.""" + SUCCEEDED = "Succeeded" + """The network migration succeeded.""" + FAILED = "Failed" + """The network migration failed.""" + CANCEL_IN_PROGRESS = "CancelInProgress" + """The network migration cancellation is in progress.""" + CANCELLED = "Cancelled" + """The network migration was cancelled.""" + + +class OnlineStorageResizeSupport(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates if resizing the storage, without interrupting the operation of the database engine, + is supported in this location for the given subscription. 'Enabled' means resizing the storage + without interrupting the operation of the database engine is supported. 'Disabled' means + resizing the storage without interrupting the operation of the database engine is not + supported. Will be deprecated in the future. 
class PostgresMajorVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Major version of PostgreSQL database engine."""

    # Wire values are the bare major-version numbers as strings, newest first.
    EIGHTEEN = "18"
    SEVENTEEN = "17"
    SIXTEEN = "16"
    FIFTEEN = "15"
    FOURTEEN = "14"
    THIRTEEN = "13"
    TWELVE = "12"
    ELEVEN = "11"
class ReadReplicaPromoteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Type of operation to apply on the read replica. This property is write only. Standalone means
    that the read replica will be promoted to a standalone server, and will become a completely
    independent entity from the replication set. Switchover means that the read replica will swap
    roles with the primary server.
    """

    # Fix: original class docstring read "the read replica will roles with the primary
    # server" — missing "swap", as the SWITCHOVER member docstring confirms.
    STANDALONE = "Standalone"
    """Read replica will become an independent server."""
    SWITCHOVER = "Switchover"
    """Read replica will swap roles with primary server."""
+ """ + + PLANNED = "Planned" + """The operation will wait for data in the read replica to be fully synchronized with its source + server, before it initiates the operation.""" + FORCED = "Forced" + """The operation will not wait for data in the read replica to be synchronized with its source + server, before it initiates the operation.""" + + +class RecommendationTypeEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type for this recommendation.""" + + CREATE_INDEX = "CreateIndex" + DROP_INDEX = "DropIndex" + RE_INDEX = "ReIndex" + ANALYZE_TABLE = "AnalyzeTable" + + +class RecommendationTypeParameterEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Recommendations list filter. Retrieves recommendations based on type.""" + + CREATE_INDEX = "CreateIndex" + DROP_INDEX = "DropIndex" + RE_INDEX = "ReIndex" + ANALYZE_TABLE = "AnalyzeTable" + + +class ReplicationRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Role of the server in a replication set.""" + + NONE = "None" + PRIMARY = "Primary" + ASYNC_REPLICA = "AsyncReplica" + GEO_ASYNC_REPLICA = "GeoAsyncReplica" + + +class ReplicationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates the replication state of a read replica. This property is returned only when the + target server is a read replica. Possible values are Active, Broken, Catchup, Provisioning, + Reconfiguring, and Updating. 
+ """ + + ACTIVE = "Active" + """The read replica server is fully synchronized and actively replicating data from the primary + server.""" + CATCHUP = "Catchup" + """The read replica server is behind the primary server and is currently catching up with pending + changes.""" + PROVISIONING = "Provisioning" + """The read replica server is being created and is in process of getting initialized.""" + UPDATING = "Updating" + """The read replica server is undergoing some changes it can be changing compute size of promoting + it to primary server.""" + BROKEN = "Broken" + """Replication has failed or been interrupted.""" + RECONFIGURING = "Reconfiguring" + """The read replica server is being reconfigured, possibly due to changes in source or settings.""" + + +class ServerPublicNetworkAccessState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates if public network access is enabled or not.""" + + ENABLED = "Enabled" + DISABLED = "Disabled" + + +class ServerState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible states of a server.""" + + READY = "Ready" + DROPPING = "Dropping" + DISABLED = "Disabled" + STARTING = "Starting" + STOPPING = "Stopping" + STOPPED = "Stopped" + UPDATING = "Updating" + RESTARTING = "Restarting" + INACCESSIBLE = "Inaccessible" + PROVISIONING = "Provisioning" + + +class SkuTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Tier of the compute assigned to a server.""" + + BURSTABLE = "Burstable" + GENERAL_PURPOSE = "GeneralPurpose" + MEMORY_OPTIMIZED = "MemoryOptimized" + + +class SourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Source server type used for the migration: ApsaraDB_RDS, AWS, AWS_AURORA, AWS_EC2, AWS_RDS, + AzureVM, Crunchy_PostgreSQL, Digital_Ocean_Droplets, Digital_Ocean_PostgreSQL, EDB, + EDB_Oracle_Server, EDB_PostgreSQL, GCP, GCP_AlloyDB, GCP_CloudSQL, GCP_Compute, + Heroku_PostgreSQL, Huawei_Compute, Huawei_RDS, OnPremises, PostgreSQLCosmosDB, + PostgreSQLFlexibleServer, 
class SslMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """SSL mode used by a migration. Default SSL mode for 'PostgreSQLSingleServer' is 'VerifyFull'.
    Default SSL mode for other source types is 'Prefer'.
    """

    # Values mirror libpq sslmode settings — TODO confirm exact service-side semantics.
    PREFER = "Prefer"
    REQUIRE = "Require"
    VERIFY_CA = "VerifyCA"
    VERIFY_FULL = "VerifyFull"
class StorageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Type of storage assigned to a server. Allowed values are Premium_LRS, PremiumV2_LRS, or
    UltraSSD_LRS. If not specified, it defaults to Premium_LRS.
    """

    # Azure managed-disk SKU names; the default (when unset) is Premium_LRS per the
    # class docstring above.
    PREMIUM_LRS = "Premium_LRS"
    PREMIUM_V2_LRS = "PremiumV2_LRS"
    ULTRA_SSD_LRS = "UltraSSD_LRS"
class ZoneRedundantHighAvailabilitySupport(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Indicates if high availability with zone redundancy is supported in this location. 'Enabled'
    means high availability with zone redundancy is supported. 'Disabled' stands for high
    availability with zone redundancy is not supported. Will be deprecated in the future. Look to
    Supported Features for 'ZoneRedundantHa'.
    """

    # Deprecated per docstring: new code should consult Supported Features ('ZoneRedundantHa').
    ENABLED = "Enabled"
    DISABLED = "Disabled"
import models as _models + + +class AdminCredentials(_Model): + """Credentials of administrator users for source and target servers. + + :ivar source_server_password: Password for the user of the source server. Required. + :vartype source_server_password: str + :ivar target_server_password: Password for the user of the target server. Required. + :vartype target_server_password: str + """ + + source_server_password: str = rest_field(name="sourceServerPassword", visibility=["create", "update"]) + """Password for the user of the source server. Required.""" + target_server_password: str = rest_field(name="targetServerPassword", visibility=["create", "update"]) + """Password for the user of the target server. Required.""" + + @overload + def __init__( + self, + *, + source_server_password: str, + target_server_password: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AdminCredentialsForPatch(_Model): + """Credentials of administrator users for source and target servers. + + :ivar source_server_password: Password for the user of the source server. + :vartype source_server_password: str + :ivar target_server_password: Password for the user of the target server. + :vartype target_server_password: str + """ + + source_server_password: Optional[str] = rest_field(name="sourceServerPassword", visibility=["update"]) + """Password for the user of the source server.""" + target_server_password: Optional[str] = rest_field(name="targetServerPassword", visibility=["update"]) + """Password for the user of the target server.""" + + @overload + def __init__( + self, + *, + source_server_password: Optional[str] = None, + target_server_password: Optional[str] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Resource(_Model): + """Resource. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.""" + name: Optional[str] = rest_field(visibility=["read"]) + """The name of the resource.""" + type: Optional[str] = rest_field(visibility=["read"]) + """The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or + \"Microsoft.Storage/storageAccounts\".""" + system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) + """Azure Resource Manager metadata containing createdBy and modifiedBy information.""" + + +class ProxyResource(Resource): + """Proxy Resource. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. 
E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + """ + + +class AdministratorMicrosoftEntra(ProxyResource): + """Server administrator associated to a Microsoft Entra principal. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a server administrator associated to a Microsoft Entra + principal. Required. + :vartype properties: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraProperties + """ + + properties: "_models.AdministratorMicrosoftEntraProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a server administrator associated to a Microsoft Entra principal. Required.""" + + __flattened_items = ["principal_type", "principal_name", "object_id", "tenant_id"] + + @overload + def __init__( + self, + *, + properties: "_models.AdministratorMicrosoftEntraProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class AdministratorMicrosoftEntraAdd(_Model): + """Server administrator associated to a Microsoft Entra principal. + + :ivar properties: Properties of the server administrator associated to a Microsoft Entra + principal. + :vartype properties: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraPropertiesForAdd + """ + + properties: Optional["_models.AdministratorMicrosoftEntraPropertiesForAdd"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of the server administrator associated to a Microsoft Entra principal.""" + + __flattened_items = ["principal_type", "principal_name", "tenant_id"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.AdministratorMicrosoftEntraPropertiesForAdd"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class AdministratorMicrosoftEntraProperties(_Model): + """Properties of a server administrator associated to a Microsoft Entra principal. + + :ivar principal_type: Type of Microsoft Entra principal to which the server administrator is + associated. Known values are: "Unknown", "User", "Group", and "ServicePrincipal". + :vartype principal_type: str or ~azure.mgmt.postgresql.models.PrincipalType + :ivar principal_name: Name of the Microsoft Entra principal. + :vartype principal_name: str + :ivar object_id: Object identifier of the Microsoft Entra principal. + :vartype object_id: str + :ivar tenant_id: Identifier of the tenant in which the Microsoft Entra principal exists. + :vartype tenant_id: str + """ + + principal_type: Optional[Union[str, "_models.PrincipalType"]] = rest_field( + name="principalType", visibility=["read", "create", "update"] + ) + """Type of Microsoft Entra principal to which the server administrator is associated. 
Known values + are: \"Unknown\", \"User\", \"Group\", and \"ServicePrincipal\".""" + principal_name: Optional[str] = rest_field(name="principalName", visibility=["read", "create", "update"]) + """Name of the Microsoft Entra principal.""" + object_id: Optional[str] = rest_field(name="objectId", visibility=["read", "create", "update"]) + """Object identifier of the Microsoft Entra principal.""" + tenant_id: Optional[str] = rest_field(name="tenantId", visibility=["read", "create", "update"]) + """Identifier of the tenant in which the Microsoft Entra principal exists.""" + + @overload + def __init__( + self, + *, + principal_type: Optional[Union[str, "_models.PrincipalType"]] = None, + principal_name: Optional[str] = None, + object_id: Optional[str] = None, + tenant_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AdministratorMicrosoftEntraPropertiesForAdd(_Model): # pylint: disable=name-too-long + """Properties of a server administrator associated to a Microsoft Entra principal. + + :ivar principal_type: Type of Microsoft Entra principal to which the server administrator is + associated. Known values are: "Unknown", "User", "Group", and "ServicePrincipal". + :vartype principal_type: str or ~azure.mgmt.postgresql.models.PrincipalType + :ivar principal_name: Name of the Microsoft Entra principal. + :vartype principal_name: str + :ivar tenant_id: Identifier of the tenant in which the Microsoft Entra principal exists. + :vartype tenant_id: str + """ + + principal_type: Optional[Union[str, "_models.PrincipalType"]] = rest_field( + name="principalType", visibility=["read", "create", "update"] + ) + """Type of Microsoft Entra principal to which the server administrator is associated. 
Known values + are: \"Unknown\", \"User\", \"Group\", and \"ServicePrincipal\".""" + principal_name: Optional[str] = rest_field(name="principalName", visibility=["read", "create", "update"]) + """Name of the Microsoft Entra principal.""" + tenant_id: Optional[str] = rest_field(name="tenantId", visibility=["create", "update"]) + """Identifier of the tenant in which the Microsoft Entra principal exists.""" + + @overload + def __init__( + self, + *, + principal_type: Optional[Union[str, "_models.PrincipalType"]] = None, + principal_name: Optional[str] = None, + tenant_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AdvancedThreatProtectionSettingsModel(ProxyResource): + """Advanced threat protection settings of the server. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Advanced threat protection properties. 
+ :vartype properties: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsProperties + """ + + properties: Optional["_models.AdvancedThreatProtectionSettingsProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Advanced threat protection properties.""" + + __flattened_items = ["state", "creation_time"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.AdvancedThreatProtectionSettingsProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class AdvancedThreatProtectionSettingsProperties(_Model): # pylint: disable=name-too-long + """Properties of advanced threat protection state for a server. + + :ivar state: Specifies the state of the advanced threat protection, whether it is enabled, + disabled, or a state has not been applied yet on the server. Required. Known values are: + "Enabled" and "Disabled". + :vartype state: str or ~azure.mgmt.postgresql.models.ThreatProtectionState + :ivar creation_time: Specifies the creation time (UTC) of the policy. 
+ :vartype creation_time: ~datetime.datetime + """ + + state: Union[str, "_models.ThreatProtectionState"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the state of the advanced threat protection, whether it is enabled, disabled, or a + state has not been applied yet on the server. Required. Known values are: \"Enabled\" and + \"Disabled\".""" + creation_time: Optional[datetime.datetime] = rest_field(name="creationTime", visibility=["read"], format="rfc3339") + """Specifies the creation time (UTC) of the policy.""" + + @overload + def __init__( + self, + *, + state: Union[str, "_models.ThreatProtectionState"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AuthConfig(_Model): + """Authentication configuration properties of a server. + + :ivar active_directory_auth: Indicates if the server supports Microsoft Entra authentication. + Known values are: "Enabled" and "Disabled". + :vartype active_directory_auth: str or ~azure.mgmt.postgresql.models.MicrosoftEntraAuth + :ivar password_auth: Indicates if the server supports password based authentication. Known + values are: "Enabled" and "Disabled". + :vartype password_auth: str or ~azure.mgmt.postgresql.models.PasswordBasedAuth + :ivar tenant_id: Identifier of the tenant of the delegated resource. + :vartype tenant_id: str + """ + + active_directory_auth: Optional[Union[str, "_models.MicrosoftEntraAuth"]] = rest_field( + name="activeDirectoryAuth", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if the server supports Microsoft Entra authentication. 
Known values are: \"Enabled\" + and \"Disabled\".""" + password_auth: Optional[Union[str, "_models.PasswordBasedAuth"]] = rest_field( + name="passwordAuth", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if the server supports password based authentication. Known values are: \"Enabled\" + and \"Disabled\".""" + tenant_id: Optional[str] = rest_field(name="tenantId", visibility=["read", "create", "update", "delete", "query"]) + """Identifier of the tenant of the delegated resource.""" + + @overload + def __init__( + self, + *, + active_directory_auth: Optional[Union[str, "_models.MicrosoftEntraAuth"]] = None, + password_auth: Optional[Union[str, "_models.PasswordBasedAuth"]] = None, + tenant_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AuthConfigForPatch(_Model): + """Authentication configuration properties of a server. + + :ivar active_directory_auth: Indicates if the server supports Microsoft Entra authentication. + Known values are: "Enabled" and "Disabled". + :vartype active_directory_auth: str or ~azure.mgmt.postgresql.models.MicrosoftEntraAuth + :ivar password_auth: Indicates if the server supports password based authentication. Known + values are: "Enabled" and "Disabled". + :vartype password_auth: str or ~azure.mgmt.postgresql.models.PasswordBasedAuth + :ivar tenant_id: Identifier of the tenant of the delegated resource. + :vartype tenant_id: str + """ + + active_directory_auth: Optional[Union[str, "_models.MicrosoftEntraAuth"]] = rest_field( + name="activeDirectoryAuth", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if the server supports Microsoft Entra authentication. 
Known values are: \"Enabled\" + and \"Disabled\".""" + password_auth: Optional[Union[str, "_models.PasswordBasedAuth"]] = rest_field( + name="passwordAuth", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if the server supports password based authentication. Known values are: \"Enabled\" + and \"Disabled\".""" + tenant_id: Optional[str] = rest_field(name="tenantId", visibility=["read", "create", "update", "delete", "query"]) + """Identifier of the tenant of the delegated resource.""" + + @overload + def __init__( + self, + *, + active_directory_auth: Optional[Union[str, "_models.MicrosoftEntraAuth"]] = None, + password_auth: Optional[Union[str, "_models.PasswordBasedAuth"]] = None, + tenant_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Backup(_Model): + """Backup properties of a server. + + :ivar backup_retention_days: Backup retention days for the server. + :vartype backup_retention_days: int + :ivar geo_redundant_backup: Indicates if the server is configured to create geographically + redundant backups. Known values are: "Enabled" and "Disabled". + :vartype geo_redundant_backup: str or + ~azure.mgmt.postgresql.models.GeographicallyRedundantBackup + :ivar earliest_restore_date: Earliest restore point time (ISO8601 format) for a server. 
+ :vartype earliest_restore_date: ~datetime.datetime + """ + + backup_retention_days: Optional[int] = rest_field( + name="backupRetentionDays", visibility=["read", "create", "update", "delete", "query"] + ) + """Backup retention days for the server.""" + geo_redundant_backup: Optional[Union[str, "_models.GeographicallyRedundantBackup"]] = rest_field( + name="geoRedundantBackup", visibility=["read", "create"] + ) + """Indicates if the server is configured to create geographically redundant backups. Known values + are: \"Enabled\" and \"Disabled\".""" + earliest_restore_date: Optional[datetime.datetime] = rest_field( + name="earliestRestoreDate", visibility=["read"], format="rfc3339" + ) + """Earliest restore point time (ISO8601 format) for a server.""" + + @overload + def __init__( + self, + *, + backup_retention_days: Optional[int] = None, + geo_redundant_backup: Optional[Union[str, "_models.GeographicallyRedundantBackup"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupAutomaticAndOnDemand(ProxyResource): + """Properties of a backup. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a backup. 
+ :vartype properties: ~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemandProperties + """ + + properties: Optional["_models.BackupAutomaticAndOnDemandProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a backup.""" + + __flattened_items = ["backup_type", "completed_time", "source"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.BackupAutomaticAndOnDemandProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class BackupAutomaticAndOnDemandProperties(_Model): + """Properties of a backup. + + :ivar backup_type: Type of backup. Known values are: "Full" and "Customer On-Demand". + :vartype backup_type: str or ~azure.mgmt.postgresql.models.BackupType + :ivar completed_time: Time(ISO8601 format) at which the backup was completed. + :vartype completed_time: ~datetime.datetime + :ivar source: Source of the backup. 
+ :vartype source: str + """ + + backup_type: Optional[Union[str, "_models.BackupType"]] = rest_field( + name="backupType", visibility=["read", "create", "update", "delete", "query"] + ) + """Type of backup. Known values are: \"Full\" and \"Customer On-Demand\".""" + completed_time: Optional[datetime.datetime] = rest_field( + name="completedTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Time(ISO8601 format) at which the backup was completed.""" + source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Source of the backup.""" + + @overload + def __init__( + self, + *, + backup_type: Optional[Union[str, "_models.BackupType"]] = None, + completed_time: Optional[datetime.datetime] = None, + source: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupForPatch(_Model): + """Backup properties of a server. + + :ivar backup_retention_days: Backup retention days for the server. + :vartype backup_retention_days: int + :ivar geo_redundant_backup: Indicates if the server is configured to create geographically + redundant backups. Known values are: "Enabled" and "Disabled". + :vartype geo_redundant_backup: str or + ~azure.mgmt.postgresql.models.GeographicallyRedundantBackup + :ivar earliest_restore_date: Earliest restore point time (ISO8601 format) for a server. 
+ :vartype earliest_restore_date: ~datetime.datetime + """ + + backup_retention_days: Optional[int] = rest_field( + name="backupRetentionDays", visibility=["read", "create", "update", "delete", "query"] + ) + """Backup retention days for the server.""" + geo_redundant_backup: Optional[Union[str, "_models.GeographicallyRedundantBackup"]] = rest_field( + name="geoRedundantBackup", visibility=["read"] + ) + """Indicates if the server is configured to create geographically redundant backups. Known values + are: \"Enabled\" and \"Disabled\".""" + earliest_restore_date: Optional[datetime.datetime] = rest_field( + name="earliestRestoreDate", visibility=["read"], format="rfc3339" + ) + """Earliest restore point time (ISO8601 format) for a server.""" + + @overload + def __init__( + self, + *, + backup_retention_days: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupRequestBase(_Model): + """BackupRequestBase is the base for all backup request. + + :ivar backup_settings: Backup Settings. Required. + :vartype backup_settings: ~azure.mgmt.postgresql.models.BackupSettings + """ + + backup_settings: "_models.BackupSettings" = rest_field( + name="backupSettings", visibility=["read", "create", "update", "delete", "query"] + ) + """Backup Settings. Required.""" + + @overload + def __init__( + self, + *, + backup_settings: "_models.BackupSettings", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupSettings(_Model): + """The settings for the long term backup. 
+ + :ivar backup_name: Backup Name for the current backup. Required. + :vartype backup_name: str + """ + + backup_name: str = rest_field(name="backupName", visibility=["read", "create", "update", "delete", "query"]) + """Backup Name for the current backup. Required.""" + + @overload + def __init__( + self, + *, + backup_name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupsLongTermRetentionOperation(ProxyResource): + """Response for the LTR backup Operation API call. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Long Term Retention Backup Operation Resource Properties. 
+ :vartype properties: ~azure.mgmt.postgresql.models.LtrBackupOperationResponseProperties + """ + + properties: Optional["_models.LtrBackupOperationResponseProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Long Term Retention Backup Operation Resource Properties.""" + + __flattened_items = [ + "datasource_size_in_bytes", + "data_transferred_in_bytes", + "backup_name", + "backup_metadata", + "status", + "start_time", + "end_time", + "percent_complete", + "error_code", + "error_message", + ] + + @overload + def __init__( + self, + *, + properties: Optional["_models.LtrBackupOperationResponseProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class BackupsLongTermRetentionRequest(BackupRequestBase): + """The request that is made for a long term retention backup. + + :ivar backup_settings: Backup Settings. Required. + :vartype backup_settings: ~azure.mgmt.postgresql.models.BackupSettings + :ivar target_details: Backup store detail for target server. Required. 
+ :vartype target_details: ~azure.mgmt.postgresql.models.BackupStoreDetails + """ + + target_details: "_models.BackupStoreDetails" = rest_field( + name="targetDetails", visibility=["read", "create", "update", "delete", "query"] + ) + """Backup store detail for target server. Required.""" + + @overload + def __init__( + self, + *, + backup_settings: "_models.BackupSettings", + target_details: "_models.BackupStoreDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BackupsLongTermRetentionResponse(_Model): + """Response for the LTR backup API call. + + :ivar properties: Long Term Retention Backup Operation Resource Properties. + :vartype properties: ~azure.mgmt.postgresql.models.LtrBackupOperationResponseProperties + """ + + properties: Optional["_models.LtrBackupOperationResponseProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Long Term Retention Backup Operation Resource Properties.""" + + __flattened_items = [ + "datasource_size_in_bytes", + "data_transferred_in_bytes", + "backup_name", + "backup_metadata", + "status", + "start_time", + "end_time", + "percent_complete", + "error_code", + "error_message", + ] + + @overload + def __init__( + self, + *, + properties: Optional["_models.LtrBackupOperationResponseProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pop the flattened keyword arguments out of ``kwargs`` before the base
        # model initializes, then re-assign them so ``__setattr__`` routes each
        # one into the nested ``properties`` model.
        _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items}
        super().__init__(*args, **kwargs)
        for k, v in _flattened_input.items():
            setattr(self, k, v)

    def __getattr__(self, name: str) -> Any:
        # Reads of flattened attribute names are delegated to ``self.properties``
        # (``None`` when no properties payload is present).
        if name in self.__flattened_items:
            if self.properties is None:
                return None
            return getattr(self.properties, name)
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

    def __setattr__(self, key: str, value: Any) -> None:
        # Writes to flattened attribute names are forwarded to ``self.properties``,
        # which is created lazily from its rest-field type on first write; all
        # other attributes follow normal assignment rules.
        if key in self.__flattened_items:
            if self.properties is None:
                self.properties = self._attr_to_rest_field["properties"]._class_type()
            setattr(self.properties, key, value)
        else:
            super().__setattr__(key, value)


class BackupsLongTermRetentionResponseProperties(_Model):  # pylint: disable=name-too-long
    """Response for the pre-backup request.

    :ivar number_of_containers: Number of storage containers the plugin will use during backup.
     More than one containers may be used for size limitations, parallelism, or redundancy etc.
     Required.
    :vartype number_of_containers: int
    """

    # Serialized as camelCase ``numberOfContainers`` on the wire.
    number_of_containers: int = rest_field(
        name="numberOfContainers", visibility=["read", "create", "update", "delete", "query"]
    )
    """Number of storage containers the plugin will use during backup. More than one containers may be
    used for size limitations, parallelism, or redundancy etc. Required."""

    @overload
    def __init__(
        self,
        *,
        number_of_containers: int,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class BackupStoreDetails(_Model):
    """Details about the target where the backup content will be stored.
    :ivar sas_uri_list: List of SAS uri of storage containers where backup data is to be
     streamed/copied. Required.
    :vartype sas_uri_list: list[str]
    """

    sas_uri_list: list[str] = rest_field(name="sasUriList", visibility=["read", "create", "update", "delete", "query"])
    """List of SAS uri of storage containers where backup data is to be streamed/copied. Required."""

    @overload
    def __init__(
        self,
        *,
        sas_uri_list: list[str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class CapabilityBase(_Model):
    """Base object for representing capability.

    :ivar status: The status of the capability. Known values are: "Visible", "Available",
     "Default", and "Disabled".
    :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus
    :ivar reason: The reason for the capability not being available.
    :vartype reason: str
    """

    # Both fields are service-populated: visibility is read-only, so they are
    # never serialized into create/update payloads.
    status: Optional[Union[str, "_models.CapabilityStatus"]] = rest_field(visibility=["read"])
    """The status of the capability. Known values are: \"Visible\", \"Available\", \"Default\", and
    \"Disabled\"."""
    reason: Optional[str] = rest_field(visibility=["read"])
    """The reason for the capability not being available."""


class Capability(CapabilityBase):
    """Capability for the Azure Database for PostgreSQL flexible server.

    :ivar status: The status of the capability. Known values are: "Visible", "Available",
     "Default", and "Disabled".
    :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus
    :ivar reason: The reason for the capability not being available.
    :vartype reason: str
    :ivar name: Name of flexible servers capabilities.
    :vartype name: str
    :ivar supported_server_editions: List of supported compute tiers.
+ :vartype supported_server_editions: list[~azure.mgmt.postgresql.models.ServerEditionCapability] + :ivar supported_server_versions: List of supported major versions of PostgreSQL database + engine. + :vartype supported_server_versions: list[~azure.mgmt.postgresql.models.ServerVersionCapability] + :ivar supported_features: Features supported. + :vartype supported_features: list[~azure.mgmt.postgresql.models.SupportedFeature] + :ivar fast_provisioning_supported: Indicates if fast provisioning is supported. 'Enabled' means + fast provisioning is supported. 'Disabled' stands for fast provisioning is not supported. Will + be deprecated in the future. Look to Supported Features for 'FastProvisioning'. Known values + are: "Enabled" and "Disabled". + :vartype fast_provisioning_supported: str or + ~azure.mgmt.postgresql.models.FastProvisioningSupport + :ivar supported_fast_provisioning_editions: List of compute tiers supporting fast provisioning. + :vartype supported_fast_provisioning_editions: + list[~azure.mgmt.postgresql.models.FastProvisioningEditionCapability] + :ivar geo_backup_supported: Indicates if geographically redundant backups are supported in this + location. 'Enabled' means geographically redundant backups are supported. 'Disabled' stands for + geographically redundant backup is not supported. Will be deprecated in the future. Look to + Supported Features for 'GeoBackup'. Known values are: "Enabled" and "Disabled". + :vartype geo_backup_supported: str or + ~azure.mgmt.postgresql.models.GeographicallyRedundantBackupSupport + :ivar zone_redundant_ha_supported: Indicates if high availability with zone redundancy is + supported in this location. 'Enabled' means high availability with zone redundancy is + supported. 'Disabled' stands for high availability with zone redundancy is not supported. Will + be deprecated in the future. Look to Supported Features for 'ZoneRedundantHa'. Known values + are: "Enabled" and "Disabled". 
+ :vartype zone_redundant_ha_supported: str or + ~azure.mgmt.postgresql.models.ZoneRedundantHighAvailabilitySupport + :ivar zone_redundant_ha_and_geo_backup_supported: Indicates if high availability with zone + redundancy is supported in conjunction with geographically redundant backups in this location. + 'Enabled' means high availability with zone redundancy is supported in conjunction with + geographically redundant backups is supported. 'Disabled' stands for high availability with + zone redundancy is supported in conjunction with geographically redundant backups is not + supported. Will be deprecated in the future. Look to Supported Features for + 'ZoneRedundantHaAndGeoBackup'. Known values are: "Enabled" and "Disabled". + :vartype zone_redundant_ha_and_geo_backup_supported: str or + ~azure.mgmt.postgresql.models.ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport + :ivar storage_auto_growth_supported: Indicates if storage autogrow is supported in this + location. 'Enabled' means storage autogrow is supported. 'Disabled' stands for storage autogrow + is not supported. Will be deprecated in the future. Look to Supported Features for + 'StorageAutoGrowth'. Known values are: "Enabled" and "Disabled". + :vartype storage_auto_growth_supported: str or + ~azure.mgmt.postgresql.models.StorageAutoGrowthSupport + :ivar online_resize_supported: Indicates if resizing the storage, without interrupting the + operation of the database engine, is supported in this location for the given subscription. + 'Enabled' means resizing the storage without interrupting the operation of the database engine + is supported. 'Disabled' means resizing the storage without interrupting the operation of the + database engine is not supported. Will be deprecated in the future. Look to Supported Features + for 'OnlineResize'. Known values are: "Enabled" and "Disabled". 
+ :vartype online_resize_supported: str or + ~azure.mgmt.postgresql.models.OnlineStorageResizeSupport + :ivar restricted: Indicates if this location is restricted. 'Enabled' means location is + restricted. 'Disabled' stands for location is not restricted. Will be deprecated in the future. + Look to Supported Features for 'Restricted'. Known values are: "Enabled" and "Disabled". + :vartype restricted: str or ~azure.mgmt.postgresql.models.LocationRestricted + """ + + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of flexible servers capabilities.""" + supported_server_editions: Optional[list["_models.ServerEditionCapability"]] = rest_field( + name="supportedServerEditions", visibility=["read"] + ) + """List of supported compute tiers.""" + supported_server_versions: Optional[list["_models.ServerVersionCapability"]] = rest_field( + name="supportedServerVersions", visibility=["read"] + ) + """List of supported major versions of PostgreSQL database engine.""" + supported_features: Optional[list["_models.SupportedFeature"]] = rest_field( + name="supportedFeatures", visibility=["read"] + ) + """Features supported.""" + fast_provisioning_supported: Optional[Union[str, "_models.FastProvisioningSupport"]] = rest_field( + name="fastProvisioningSupported", visibility=["read"] + ) + """Indicates if fast provisioning is supported. 'Enabled' means fast provisioning is supported. + 'Disabled' stands for fast provisioning is not supported. Will be deprecated in the future. + Look to Supported Features for 'FastProvisioning'. 
Known values are: \"Enabled\" and + \"Disabled\".""" + supported_fast_provisioning_editions: Optional[list["_models.FastProvisioningEditionCapability"]] = rest_field( + name="supportedFastProvisioningEditions", visibility=["read"] + ) + """List of compute tiers supporting fast provisioning.""" + geo_backup_supported: Optional[Union[str, "_models.GeographicallyRedundantBackupSupport"]] = rest_field( + name="geoBackupSupported", visibility=["read"] + ) + """Indicates if geographically redundant backups are supported in this location. 'Enabled' means + geographically redundant backups are supported. 'Disabled' stands for geographically redundant + backup is not supported. Will be deprecated in the future. Look to Supported Features for + 'GeoBackup'. Known values are: \"Enabled\" and \"Disabled\".""" + zone_redundant_ha_supported: Optional[Union[str, "_models.ZoneRedundantHighAvailabilitySupport"]] = rest_field( + name="zoneRedundantHaSupported", visibility=["read"] + ) + """Indicates if high availability with zone redundancy is supported in this location. 'Enabled' + means high availability with zone redundancy is supported. 'Disabled' stands for high + availability with zone redundancy is not supported. Will be deprecated in the future. Look to + Supported Features for 'ZoneRedundantHa'. Known values are: \"Enabled\" and \"Disabled\".""" + zone_redundant_ha_and_geo_backup_supported: Optional[ + Union[str, "_models.ZoneRedundantHighAvailabilityAndGeographicallyRedundantBackupSupport"] + ] = rest_field(name="zoneRedundantHaAndGeoBackupSupported", visibility=["read"]) + """Indicates if high availability with zone redundancy is supported in conjunction with + geographically redundant backups in this location. 'Enabled' means high availability with zone + redundancy is supported in conjunction with geographically redundant backups is supported. 
+ 'Disabled' stands for high availability with zone redundancy is supported in conjunction with + geographically redundant backups is not supported. Will be deprecated in the future. Look to + Supported Features for 'ZoneRedundantHaAndGeoBackup'. Known values are: \"Enabled\" and + \"Disabled\".""" + storage_auto_growth_supported: Optional[Union[str, "_models.StorageAutoGrowthSupport"]] = rest_field( + name="storageAutoGrowthSupported", visibility=["read"] + ) + """Indicates if storage autogrow is supported in this location. 'Enabled' means storage autogrow + is supported. 'Disabled' stands for storage autogrow is not supported. Will be deprecated in + the future. Look to Supported Features for 'StorageAutoGrowth'. Known values are: \"Enabled\" + and \"Disabled\".""" + online_resize_supported: Optional[Union[str, "_models.OnlineStorageResizeSupport"]] = rest_field( + name="onlineResizeSupported", visibility=["read"] + ) + """Indicates if resizing the storage, without interrupting the operation of the database engine, + is supported in this location for the given subscription. 'Enabled' means resizing the storage + without interrupting the operation of the database engine is supported. 'Disabled' means + resizing the storage without interrupting the operation of the database engine is not + supported. Will be deprecated in the future. Look to Supported Features for 'OnlineResize'. + Known values are: \"Enabled\" and \"Disabled\".""" + restricted: Optional[Union[str, "_models.LocationRestricted"]] = rest_field(visibility=["read"]) + """Indicates if this location is restricted. 'Enabled' means location is restricted. 'Disabled' + stands for location is not restricted. Will be deprecated in the future. Look to Supported + Features for 'Restricted'. Known values are: \"Enabled\" and \"Disabled\".""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CapturedLog(ProxyResource): + """Log file. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a log file. + :vartype properties: ~azure.mgmt.postgresql.models.CapturedLogProperties + """ + + properties: Optional["_models.CapturedLogProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a log file.""" + + __flattened_items = ["created_time", "last_modified_time", "size_in_kb", "type", "url"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.CapturedLogProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Split the flattened keyword arguments out of ``kwargs`` before the
        # base model initializes, then re-assign them so ``__setattr__`` routes
        # them into the nested ``properties`` model.
        _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items}
        super().__init__(*args, **kwargs)
        for k, v in _flattened_input.items():
            setattr(self, k, v)

    def __getattr__(self, name: str) -> Any:
        # Reads of flattened attribute names are delegated to ``self.properties``.
        if name in self.__flattened_items:
            if self.properties is None:
                return None
            return getattr(self.properties, name)
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

    def __setattr__(self, key: str, value: Any) -> None:
        # Writes to flattened attribute names go to ``self.properties``, which
        # is created lazily from its rest-field type the first time it is needed.
        if key in self.__flattened_items:
            if self.properties is None:
                self.properties = self._attr_to_rest_field["properties"]._class_type()
            setattr(self.properties, key, value)
        else:
            super().__setattr__(key, value)


class CapturedLogProperties(_Model):
    """Properties of a log file.

    :ivar created_time: Creation timestamp of the log file.
    :vartype created_time: ~datetime.datetime
    :ivar last_modified_time: Last modified timestamp of the log file.
    :vartype last_modified_time: ~datetime.datetime
    :ivar size_in_kb: Size (in KB) of the log file.
    :vartype size_in_kb: int
    :ivar type: Type of log file. Can be 'ServerLogs' or 'UpgradeLogs'.
    :vartype type: str
    :ivar url: URL to download the log file from.
+ :vartype url: str + """ + + created_time: Optional[datetime.datetime] = rest_field( + name="createdTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Creation timestamp of the log file.""" + last_modified_time: Optional[datetime.datetime] = rest_field( + name="lastModifiedTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Last modified timestamp of the log file.""" + size_in_kb: Optional[int] = rest_field(name="sizeInKb", visibility=["read", "create", "update", "delete", "query"]) + """Size (in KB) of the log file.""" + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Type of log file. Can be 'ServerLogs' or 'UpgradeLogs'.""" + url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """URL to download the log file from.""" + + @overload + def __init__( + self, + *, + created_time: Optional[datetime.datetime] = None, + last_modified_time: Optional[datetime.datetime] = None, + size_in_kb: Optional[int] = None, + type: Optional[str] = None, + url: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CheckNameAvailabilityRequest(_Model): + """The check availability request body. + + :ivar name: The name of the resource for which availability needs to be checked. + :vartype name: str + :ivar type: The resource type. 
    :vartype type: str
    """

    # ``type`` deliberately shadows the builtin: the attribute must mirror the
    # REST payload's ``type`` field name.
    name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the resource for which availability needs to be checked."""
    type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The resource type."""

    @overload
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        type: Optional[str] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class CheckNameAvailabilityResponse(_Model):
    """The check availability result.

    :ivar name_available: Indicates if the resource name is available.
    :vartype name_available: bool
    :ivar reason: The reason why the given name is not available. Known values are: "Invalid" and
     "AlreadyExists".
    :vartype reason: str or ~azure.mgmt.postgresql.models.CheckNameAvailabilityReason
    :ivar message: Detailed reason why the given name is not available.
    :vartype message: str
    """

    name_available: Optional[bool] = rest_field(
        name="nameAvailable", visibility=["read", "create", "update", "delete", "query"]
    )
    """Indicates if the resource name is available."""
    reason: Optional[Union[str, "_models.CheckNameAvailabilityReason"]] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The reason why the given name is not available.
    Known values are: \"Invalid\" and
    \"AlreadyExists\"."""
    message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Detailed reason why the given name is not available."""

    @overload
    def __init__(
        self,
        *,
        name_available: Optional[bool] = None,
        reason: Optional[Union[str, "_models.CheckNameAvailabilityReason"]] = None,
        message: Optional[str] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class Cluster(_Model):
    """Cluster properties of a server.

    :ivar cluster_size: Number of nodes assigned to the elastic cluster.
    :vartype cluster_size: int
    :ivar default_database_name: Default database name for the elastic cluster.
    :vartype default_database_name: str
    """

    # Wire names are camelCase; the Python attributes stay snake_case.
    cluster_size: Optional[int] = rest_field(
        name="clusterSize", visibility=["read", "create", "update", "delete", "query"]
    )
    """Number of nodes assigned to the elastic cluster."""
    default_database_name: Optional[str] = rest_field(
        name="defaultDatabaseName", visibility=["read", "create", "update", "delete", "query"]
    )
    """Default database name for the elastic cluster."""

    @overload
    def __init__(
        self,
        *,
        cluster_size: Optional[int] = None,
        default_database_name: Optional[str] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class Configuration(ProxyResource):
    """Configuration (also known as server parameter).

    :ivar id: Fully qualified resource ID for the resource.
Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a configuration (also known as server parameter). + :vartype properties: ~azure.mgmt.postgresql.models.ConfigurationProperties + """ + + properties: Optional["_models.ConfigurationProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a configuration (also known as server parameter).""" + + __flattened_items = [ + "value", + "description", + "default_value", + "data_type", + "allowed_values", + "source", + "is_dynamic_config", + "is_read_only", + "is_config_pending_restart", + "unit", + "documentation_link", + ] + + @overload + def __init__( + self, + *, + properties: Optional["_models.ConfigurationProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class ConfigurationForUpdate(_Model): + """Configuration (also known as server parameter). + + :ivar properties: Properties of a configuration (also known as server parameter). + :vartype properties: ~azure.mgmt.postgresql.models.ConfigurationProperties + """ + + properties: Optional["_models.ConfigurationProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a configuration (also known as server parameter).""" + + __flattened_items = [ + "value", + "description", + "default_value", + "data_type", + "allowed_values", + "source", + "is_dynamic_config", + "is_read_only", + "is_config_pending_restart", + "unit", + "documentation_link", + ] + + @overload + def __init__( + self, + *, + properties: Optional["_models.ConfigurationProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class ConfigurationProperties(_Model): + """Properties of a configuration (also known as server parameter). + + :ivar value: Value of the configuration (also known as server parameter). Required to update + the value assigned to a specific modifiable configuration. + :vartype value: str + :ivar description: Description of the configuration (also known as server parameter). + :vartype description: str + :ivar default_value: Value assigned by default to the configuration (also known as server + parameter). + :vartype default_value: str + :ivar data_type: Data type of the configuration (also known as server parameter). Known values + are: "Boolean", "Numeric", "Integer", "Enumeration", "String", and "Set". + :vartype data_type: str or ~azure.mgmt.postgresql.models.ConfigurationDataType + :ivar allowed_values: Allowed values of the configuration (also known as server parameter). + :vartype allowed_values: str + :ivar source: Source of the value assigned to the configuration (also known as server + parameter). Required to update the value assigned to a specific modifiable configuration. 
+ :vartype source: str + :ivar is_dynamic_config: Indicates if it's a dynamic (true) or static (false) configuration + (also known as server parameter). Static server parameters require a server restart after + changing the value assigned to them, for the change to take effect. Dynamic server parameters + do not require a server restart after changing the value assigned to them, for the change to + take effect. + :vartype is_dynamic_config: bool + :ivar is_read_only: Indicates if it's a read-only (true) or modifiable (false) configuration + (also known as server parameter). + :vartype is_read_only: bool + :ivar is_config_pending_restart: Indicates if the value assigned to the configuration (also + known as server parameter) is pending a server restart for it to take effect. + :vartype is_config_pending_restart: bool + :ivar unit: Units in which the configuration (also known as server parameter) value is + expressed. + :vartype unit: str + :ivar documentation_link: Link pointing to the documentation of the configuration (also known + as server parameter). + :vartype documentation_link: str + """ + + value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Value of the configuration (also known as server parameter). Required to update the value + assigned to a specific modifiable configuration.""" + description: Optional[str] = rest_field(visibility=["read"]) + """Description of the configuration (also known as server parameter).""" + default_value: Optional[str] = rest_field(name="defaultValue", visibility=["read"]) + """Value assigned by default to the configuration (also known as server parameter).""" + data_type: Optional[Union[str, "_models.ConfigurationDataType"]] = rest_field(name="dataType", visibility=["read"]) + """Data type of the configuration (also known as server parameter). 
Known values are: \"Boolean\", + \"Numeric\", \"Integer\", \"Enumeration\", \"String\", and \"Set\".""" + allowed_values: Optional[str] = rest_field(name="allowedValues", visibility=["read"]) + """Allowed values of the configuration (also known as server parameter).""" + source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Source of the value assigned to the configuration (also known as server parameter). Required to + update the value assigned to a specific modifiable configuration.""" + is_dynamic_config: Optional[bool] = rest_field(name="isDynamicConfig", visibility=["read"]) + """Indicates if it's a dynamic (true) or static (false) configuration (also known as server + parameter). Static server parameters require a server restart after changing the value assigned + to them, for the change to take effect. Dynamic server parameters do not require a server + restart after changing the value assigned to them, for the change to take effect.""" + is_read_only: Optional[bool] = rest_field(name="isReadOnly", visibility=["read"]) + """Indicates if it's a read-only (true) or modifiable (false) configuration (also known as server + parameter).""" + is_config_pending_restart: Optional[bool] = rest_field(name="isConfigPendingRestart", visibility=["read"]) + """Indicates if the value assigned to the configuration (also known as server parameter) is + pending a server restart for it to take effect.""" + unit: Optional[str] = rest_field(visibility=["read"]) + """Units in which the configuration (also known as server parameter) value is expressed.""" + documentation_link: Optional[str] = rest_field(name="documentationLink", visibility=["read"]) + """Link pointing to the documentation of the configuration (also known as server parameter).""" + + @overload + def __init__( + self, + *, + value: Optional[str] = None, + source: Optional[str] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Database(ProxyResource): + """Represents a database. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a database. + :vartype properties: ~azure.mgmt.postgresql.models.DatabaseProperties + """ + + properties: Optional["_models.DatabaseProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a database.""" + + __flattened_items = ["charset", "collation"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.DatabaseProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class DatabaseMigrationState(_Model): + """Migration state of a database. + + :ivar database_name: Name of database. + :vartype database_name: str + :ivar migration_state: Migration state of a database. Known values are: "InProgress", + "WaitingForCutoverTrigger", "Failed", "Canceled", "Succeeded", and "Canceling". + :vartype migration_state: str or ~azure.mgmt.postgresql.models.MigrationDatabaseState + :ivar migration_operation: Migration operation of a database. + :vartype migration_operation: str + :ivar started_on: Start time of a migration state. + :vartype started_on: ~datetime.datetime + :ivar ended_on: End time of a migration state. + :vartype ended_on: ~datetime.datetime + :ivar full_load_queued_tables: Number of tables queued for the migration of a database. + :vartype full_load_queued_tables: int + :ivar full_load_errored_tables: Number of tables encountering errors during the migration of a + database. + :vartype full_load_errored_tables: int + :ivar full_load_loading_tables: Number of tables loading during the migration of a database. 
+ :vartype full_load_loading_tables: int + :ivar full_load_completed_tables: Number of tables loaded during the migration of a database. + :vartype full_load_completed_tables: int + :ivar cdc_update_counter: Change Data Capture update counter. + :vartype cdc_update_counter: int + :ivar cdc_delete_counter: Change Data Capture delete counter. + :vartype cdc_delete_counter: int + :ivar cdc_insert_counter: Change Data Capture insert counter. + :vartype cdc_insert_counter: int + :ivar applied_changes: Change Data Capture applied changes counter. + :vartype applied_changes: int + :ivar incoming_changes: Change Data Capture incoming changes counter. + :vartype incoming_changes: int + :ivar latency: Lag in seconds between source and target during online phase. + :vartype latency: int + :ivar message: Error message, if any, for the migration state. + :vartype message: str + """ + + database_name: Optional[str] = rest_field( + name="databaseName", visibility=["read", "create", "update", "delete", "query"] + ) + """Name of database.""" + migration_state: Optional[Union[str, "_models.MigrationDatabaseState"]] = rest_field( + name="migrationState", visibility=["read", "create", "update", "delete", "query"] + ) + """Migration state of a database. 
Known values are: \"InProgress\", \"WaitingForCutoverTrigger\", + \"Failed\", \"Canceled\", \"Succeeded\", and \"Canceling\".""" + migration_operation: Optional[str] = rest_field( + name="migrationOperation", visibility=["read", "create", "update", "delete", "query"] + ) + """Migration operation of a database.""" + started_on: Optional[datetime.datetime] = rest_field( + name="startedOn", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start time of a migration state.""" + ended_on: Optional[datetime.datetime] = rest_field( + name="endedOn", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time of a migration state.""" + full_load_queued_tables: Optional[int] = rest_field( + name="fullLoadQueuedTables", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of tables queued for the migration of a database.""" + full_load_errored_tables: Optional[int] = rest_field( + name="fullLoadErroredTables", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of tables encountering errors during the migration of a database.""" + full_load_loading_tables: Optional[int] = rest_field( + name="fullLoadLoadingTables", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of tables loading during the migration of a database.""" + full_load_completed_tables: Optional[int] = rest_field( + name="fullLoadCompletedTables", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of tables loaded during the migration of a database.""" + cdc_update_counter: Optional[int] = rest_field( + name="cdcUpdateCounter", visibility=["read", "create", "update", "delete", "query"] + ) + """Change Data Capture update counter.""" + cdc_delete_counter: Optional[int] = rest_field( + name="cdcDeleteCounter", visibility=["read", "create", "update", "delete", "query"] + ) + """Change Data Capture delete counter.""" + cdc_insert_counter: Optional[int] = 
rest_field( + name="cdcInsertCounter", visibility=["read", "create", "update", "delete", "query"] + ) + """Change Data Capture insert counter.""" + applied_changes: Optional[int] = rest_field( + name="appliedChanges", visibility=["read", "create", "update", "delete", "query"] + ) + """Change Data Capture applied changes counter.""" + incoming_changes: Optional[int] = rest_field( + name="incomingChanges", visibility=["read", "create", "update", "delete", "query"] + ) + """Change Data Capture incoming changes counter.""" + latency: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Lag in seconds between source and target during online phase.""" + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error message, if any, for the migration state.""" + + @overload + def __init__( + self, + *, + database_name: Optional[str] = None, + migration_state: Optional[Union[str, "_models.MigrationDatabaseState"]] = None, + migration_operation: Optional[str] = None, + started_on: Optional[datetime.datetime] = None, + ended_on: Optional[datetime.datetime] = None, + full_load_queued_tables: Optional[int] = None, + full_load_errored_tables: Optional[int] = None, + full_load_loading_tables: Optional[int] = None, + full_load_completed_tables: Optional[int] = None, + cdc_update_counter: Optional[int] = None, + cdc_delete_counter: Optional[int] = None, + cdc_insert_counter: Optional[int] = None, + applied_changes: Optional[int] = None, + incoming_changes: Optional[int] = None, + latency: Optional[int] = None, + message: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DatabaseProperties(_Model): + """Properties of a database. 
+ + :ivar charset: Character set of the database. + :vartype charset: str + :ivar collation: Collation of the database. + :vartype collation: str + """ + + charset: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Character set of the database.""" + collation: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Collation of the database.""" + + @overload + def __init__( + self, + *, + charset: Optional[str] = None, + collation: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DataEncryption(_Model): + """Data encryption properties of a server. + + :ivar primary_key_uri: URI of the key in Azure Key Vault used for data encryption of the + primary storage associated to a server. + :vartype primary_key_uri: str + :ivar primary_user_assigned_identity_id: Identifier of the user assigned managed identity used + to access the key in Azure Key Vault for data encryption of the primary storage associated to a + server. + :vartype primary_user_assigned_identity_id: str + :ivar geo_backup_key_uri: Identifier of the user assigned managed identity used to access the + key in Azure Key Vault for data encryption of the geographically redundant storage associated + to a server that is configured to support geographically redundant backups. + :vartype geo_backup_key_uri: str + :ivar geo_backup_user_assigned_identity_id: Identifier of the user assigned managed identity + used to access the key in Azure Key Vault for data encryption of the geographically redundant + storage associated to a server that is configured to support geographically redundant backups. 
+ :vartype geo_backup_user_assigned_identity_id: str + :ivar type: Data encryption type used by a server. Known values are: "SystemManaged" and + "AzureKeyVault". + :vartype type: str or ~azure.mgmt.postgresql.models.DataEncryptionType + :ivar primary_encryption_key_status: Status of key used by a server configured with data + encryption based on customer managed key, to encrypt the primary storage associated to the + server. Known values are: "Valid" and "Invalid". + :vartype primary_encryption_key_status: str or + ~azure.mgmt.postgresql.models.EncryptionKeyStatus + :ivar geo_backup_encryption_key_status: Status of key used by a server configured with data + encryption based on customer managed key, to encrypt the geographically redundant storage + associated to the server when it is configured to support geographically redundant backups. + Known values are: "Valid" and "Invalid". + :vartype geo_backup_encryption_key_status: str or + ~azure.mgmt.postgresql.models.EncryptionKeyStatus + """ + + primary_key_uri: Optional[str] = rest_field( + name="primaryKeyURI", visibility=["read", "create", "update", "delete", "query"] + ) + """URI of the key in Azure Key Vault used for data encryption of the primary storage associated to + a server.""" + primary_user_assigned_identity_id: Optional[str] = rest_field( + name="primaryUserAssignedIdentityId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the user assigned managed identity used to access the key in Azure Key Vault for + data encryption of the primary storage associated to a server.""" + geo_backup_key_uri: Optional[str] = rest_field( + name="geoBackupKeyURI", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the user assigned managed identity used to access the key in Azure Key Vault for + data encryption of the geographically redundant storage associated to a server that is + configured to support geographically redundant backups.""" + 
geo_backup_user_assigned_identity_id: Optional[str] = rest_field( + name="geoBackupUserAssignedIdentityId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the user assigned managed identity used to access the key in Azure Key Vault for + data encryption of the geographically redundant storage associated to a server that is + configured to support geographically redundant backups.""" + type: Optional[Union[str, "_models.DataEncryptionType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Data encryption type used by a server. Known values are: \"SystemManaged\" and + \"AzureKeyVault\".""" + primary_encryption_key_status: Optional[Union[str, "_models.EncryptionKeyStatus"]] = rest_field( + name="primaryEncryptionKeyStatus", visibility=["read"] + ) + """Status of key used by a server configured with data encryption based on customer managed key, + to encrypt the primary storage associated to the server. Known values are: \"Valid\" and + \"Invalid\".""" + geo_backup_encryption_key_status: Optional[Union[str, "_models.EncryptionKeyStatus"]] = rest_field( + name="geoBackupEncryptionKeyStatus", visibility=["read"] + ) + """Status of key used by a server configured with data encryption based on customer managed key, + to encrypt the geographically redundant storage associated to the server when it is configured + to support geographically redundant backups. Known values are: \"Valid\" and \"Invalid\".""" + + @overload + def __init__( + self, + *, + primary_key_uri: Optional[str] = None, + primary_user_assigned_identity_id: Optional[str] = None, + geo_backup_key_uri: Optional[str] = None, + geo_backup_user_assigned_identity_id: Optional[str] = None, + type: Optional[Union[str, "_models.DataEncryptionType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DbLevelValidationStatus(_Model): + """Validation status summary for a database. + + :ivar database_name: Name of database. + :vartype database_name: str + :ivar started_on: Start time of a database level validation. + :vartype started_on: ~datetime.datetime + :ivar ended_on: End time of a database level validation. + :vartype ended_on: ~datetime.datetime + :ivar summary: Summary of database level validations. + :vartype summary: list[~azure.mgmt.postgresql.models.ValidationSummaryItem] + """ + + database_name: Optional[str] = rest_field( + name="databaseName", visibility=["read", "create", "update", "delete", "query"] + ) + """Name of database.""" + started_on: Optional[datetime.datetime] = rest_field( + name="startedOn", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start time of a database level validation.""" + ended_on: Optional[datetime.datetime] = rest_field( + name="endedOn", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time of a database level validation.""" + summary: Optional[list["_models.ValidationSummaryItem"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Summary of database level validations.""" + + @overload + def __init__( + self, + *, + database_name: Optional[str] = None, + started_on: Optional[datetime.datetime] = None, + ended_on: Optional[datetime.datetime] = None, + summary: Optional[list["_models.ValidationSummaryItem"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DbServerMetadata(_Model): + """Database server metadata. 
+ + :ivar location: Location of database server. + :vartype location: str + :ivar version: Major version of PostgreSQL database engine. + :vartype version: str + :ivar storage_mb: Storage size (in MB) for database server. + :vartype storage_mb: int + :ivar sku: Compute tier and size of the database server. This object is empty for an Azure + Database for PostgreSQL single server. + :vartype sku: ~azure.mgmt.postgresql.models.ServerSku + """ + + location: Optional[str] = rest_field(visibility=["read"]) + """Location of database server.""" + version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Major version of PostgreSQL database engine.""" + storage_mb: Optional[int] = rest_field(name="storageMb", visibility=["read", "create", "update", "delete", "query"]) + """Storage size (in MB) for database server.""" + sku: Optional["_models.ServerSku"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Compute tier and size of the database server. This object is empty for an Azure Database for + PostgreSQL single server.""" + + @overload + def __init__( + self, + *, + version: Optional[str] = None, + storage_mb: Optional[int] = None, + sku: Optional["_models.ServerSku"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DelegatedSubnetUsage(_Model): + """Delegated subnet usage data. + + :ivar subnet_name: Name of the delegated subnet for which IP addresses are in use. + :vartype subnet_name: str + :ivar usage: Number of IP addresses used by the delegated subnet. 
+ :vartype usage: int + """ + + subnet_name: Optional[str] = rest_field(name="subnetName", visibility=["read"]) + """Name of the delegated subnet for which IP addresses are in use.""" + usage: Optional[int] = rest_field(visibility=["read"]) + """Number of IP addresses used by the delegated subnet.""" + + +class ErrorAdditionalInfo(_Model): + """The resource management error additional info. + + :ivar type: The additional info type. + :vartype type: str + :ivar info: The additional info. + :vartype info: any + """ + + type: Optional[str] = rest_field(visibility=["read"]) + """The additional info type.""" + info: Optional[Any] = rest_field(visibility=["read"]) + """The additional info.""" + + +class ErrorDetail(_Model): + """The error detail. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.mgmt.postgresql.models.ErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: list[~azure.mgmt.postgresql.models.ErrorAdditionalInfo] + """ + + code: Optional[str] = rest_field(visibility=["read"]) + """The error code.""" + message: Optional[str] = rest_field(visibility=["read"]) + """The error message.""" + target: Optional[str] = rest_field(visibility=["read"]) + """The error target.""" + details: Optional[list["_models.ErrorDetail"]] = rest_field(visibility=["read"]) + """The error details.""" + additional_info: Optional[list["_models.ErrorAdditionalInfo"]] = rest_field( + name="additionalInfo", visibility=["read"] + ) + """The error additional info.""" + + +class ErrorResponse(_Model): + """Error response. + + :ivar error: The error object. 
+ :vartype error: ~azure.mgmt.postgresql.models.ErrorDetail + """ + + error: Optional["_models.ErrorDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error object.""" + + @overload + def __init__( + self, + *, + error: Optional["_models.ErrorDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FastProvisioningEditionCapability(CapabilityBase): + """Capability of a fast provisioning compute tier. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar supported_tier: Compute tier supporting fast provisioning. + :vartype supported_tier: str + :ivar supported_sku: Compute name (SKU) supporting fast provisioning. + :vartype supported_sku: str + :ivar supported_storage_gb: Storage size (in GB) supporting fast provisioning. + :vartype supported_storage_gb: int + :ivar supported_server_versions: Major version of PostgreSQL database engine supporting fast + provisioning. + :vartype supported_server_versions: str + :ivar server_count: Count of servers in cache matching this specification. 
+ :vartype server_count: int + """ + + supported_tier: Optional[str] = rest_field(name="supportedTier", visibility=["read"]) + """Compute tier supporting fast provisioning.""" + supported_sku: Optional[str] = rest_field(name="supportedSku", visibility=["read"]) + """Compute name (SKU) supporting fast provisioning.""" + supported_storage_gb: Optional[int] = rest_field(name="supportedStorageGb", visibility=["read"]) + """Storage size (in GB) supporting fast provisioning.""" + supported_server_versions: Optional[str] = rest_field(name="supportedServerVersions", visibility=["read"]) + """Major version of PostgreSQL database engine supporting fast provisioning.""" + server_count: Optional[int] = rest_field(name="serverCount", visibility=["read"]) + """Count of servers in cache matching this specification.""" + + +class FirewallRule(ProxyResource): + """Firewall rule. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of a firewall rule. Required. + :vartype properties: ~azure.mgmt.postgresql.models.FirewallRuleProperties + """ + + properties: "_models.FirewallRuleProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a firewall rule. Required.""" + + __flattened_items = ["start_ip_address", "end_ip_address"] + + @overload + def __init__( + self, + *, + properties: "_models.FirewallRuleProperties", + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class FirewallRuleProperties(_Model): + """Properties of a firewall rule. + + :ivar start_ip_address: IP address defining the start of the range of addresses of a firewall + rule. Must be expressed in IPv4 format. Required. + :vartype start_ip_address: str + :ivar end_ip_address: IP address defining the end of the range of addresses of a firewall rule. + Must be expressed in IPv4 format. Required. + :vartype end_ip_address: str + """ + + start_ip_address: str = rest_field( + name="startIpAddress", visibility=["read", "create", "update", "delete", "query"] + ) + """IP address defining the start of the range of addresses of a firewall rule. Must be expressed + in IPv4 format. Required.""" + end_ip_address: str = rest_field(name="endIpAddress", visibility=["read", "create", "update", "delete", "query"]) + """IP address defining the end of the range of addresses of a firewall rule. Must be expressed in + IPv4 format. Required.""" + + @overload + def __init__( + self, + *, + start_ip_address: str, + end_ip_address: str, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HighAvailability(_Model): + """High availability properties of a server. + + :ivar mode: High availability mode for a server. Known values are: "Disabled", "ZoneRedundant", + and "SameZone". + :vartype mode: str or + ~azure.mgmt.postgresql.models.PostgreSqlFlexibleServerHighAvailabilityMode + :ivar state: Possible states of the standby server created when high availability is set to + SameZone or ZoneRedundant. Known values are: "NotEnabled", "CreatingStandby", + "ReplicatingData", "FailingOver", "Healthy", and "RemovingStandby". + :vartype state: str or ~azure.mgmt.postgresql.models.HighAvailabilityState + :ivar standby_availability_zone: Availability zone associated to the standby server created + when high availability is set to SameZone or ZoneRedundant. + :vartype standby_availability_zone: str + """ + + mode: Optional[Union[str, "_models.PostgreSqlFlexibleServerHighAvailabilityMode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """High availability mode for a server. Known values are: \"Disabled\", \"ZoneRedundant\", and + \"SameZone\".""" + state: Optional[Union[str, "_models.HighAvailabilityState"]] = rest_field(visibility=["read"]) + """Possible states of the standby server created when high availability is set to SameZone or + ZoneRedundant. 
Known values are: \"NotEnabled\", \"CreatingStandby\", \"ReplicatingData\", + \"FailingOver\", \"Healthy\", and \"RemovingStandby\".""" + standby_availability_zone: Optional[str] = rest_field( + name="standbyAvailabilityZone", visibility=["read", "create", "update", "delete", "query"] + ) + """Availability zone associated to the standby server created when high availability is set to + SameZone or ZoneRedundant.""" + + @overload + def __init__( + self, + *, + mode: Optional[Union[str, "_models.PostgreSqlFlexibleServerHighAvailabilityMode"]] = None, + standby_availability_zone: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HighAvailabilityForPatch(_Model): + """High availability properties of a server. + + :ivar mode: High availability mode for a server. Known values are: "Disabled", "ZoneRedundant", + and "SameZone". + :vartype mode: str or + ~azure.mgmt.postgresql.models.PostgreSqlFlexibleServerHighAvailabilityMode + :ivar state: Possible states of the standby server created when high availability is set to + SameZone or ZoneRedundant. Known values are: "NotEnabled", "CreatingStandby", + "ReplicatingData", "FailingOver", "Healthy", and "RemovingStandby". + :vartype state: str or ~azure.mgmt.postgresql.models.HighAvailabilityState + :ivar standby_availability_zone: Availability zone associated to the standby server created + when high availability is set to SameZone or ZoneRedundant. + :vartype standby_availability_zone: str + """ + + mode: Optional[Union[str, "_models.PostgreSqlFlexibleServerHighAvailabilityMode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """High availability mode for a server. 
Known values are: \"Disabled\", \"ZoneRedundant\", and + \"SameZone\".""" + state: Optional[Union[str, "_models.HighAvailabilityState"]] = rest_field(visibility=["read"]) + """Possible states of the standby server created when high availability is set to SameZone or + ZoneRedundant. Known values are: \"NotEnabled\", \"CreatingStandby\", \"ReplicatingData\", + \"FailingOver\", \"Healthy\", and \"RemovingStandby\".""" + standby_availability_zone: Optional[str] = rest_field( + name="standbyAvailabilityZone", visibility=["read", "create", "update", "delete", "query"] + ) + """Availability zone associated to the standby server created when high availability is set to + SameZone or ZoneRedundant.""" + + @overload + def __init__( + self, + *, + mode: Optional[Union[str, "_models.PostgreSqlFlexibleServerHighAvailabilityMode"]] = None, + standby_availability_zone: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ImpactRecord(_Model): + """Impact on some metric if this recommended action is applied. + + :ivar dimension_name: Dimension name. + :vartype dimension_name: str + :ivar unit: Dimension unit. + :vartype unit: str + :ivar query_id: Optional property that can be used to store the identifier of the query, if the + metric is for a specific query. + :vartype query_id: int + :ivar absolute_value: Absolute value. 
+ :vartype absolute_value: float + """ + + dimension_name: Optional[str] = rest_field( + name="dimensionName", visibility=["read", "create", "update", "delete", "query"] + ) + """Dimension name.""" + unit: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Dimension unit.""" + query_id: Optional[int] = rest_field(name="queryId", visibility=["read", "create", "update", "delete", "query"]) + """Optional property that can be used to store the identifier of the query, if the metric is for a + specific query.""" + absolute_value: Optional[float] = rest_field( + name="absoluteValue", visibility=["read", "create", "update", "delete", "query"] + ) + """Absolute value.""" + + @overload + def __init__( + self, + *, + dimension_name: Optional[str] = None, + unit: Optional[str] = None, + query_id: Optional[int] = None, + absolute_value: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LtrBackupOperationResponseProperties(_Model): + """Response for the backup request. + + :ivar datasource_size_in_bytes: Size of datasource in bytes. + :vartype datasource_size_in_bytes: int + :ivar data_transferred_in_bytes: Data transferred in bytes. + :vartype data_transferred_in_bytes: int + :ivar backup_name: Name of Backup operation. + :vartype backup_name: str + :ivar backup_metadata: Metadata to be stored in RP. Store everything that will be required to + perform a successful restore using this Recovery point. e.g. Versions, DataFormat etc. + :vartype backup_metadata: str + :ivar status: Service-set extensible enum indicating the status of operation. Required. Known + values are: "Running", "Cancelled", "Failed", and "Succeeded". 
+ :vartype status: str or ~azure.mgmt.postgresql.models.ExecutionStatus + :ivar start_time: Start time of the operation. Required. + :vartype start_time: ~datetime.datetime + :ivar end_time: End time of the operation. + :vartype end_time: ~datetime.datetime + :ivar percent_complete: PercentageCompleted. + :vartype percent_complete: float + :ivar error_code: The error code. + :vartype error_code: str + :ivar error_message: The error message. + :vartype error_message: str + """ + + datasource_size_in_bytes: Optional[int] = rest_field( + name="datasourceSizeInBytes", visibility=["read", "create", "update", "delete", "query"] + ) + """Size of datasource in bytes.""" + data_transferred_in_bytes: Optional[int] = rest_field( + name="dataTransferredInBytes", visibility=["read", "create", "update", "delete", "query"] + ) + """Data transferred in bytes.""" + backup_name: Optional[str] = rest_field( + name="backupName", visibility=["read", "create", "update", "delete", "query"] + ) + """Name of Backup operation.""" + backup_metadata: Optional[str] = rest_field( + name="backupMetadata", visibility=["read", "create", "update", "delete", "query"] + ) + """Metadata to be stored in RP. Store everything that will be required to perform a successful + restore using this Recovery point. e.g. Versions, DataFormat etc.""" + status: Union[str, "_models.ExecutionStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Service-set extensible enum indicating the status of operation. Required. Known values are: + \"Running\", \"Cancelled\", \"Failed\", and \"Succeeded\".""" + start_time: datetime.datetime = rest_field( + name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start time of the operation. 
Required.""" + end_time: Optional[datetime.datetime] = rest_field( + name="endTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time of the operation.""" + percent_complete: Optional[float] = rest_field( + name="percentComplete", visibility=["read", "create", "update", "delete", "query"] + ) + """PercentageCompleted.""" + error_code: Optional[str] = rest_field(name="errorCode", visibility=["read"]) + """The error code.""" + error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) + """The error message.""" + + @overload + def __init__( + self, + *, + status: Union[str, "_models.ExecutionStatus"], + start_time: datetime.datetime, + datasource_size_in_bytes: Optional[int] = None, + data_transferred_in_bytes: Optional[int] = None, + backup_name: Optional[str] = None, + backup_metadata: Optional[str] = None, + end_time: Optional[datetime.datetime] = None, + percent_complete: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LtrPreBackupRequest(BackupRequestBase): + """A request that is made for pre-backup. + + :ivar backup_settings: Backup Settings. Required. + :vartype backup_settings: ~azure.mgmt.postgresql.models.BackupSettings + """ + + @overload + def __init__( + self, + *, + backup_settings: "_models.BackupSettings", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LtrPreBackupResponse(_Model): + """Response for the LTR pre-backup API call. 
+ + :ivar properties: Additional Properties for the pre backup response. Required. + :vartype properties: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponseProperties + """ + + properties: "_models.BackupsLongTermRetentionResponseProperties" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Additional Properties for the pre backup response. Required.""" + + __flattened_items = ["number_of_containers"] + + @overload + def __init__( + self, + *, + properties: "_models.BackupsLongTermRetentionResponseProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class MaintenanceWindow(_Model): + """Maintenance window properties of a server. + + :ivar custom_window: Indicates whether custom window is enabled or disabled. + :vartype custom_window: str + :ivar start_hour: Start hour to be used for maintenance window. + :vartype start_hour: int + :ivar start_minute: Start minute to be used for maintenance window. + :vartype start_minute: int + :ivar day_of_week: Day of the week to be used for maintenance window. 
+ :vartype day_of_week: int + """ + + custom_window: Optional[str] = rest_field( + name="customWindow", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates whether custom window is enabled or disabled.""" + start_hour: Optional[int] = rest_field(name="startHour", visibility=["read", "create", "update", "delete", "query"]) + """Start hour to be used for maintenance window.""" + start_minute: Optional[int] = rest_field( + name="startMinute", visibility=["read", "create", "update", "delete", "query"] + ) + """Start minute to be used for maintenance window.""" + day_of_week: Optional[int] = rest_field( + name="dayOfWeek", visibility=["read", "create", "update", "delete", "query"] + ) + """Day of the week to be used for maintenance window.""" + + @overload + def __init__( + self, + *, + custom_window: Optional[str] = None, + start_hour: Optional[int] = None, + start_minute: Optional[int] = None, + day_of_week: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MaintenanceWindowForPatch(_Model): + """Maintenance window properties of a server. + + :ivar custom_window: Indicates whether custom window is enabled or disabled. + :vartype custom_window: str + :ivar start_hour: Start hour to be used for maintenance window. + :vartype start_hour: int + :ivar start_minute: Start minute to be used for maintenance window. + :vartype start_minute: int + :ivar day_of_week: Day of the week to be used for maintenance window. 
+ :vartype day_of_week: int + """ + + custom_window: Optional[str] = rest_field( + name="customWindow", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates whether custom window is enabled or disabled.""" + start_hour: Optional[int] = rest_field(name="startHour", visibility=["read", "create", "update", "delete", "query"]) + """Start hour to be used for maintenance window.""" + start_minute: Optional[int] = rest_field( + name="startMinute", visibility=["read", "create", "update", "delete", "query"] + ) + """Start minute to be used for maintenance window.""" + day_of_week: Optional[int] = rest_field( + name="dayOfWeek", visibility=["read", "create", "update", "delete", "query"] + ) + """Day of the week to be used for maintenance window.""" + + @overload + def __init__( + self, + *, + custom_window: Optional[str] = None, + start_hour: Optional[int] = None, + start_minute: Optional[int] = None, + day_of_week: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrateNetworkStatus(_Model): + """The status of a network migration operation. + + :ivar subscription_id: The ID of the subscription. + :vartype subscription_id: str + :ivar resource_group_name: The name of the resource group. + :vartype resource_group_name: str + :ivar server_name: The name of the server. + :vartype server_name: str + :ivar state: The state of the network migration operation. Known values are: "Pending", + "InProgress", "Succeeded", "Failed", "CancelInProgress", and "Cancelled". 
+ :vartype state: str or ~azure.mgmt.postgresql.models.NetworkMigrationState + """ + + subscription_id: Optional[str] = rest_field( + name="subscriptionId", visibility=["read", "create", "update", "delete", "query"] + ) + """The ID of the subscription.""" + resource_group_name: Optional[str] = rest_field( + name="resourceGroupName", visibility=["read", "create", "update", "delete", "query"] + ) + """The name of the resource group.""" + server_name: Optional[str] = rest_field( + name="serverName", visibility=["read", "create", "update", "delete", "query"] + ) + """The name of the server.""" + state: Optional[Union[str, "_models.NetworkMigrationState"]] = rest_field(visibility=["read"]) + """The state of the network migration operation. Known values are: \"Pending\", \"InProgress\", + \"Succeeded\", \"Failed\", \"CancelInProgress\", and \"Cancelled\".""" + + @overload + def __init__( + self, + *, + subscription_id: Optional[str] = None, + resource_group_name: Optional[str] = None, + server_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TrackedResource(Resource): + """Tracked Resource. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar tags: Resource tags. 
+ :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + """ + + tags: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Resource tags.""" + location: str = rest_field(visibility=["read", "create"]) + """The geo-location where the resource lives. Required.""" + + @overload + def __init__( + self, + *, + location: str, + tags: Optional[dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Migration(TrackedResource): + """Properties of a migration. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + :ivar properties: Migration properties. 
+ :vartype properties: ~azure.mgmt.postgresql.models.MigrationProperties + """ + + properties: Optional["_models.MigrationProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Migration properties.""" + + __flattened_items = [ + "migration_id", + "current_status", + "migration_instance_resource_id", + "migration_mode", + "migration_option", + "source_type", + "ssl_mode", + "source_db_server_metadata", + "target_db_server_metadata", + "source_db_server_resource_id", + "source_db_server_fully_qualified_domain_name", + "target_db_server_resource_id", + "target_db_server_fully_qualified_domain_name", + "secret_parameters", + "dbs_to_migrate", + "setup_logical_replication_on_source_db_if_needed", + "overwrite_dbs_in_target", + "migration_window_start_time_in_utc", + "migration_window_end_time_in_utc", + "migrate_roles", + "start_data_migration", + "trigger_cutover", + "dbs_to_trigger_cutover_on", + "cancel", + "dbs_to_cancel_migration_on", + ] + + @overload + def __init__( + self, + *, + location: str, + tags: Optional[dict[str, str]] = None, + properties: Optional["_models.MigrationProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class MigrationNameAvailability(_Model): + """Availability of a migration name. + + :ivar name: Name of the migration to check for validity and availability. Required. + :vartype name: str + :ivar type: Type of resource. Required. + :vartype type: str + :ivar name_available: Indicates if the migration name is available. + :vartype name_available: bool + :ivar reason: Migration name availability reason. Known values are: "Invalid" and + "AlreadyExists". + :vartype reason: str or ~azure.mgmt.postgresql.models.MigrationNameAvailabilityReason + :ivar message: Migration name availability message. + :vartype message: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the migration to check for validity and availability. Required.""" + type: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Type of resource. 
Required.""" + name_available: Optional[bool] = rest_field(name="nameAvailable", visibility=["read"]) + """Indicates if the migration name is available.""" + reason: Optional[Union[str, "_models.MigrationNameAvailabilityReason"]] = rest_field(visibility=["read"]) + """Migration name availability reason. Known values are: \"Invalid\" and \"AlreadyExists\".""" + message: Optional[str] = rest_field(visibility=["read"]) + """Migration name availability message.""" + + @overload + def __init__( + self, + *, + name: str, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrationProperties(_Model): + """Migration. + + :ivar migration_id: Identifier of a migration. + :vartype migration_id: str + :ivar current_status: Current status of a migration. + :vartype current_status: ~azure.mgmt.postgresql.models.MigrationStatus + :ivar migration_instance_resource_id: Identifier of the private endpoint migration instance. + :vartype migration_instance_resource_id: str + :ivar migration_mode: Mode used to perform the migration: Online or Offline. Known values are: + "Offline" and "Online". + :vartype migration_mode: str or ~azure.mgmt.postgresql.models.MigrationMode + :ivar migration_option: Supported option for a migration. Known values are: "Validate", + "Migrate", and "ValidateAndMigrate". 
+ :vartype migration_option: str or ~azure.mgmt.postgresql.models.MigrationOption + :ivar source_type: Source server type used for the migration: ApsaraDB_RDS, AWS, AWS_AURORA, + AWS_EC2, AWS_RDS, AzureVM, Crunchy_PostgreSQL, Digital_Ocean_Droplets, + Digital_Ocean_PostgreSQL, EDB, EDB_Oracle_Server, EDB_PostgreSQL, GCP, GCP_AlloyDB, + GCP_CloudSQL, GCP_Compute, Heroku_PostgreSQL, Huawei_Compute, Huawei_RDS, OnPremises, + PostgreSQLCosmosDB, PostgreSQLFlexibleServer, PostgreSQLSingleServer, or Supabase_PostgreSQL. + Known values are: "OnPremises", "AWS", "GCP", "AzureVM", "PostgreSQLSingleServer", "AWS_RDS", + "AWS_AURORA", "AWS_EC2", "GCP_CloudSQL", "GCP_AlloyDB", "GCP_Compute", "EDB", + "EDB_Oracle_Server", "EDB_PostgreSQL", "PostgreSQLFlexibleServer", "PostgreSQLCosmosDB", + "Huawei_RDS", "Huawei_Compute", "Heroku_PostgreSQL", "Crunchy_PostgreSQL", "ApsaraDB_RDS", + "Digital_Ocean_Droplets", "Digital_Ocean_PostgreSQL", and "Supabase_PostgreSQL". + :vartype source_type: str or ~azure.mgmt.postgresql.models.SourceType + :ivar ssl_mode: SSL mode used by a migration. Default SSL mode for 'PostgreSQLSingleServer' is + 'VerifyFull'. Default SSL mode for other source types is 'Prefer'. Known values are: "Prefer", + "Require", "VerifyCA", and "VerifyFull". + :vartype ssl_mode: str or ~azure.mgmt.postgresql.models.SslMode + :ivar source_db_server_metadata: Metadata of source database server. + :vartype source_db_server_metadata: ~azure.mgmt.postgresql.models.DbServerMetadata + :ivar target_db_server_metadata: Metadata of target database server. + :vartype target_db_server_metadata: ~azure.mgmt.postgresql.models.DbServerMetadata + :ivar source_db_server_resource_id: Identifier of the source database server resource, when + 'sourceType' is 'PostgreSQLSingleServer'. For other source types this must be set to + ipaddress:port@username or hostname:port@username. 
+ :vartype source_db_server_resource_id: str + :ivar source_db_server_fully_qualified_domain_name: Fully qualified domain name (FQDN) or IP + address of the source server. This property is optional. When provided, the migration service + will always use it to connect to the source server. + :vartype source_db_server_fully_qualified_domain_name: str + :ivar target_db_server_resource_id: Identifier of the target database server resource. + :vartype target_db_server_resource_id: str + :ivar target_db_server_fully_qualified_domain_name: Fully qualified domain name (FQDN) or IP + address of the target server. This property is optional. When provided, the migration service + will always use it to connect to the target server. + :vartype target_db_server_fully_qualified_domain_name: str + :ivar secret_parameters: Migration secret parameters. + :vartype secret_parameters: ~azure.mgmt.postgresql.models.MigrationSecretParameters + :ivar dbs_to_migrate: Names of databases to migrate. + :vartype dbs_to_migrate: list[str] + :ivar setup_logical_replication_on_source_db_if_needed: Indicates whether to setup logical + replication on source server, if needed. Known values are: "True" and "False". + :vartype setup_logical_replication_on_source_db_if_needed: str or + ~azure.mgmt.postgresql.models.LogicalReplicationOnSourceServer + :ivar overwrite_dbs_in_target: Indicates if databases on the target server can be overwritten + when already present. If set to 'False', when the migration workflow detects that the database + already exists on the target server, it will wait for a confirmation. Known values are: "True" + and "False". + :vartype overwrite_dbs_in_target: str or + ~azure.mgmt.postgresql.models.OverwriteDatabasesOnTargetServer + :ivar migration_window_start_time_in_utc: Start time (UTC) for migration window. + :vartype migration_window_start_time_in_utc: ~datetime.datetime + :ivar migration_window_end_time_in_utc: End time (UTC) for migration window. 
+ :vartype migration_window_end_time_in_utc: ~datetime.datetime + :ivar migrate_roles: Indicates if roles and permissions must be migrated. Known values are: + "True" and "False". + :vartype migrate_roles: str or ~azure.mgmt.postgresql.models.MigrateRolesAndPermissions + :ivar start_data_migration: Indicates if data migration must start right away. Known values + are: "True" and "False". + :vartype start_data_migration: str or ~azure.mgmt.postgresql.models.StartDataMigration + :ivar trigger_cutover: Indicates if cutover must be triggered for the entire migration. Known + values are: "True" and "False". + :vartype trigger_cutover: str or ~azure.mgmt.postgresql.models.TriggerCutover + :ivar dbs_to_trigger_cutover_on: When you want to trigger cutover for specific databases set + 'triggerCutover' to 'True' and the names of the specific databases in this array. + :vartype dbs_to_trigger_cutover_on: list[str] + :ivar cancel: Indicates if cancel must be triggered for the entire migration. Known values are: + "True" and "False". + :vartype cancel: str or ~azure.mgmt.postgresql.models.Cancel + :ivar dbs_to_cancel_migration_on: When you want to trigger cancel for specific databases set + 'cancel' to 'True' and the names of the specific databases in this array. 
+ :vartype dbs_to_cancel_migration_on: list[str] + """ + + migration_id: Optional[str] = rest_field(name="migrationId", visibility=["read"]) + """Identifier of a migration.""" + current_status: Optional["_models.MigrationStatus"] = rest_field(name="currentStatus", visibility=["read"]) + """Current status of a migration.""" + migration_instance_resource_id: Optional[str] = rest_field( + name="migrationInstanceResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the private endpoint migration instance.""" + migration_mode: Optional[Union[str, "_models.MigrationMode"]] = rest_field( + name="migrationMode", visibility=["read", "create", "update", "delete", "query"] + ) + """Mode used to perform the migration: Online or Offline. Known values are: \"Offline\" and + \"Online\".""" + migration_option: Optional[Union[str, "_models.MigrationOption"]] = rest_field( + name="migrationOption", visibility=["read", "create", "update", "delete", "query"] + ) + """Supported option for a migration. Known values are: \"Validate\", \"Migrate\", and + \"ValidateAndMigrate\".""" + source_type: Optional[Union[str, "_models.SourceType"]] = rest_field( + name="sourceType", visibility=["read", "create", "update", "delete", "query"] + ) + """Source server type used for the migration: ApsaraDB_RDS, AWS, AWS_AURORA, AWS_EC2, AWS_RDS, + AzureVM, Crunchy_PostgreSQL, Digital_Ocean_Droplets, Digital_Ocean_PostgreSQL, EDB, + EDB_Oracle_Server, EDB_PostgreSQL, GCP, GCP_AlloyDB, GCP_CloudSQL, GCP_Compute, + Heroku_PostgreSQL, Huawei_Compute, Huawei_RDS, OnPremises, PostgreSQLCosmosDB, + PostgreSQLFlexibleServer, PostgreSQLSingleServer, or Supabase_PostgreSQL. 
Known values are: + \"OnPremises\", \"AWS\", \"GCP\", \"AzureVM\", \"PostgreSQLSingleServer\", \"AWS_RDS\", + \"AWS_AURORA\", \"AWS_EC2\", \"GCP_CloudSQL\", \"GCP_AlloyDB\", \"GCP_Compute\", \"EDB\", + \"EDB_Oracle_Server\", \"EDB_PostgreSQL\", \"PostgreSQLFlexibleServer\", + \"PostgreSQLCosmosDB\", \"Huawei_RDS\", \"Huawei_Compute\", \"Heroku_PostgreSQL\", + \"Crunchy_PostgreSQL\", \"ApsaraDB_RDS\", \"Digital_Ocean_Droplets\", + \"Digital_Ocean_PostgreSQL\", and \"Supabase_PostgreSQL\".""" + ssl_mode: Optional[Union[str, "_models.SslMode"]] = rest_field( + name="sslMode", visibility=["read", "create", "update", "delete", "query"] + ) + """SSL mode used by a migration. Default SSL mode for 'PostgreSQLSingleServer' is 'VerifyFull'. + Default SSL mode for other source types is 'Prefer'. Known values are: \"Prefer\", \"Require\", + \"VerifyCA\", and \"VerifyFull\".""" + source_db_server_metadata: Optional["_models.DbServerMetadata"] = rest_field( + name="sourceDbServerMetadata", visibility=["read"] + ) + """Metadata of source database server.""" + target_db_server_metadata: Optional["_models.DbServerMetadata"] = rest_field( + name="targetDbServerMetadata", visibility=["read"] + ) + """Metadata of target database server.""" + source_db_server_resource_id: Optional[str] = rest_field( + name="sourceDbServerResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the source database server resource, when 'sourceType' is + 'PostgreSQLSingleServer'. For other source types this must be set to ipaddress:port@username or + hostname:port@username.""" + source_db_server_fully_qualified_domain_name: Optional[str] = rest_field( + name="sourceDbServerFullyQualifiedDomainName", visibility=["read", "create", "update", "delete", "query"] + ) + """Fully qualified domain name (FQDN) or IP address of the source server. This property is + optional. 
When provided, the migration service will always use it to connect to the source + server.""" + target_db_server_resource_id: Optional[str] = rest_field(name="targetDbServerResourceId", visibility=["read"]) + """Identifier of the target database server resource.""" + target_db_server_fully_qualified_domain_name: Optional[str] = rest_field( + name="targetDbServerFullyQualifiedDomainName", visibility=["read", "create", "update", "delete", "query"] + ) + """Fully qualified domain name (FQDN) or IP address of the target server. This property is + optional. When provided, the migration service will always use it to connect to the target + server.""" + secret_parameters: Optional["_models.MigrationSecretParameters"] = rest_field( + name="secretParameters", visibility=["read", "create", "update", "delete", "query"] + ) + """Migration secret parameters.""" + dbs_to_migrate: Optional[list[str]] = rest_field( + name="dbsToMigrate", visibility=["read", "create", "update", "delete", "query"] + ) + """Names of databases to migrate.""" + setup_logical_replication_on_source_db_if_needed: Optional[ + Union[str, "_models.LogicalReplicationOnSourceServer"] + ] = rest_field( + name="setupLogicalReplicationOnSourceDbIfNeeded", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates whether to setup logical replication on source server, if needed. Known values are: + \"True\" and \"False\".""" + overwrite_dbs_in_target: Optional[Union[str, "_models.OverwriteDatabasesOnTargetServer"]] = rest_field( + name="overwriteDbsInTarget", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if databases on the target server can be overwritten when already present. If set to + 'False', when the migration workflow detects that the database already exists on the target + server, it will wait for a confirmation. 
Known values are: \"True\" and \"False\".""" + migration_window_start_time_in_utc: Optional[datetime.datetime] = rest_field( + name="migrationWindowStartTimeInUtc", + visibility=["read", "create", "update", "delete", "query"], + format="rfc3339", + ) + """Start time (UTC) for migration window.""" + migration_window_end_time_in_utc: Optional[datetime.datetime] = rest_field( + name="migrationWindowEndTimeInUtc", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time (UTC) for migration window.""" + migrate_roles: Optional[Union[str, "_models.MigrateRolesAndPermissions"]] = rest_field( + name="migrateRoles", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if roles and permissions must be migrated. Known values are: \"True\" and \"False\".""" + start_data_migration: Optional[Union[str, "_models.StartDataMigration"]] = rest_field( + name="startDataMigration", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if data migration must start right away. Known values are: \"True\" and \"False\".""" + trigger_cutover: Optional[Union[str, "_models.TriggerCutover"]] = rest_field( + name="triggerCutover", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if cutover must be triggered for the entire migration. Known values are: \"True\" and + \"False\".""" + dbs_to_trigger_cutover_on: Optional[list[str]] = rest_field( + name="dbsToTriggerCutoverOn", visibility=["read", "create", "update", "delete", "query"] + ) + """When you want to trigger cutover for specific databases set 'triggerCutover' to 'True' and the + names of the specific databases in this array.""" + cancel: Optional[Union[str, "_models.Cancel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if cancel must be triggered for the entire migration. 
Known values are: \"True\" and + \"False\".""" + dbs_to_cancel_migration_on: Optional[list[str]] = rest_field( + name="dbsToCancelMigrationOn", visibility=["read", "create", "update", "delete", "query"] + ) + """When you want to trigger cancel for specific databases set 'cancel' to 'True' and the + names of the specific databases in this array.""" + + @overload + def __init__( + self, + *, + migration_instance_resource_id: Optional[str] = None, + migration_mode: Optional[Union[str, "_models.MigrationMode"]] = None, + migration_option: Optional[Union[str, "_models.MigrationOption"]] = None, + source_type: Optional[Union[str, "_models.SourceType"]] = None, + ssl_mode: Optional[Union[str, "_models.SslMode"]] = None, + source_db_server_resource_id: Optional[str] = None, + source_db_server_fully_qualified_domain_name: Optional[str] = None, + target_db_server_fully_qualified_domain_name: Optional[str] = None, + secret_parameters: Optional["_models.MigrationSecretParameters"] = None, + dbs_to_migrate: Optional[list[str]] = None, + setup_logical_replication_on_source_db_if_needed: Optional[ + Union[str, "_models.LogicalReplicationOnSourceServer"] + ] = None, + overwrite_dbs_in_target: Optional[Union[str, "_models.OverwriteDatabasesOnTargetServer"]] = None, + migration_window_start_time_in_utc: Optional[datetime.datetime] = None, + migration_window_end_time_in_utc: Optional[datetime.datetime] = None, + migrate_roles: Optional[Union[str, "_models.MigrateRolesAndPermissions"]] = None, + start_data_migration: Optional[Union[str, "_models.StartDataMigration"]] = None, + trigger_cutover: Optional[Union[str, "_models.TriggerCutover"]] = None, + dbs_to_trigger_cutover_on: Optional[list[str]] = None, + cancel: Optional[Union[str, "_models.Cancel"]] = None, + dbs_to_cancel_migration_on: Optional[list[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrationPropertiesForPatch(_Model): + """Migration properties. + + :ivar source_db_server_resource_id: Identifier of the source database server resource, when + 'sourceType' is 'PostgreSQLSingleServer'. For other source types this must be set to + ipaddress:port@username or hostname:port@username. + :vartype source_db_server_resource_id: str + :ivar source_db_server_fully_qualified_domain_name: Fully qualified domain name (FQDN) or IP + address of the source server. This property is optional. When provided, the migration service + will always use it to connect to the source server. + :vartype source_db_server_fully_qualified_domain_name: str + :ivar target_db_server_fully_qualified_domain_name: Fully qualified domain name (FQDN) or IP + address of the target server. This property is optional. When provided, the migration service + will always use it to connect to the target server. + :vartype target_db_server_fully_qualified_domain_name: str + :ivar secret_parameters: Migration secret parameters. + :vartype secret_parameters: ~azure.mgmt.postgresql.models.MigrationSecretParametersForPatch + :ivar dbs_to_migrate: Names of databases to migrate. + :vartype dbs_to_migrate: list[str] + :ivar setup_logical_replication_on_source_db_if_needed: Indicates whether to setup logical + replication on source server, if needed. Known values are: "True" and "False". + :vartype setup_logical_replication_on_source_db_if_needed: str or + ~azure.mgmt.postgresql.models.LogicalReplicationOnSourceServer + :ivar overwrite_dbs_in_target: Indicates if databases on the target server can be overwritten + when already present. If set to 'False', when the migration workflow detects that the database + already exists on the target server, it will wait for a confirmation. Known values are: "True" + and "False". 
+ :vartype overwrite_dbs_in_target: str or + ~azure.mgmt.postgresql.models.OverwriteDatabasesOnTargetServer + :ivar migration_window_start_time_in_utc: Start time (UTC) for migration window. + :vartype migration_window_start_time_in_utc: ~datetime.datetime + :ivar migrate_roles: Indicates if roles and permissions must be migrated. Known values are: + "True" and "False". + :vartype migrate_roles: str or ~azure.mgmt.postgresql.models.MigrateRolesAndPermissions + :ivar start_data_migration: Indicates if data migration must start right away. Known values + are: "True" and "False". + :vartype start_data_migration: str or ~azure.mgmt.postgresql.models.StartDataMigration + :ivar trigger_cutover: Indicates if cutover must be triggered for the entire migration. Known + values are: "True" and "False". + :vartype trigger_cutover: str or ~azure.mgmt.postgresql.models.TriggerCutover + :ivar dbs_to_trigger_cutover_on: When you want to trigger cutover for specific databases set + 'triggerCutover' to 'True' and the names of the specific databases in this array. + :vartype dbs_to_trigger_cutover_on: list[str] + :ivar cancel: Indicates if cancel must be triggered for the entire migration. Known values are: + "True" and "False". + :vartype cancel: str or ~azure.mgmt.postgresql.models.Cancel + :ivar dbs_to_cancel_migration_on: When you want to trigger cancel for specific databases set + 'cancel' to 'True' and the names of the specific databases in this array. + :vartype dbs_to_cancel_migration_on: list[str] + :ivar migration_mode: Mode used to perform the migration: Online or Offline. Known values are: + "Offline" and "Online". + :vartype migration_mode: str or ~azure.mgmt.postgresql.models.MigrationMode + """ + + source_db_server_resource_id: Optional[str] = rest_field( + name="sourceDbServerResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the source database server resource, when 'sourceType' is + 'PostgreSQLSingleServer'. 
For other source types this must be set to ipaddress:port@username or + hostname:port@username.""" + source_db_server_fully_qualified_domain_name: Optional[str] = rest_field( + name="sourceDbServerFullyQualifiedDomainName", visibility=["read", "create", "update", "delete", "query"] + ) + """Fully qualified domain name (FQDN) or IP address of the source server. This property is + optional. When provided, the migration service will always use it to connect to the source + server.""" + target_db_server_fully_qualified_domain_name: Optional[str] = rest_field( + name="targetDbServerFullyQualifiedDomainName", visibility=["read", "create", "update", "delete", "query"] + ) + """Fully qualified domain name (FQDN) or IP address of the target server. This property is + optional. When provided, the migration service will always use it to connect to the target + server.""" + secret_parameters: Optional["_models.MigrationSecretParametersForPatch"] = rest_field( + name="secretParameters", visibility=["read", "create", "update", "delete", "query"] + ) + """Migration secret parameters.""" + dbs_to_migrate: Optional[list[str]] = rest_field( + name="dbsToMigrate", visibility=["read", "create", "update", "delete", "query"] + ) + """Names of databases to migrate.""" + setup_logical_replication_on_source_db_if_needed: Optional[ + Union[str, "_models.LogicalReplicationOnSourceServer"] + ] = rest_field( + name="setupLogicalReplicationOnSourceDbIfNeeded", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates whether to setup logical replication on source server, if needed. Known values are: + \"True\" and \"False\".""" + overwrite_dbs_in_target: Optional[Union[str, "_models.OverwriteDatabasesOnTargetServer"]] = rest_field( + name="overwriteDbsInTarget", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if databases on the target server can be overwritten when already present. 
If set to + 'False', when the migration workflow detects that the database already exists on the target + server, it will wait for a confirmation. Known values are: \"True\" and \"False\".""" + migration_window_start_time_in_utc: Optional[datetime.datetime] = rest_field( + name="migrationWindowStartTimeInUtc", + visibility=["read", "create", "update", "delete", "query"], + format="rfc3339", + ) + """Start time (UTC) for migration window.""" + migrate_roles: Optional[Union[str, "_models.MigrateRolesAndPermissions"]] = rest_field( + name="migrateRoles", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if roles and permissions must be migrated. Known values are: \"True\" and \"False\".""" + start_data_migration: Optional[Union[str, "_models.StartDataMigration"]] = rest_field( + name="startDataMigration", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if data migration must start right away. Known values are: \"True\" and \"False\".""" + trigger_cutover: Optional[Union[str, "_models.TriggerCutover"]] = rest_field( + name="triggerCutover", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if cutover must be triggered for the entire migration. Known values are: \"True\" and + \"False\".""" + dbs_to_trigger_cutover_on: Optional[list[str]] = rest_field( + name="dbsToTriggerCutoverOn", visibility=["read", "create", "update", "delete", "query"] + ) + """When you want to trigger cutover for specific databases set 'triggerCutover' to 'True' and the + names of the specific databases in this array.""" + cancel: Optional[Union[str, "_models.Cancel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if cancel must be triggered for the entire migration. 
Known values are: \"True\" and + \"False\".""" + dbs_to_cancel_migration_on: Optional[list[str]] = rest_field( + name="dbsToCancelMigrationOn", visibility=["read", "create", "update", "delete", "query"] + ) + """When you want to trigger cancel for specific databases set 'triggerCutover' to 'True' and the + names of the specific databases in this array.""" + migration_mode: Optional[Union[str, "_models.MigrationMode"]] = rest_field( + name="migrationMode", visibility=["read", "create", "update", "delete", "query"] + ) + """Mode used to perform the migration: Online or Offline. Known values are: \"Offline\" and + \"Online\".""" + + @overload + def __init__( + self, + *, + source_db_server_resource_id: Optional[str] = None, + source_db_server_fully_qualified_domain_name: Optional[str] = None, + target_db_server_fully_qualified_domain_name: Optional[str] = None, + secret_parameters: Optional["_models.MigrationSecretParametersForPatch"] = None, + dbs_to_migrate: Optional[list[str]] = None, + setup_logical_replication_on_source_db_if_needed: Optional[ + Union[str, "_models.LogicalReplicationOnSourceServer"] + ] = None, + overwrite_dbs_in_target: Optional[Union[str, "_models.OverwriteDatabasesOnTargetServer"]] = None, + migration_window_start_time_in_utc: Optional[datetime.datetime] = None, + migrate_roles: Optional[Union[str, "_models.MigrateRolesAndPermissions"]] = None, + start_data_migration: Optional[Union[str, "_models.StartDataMigration"]] = None, + trigger_cutover: Optional[Union[str, "_models.TriggerCutover"]] = None, + dbs_to_trigger_cutover_on: Optional[list[str]] = None, + cancel: Optional[Union[str, "_models.Cancel"]] = None, + dbs_to_cancel_migration_on: Optional[list[str]] = None, + migration_mode: Optional[Union[str, "_models.MigrationMode"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrationResourceForPatch(_Model): + """Migration. + + :ivar properties: Migration properties. + :vartype properties: ~azure.mgmt.postgresql.models.MigrationPropertiesForPatch + :ivar tags: Application-specific metadata in the form of key-value pairs. + :vartype tags: dict[str, str] + """ + + properties: Optional["_models.MigrationPropertiesForPatch"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Migration properties.""" + tags: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Application-specific metadata in the form of key-value pairs.""" + + __flattened_items = [ + "source_db_server_resource_id", + "source_db_server_fully_qualified_domain_name", + "target_db_server_fully_qualified_domain_name", + "secret_parameters", + "dbs_to_migrate", + "setup_logical_replication_on_source_db_if_needed", + "overwrite_dbs_in_target", + "migration_window_start_time_in_utc", + "migrate_roles", + "start_data_migration", + "trigger_cutover", + "dbs_to_trigger_cutover_on", + "cancel", + "dbs_to_cancel_migration_on", + "migration_mode", + ] + + @overload + def __init__( + self, + *, + properties: Optional["_models.MigrationPropertiesForPatch"] = None, + tags: Optional[dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class MigrationSecretParameters(_Model): + """Migration secret parameters. + + :ivar admin_credentials: Credentials of administrator users for source and target servers. + Required. + :vartype admin_credentials: ~azure.mgmt.postgresql.models.AdminCredentials + :ivar source_server_username: Gets or sets the name of the user for the source server. This + user doesn't need to be an administrator. + :vartype source_server_username: str + :ivar target_server_username: Gets or sets the name of the user for the target server. This + user doesn't need to be an administrator. + :vartype target_server_username: str + """ + + admin_credentials: "_models.AdminCredentials" = rest_field( + name="adminCredentials", visibility=["read", "create", "update", "delete", "query"] + ) + """Credentials of administrator users for source and target servers. Required.""" + source_server_username: Optional[str] = rest_field(name="sourceServerUsername", visibility=["create", "update"]) + """Gets or sets the name of the user for the source server. 
This user doesn't need to be an + administrator.""" + target_server_username: Optional[str] = rest_field(name="targetServerUsername", visibility=["create", "update"]) + """Gets or sets the name of the user for the target server. This user doesn't need to be an + administrator.""" + + @overload + def __init__( + self, + *, + admin_credentials: "_models.AdminCredentials", + source_server_username: Optional[str] = None, + target_server_username: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrationSecretParametersForPatch(_Model): + """Migration secret parameters. + + :ivar admin_credentials: Credentials of administrator users for source and target servers. + :vartype admin_credentials: ~azure.mgmt.postgresql.models.AdminCredentialsForPatch + :ivar source_server_username: Gets or sets the name of the user for the source server. This + user doesn't need to be an administrator. + :vartype source_server_username: str + :ivar target_server_username: Gets or sets the name of the user for the target server. This + user doesn't need to be an administrator. + :vartype target_server_username: str + """ + + admin_credentials: Optional["_models.AdminCredentialsForPatch"] = rest_field( + name="adminCredentials", visibility=["read", "create", "update", "delete", "query"] + ) + """Credentials of administrator users for source and target servers.""" + source_server_username: Optional[str] = rest_field(name="sourceServerUsername", visibility=["update"]) + """Gets or sets the name of the user for the source server. 
This user doesn't need to be an + administrator.""" + target_server_username: Optional[str] = rest_field(name="targetServerUsername", visibility=["update"]) + """Gets or sets the name of the user for the target server. This user doesn't need to be an + administrator.""" + + @overload + def __init__( + self, + *, + admin_credentials: Optional["_models.AdminCredentialsForPatch"] = None, + source_server_username: Optional[str] = None, + target_server_username: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MigrationStatus(_Model): + """State of migration. + + :ivar state: State of migration. Known values are: "InProgress", "WaitingForUserAction", + "Canceled", "Failed", "Succeeded", "ValidationFailed", and "CleaningUp". + :vartype state: str or ~azure.mgmt.postgresql.models.MigrationState + :ivar error: Error message, if any, for the migration state. + :vartype error: str + :ivar current_sub_state_details: Current migration sub state details. + :vartype current_sub_state_details: ~azure.mgmt.postgresql.models.MigrationSubstateDetails + """ + + state: Optional[Union[str, "_models.MigrationState"]] = rest_field(visibility=["read"]) + """State of migration. Known values are: \"InProgress\", \"WaitingForUserAction\", \"Canceled\", + \"Failed\", \"Succeeded\", \"ValidationFailed\", and \"CleaningUp\".""" + error: Optional[str] = rest_field(visibility=["read"]) + """Error message, if any, for the migration state.""" + current_sub_state_details: Optional["_models.MigrationSubstateDetails"] = rest_field( + name="currentSubStateDetails", visibility=["read"] + ) + """Current migration sub state details.""" + + +class MigrationSubstateDetails(_Model): + """Details of migration substate. 
+ + :ivar current_sub_state: Substate of migration. Known values are: + "PerformingPreRequisiteSteps", "WaitingForLogicalReplicationSetupRequestOnSourceDB", + "WaitingForDBsToMigrateSpecification", "WaitingForTargetDBOverwriteConfirmation", + "WaitingForDataMigrationScheduling", "WaitingForDataMigrationWindow", "MigratingData", + "WaitingForCutoverTrigger", "CompletingMigration", "Completed", + "CancelingRequestedDBMigrations", and "ValidationInProgress". + :vartype current_sub_state: str or ~azure.mgmt.postgresql.models.MigrationSubstate + :ivar db_details: + :vartype db_details: dict[str, ~azure.mgmt.postgresql.models.DatabaseMigrationState] + :ivar validation_details: + :vartype validation_details: ~azure.mgmt.postgresql.models.ValidationDetails + """ + + current_sub_state: Optional[Union[str, "_models.MigrationSubstate"]] = rest_field( + name="currentSubState", visibility=["read"] + ) + """Substate of migration. Known values are: \"PerformingPreRequisiteSteps\", + \"WaitingForLogicalReplicationSetupRequestOnSourceDB\", + \"WaitingForDBsToMigrateSpecification\", \"WaitingForTargetDBOverwriteConfirmation\", + \"WaitingForDataMigrationScheduling\", \"WaitingForDataMigrationWindow\", \"MigratingData\", + \"WaitingForCutoverTrigger\", \"CompletingMigration\", \"Completed\", + \"CancelingRequestedDBMigrations\", and \"ValidationInProgress\".""" + db_details: Optional[dict[str, "_models.DatabaseMigrationState"]] = rest_field( + name="dbDetails", visibility=["read", "create", "update", "delete", "query"] + ) + validation_details: Optional["_models.ValidationDetails"] = rest_field( + name="validationDetails", visibility=["read", "create", "update", "delete", "query"] + ) + + @overload + def __init__( + self, + *, + db_details: Optional[dict[str, "_models.DatabaseMigrationState"]] = None, + validation_details: Optional["_models.ValidationDetails"] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class NameAvailabilityModel(CheckNameAvailabilityResponse): + """Availability of a name. + + :ivar name_available: Indicates if the resource name is available. + :vartype name_available: bool + :ivar reason: The reason why the given name is not available. Known values are: "Invalid" and + "AlreadyExists". + :vartype reason: str or ~azure.mgmt.postgresql.models.CheckNameAvailabilityReason + :ivar message: Detailed reason why the given name is not available. + :vartype message: str + :ivar name: Name for which validity and availability was checked. + :vartype name: str + :ivar type: Type of resource. It can be 'Microsoft.DBforPostgreSQL/flexibleServers' or + 'Microsoft.DBforPostgreSQL/flexibleServers/virtualendpoints'. + :vartype type: str + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name for which validity and availability was checked.""" + type: Optional[str] = rest_field(visibility=["read"]) + """Type of resource. It can be 'Microsoft.DBforPostgreSQL/flexibleServers' or + 'Microsoft.DBforPostgreSQL/flexibleServers/virtualendpoints'.""" + + @overload + def __init__( + self, + *, + name_available: Optional[bool] = None, + reason: Optional[Union[str, "_models.CheckNameAvailabilityReason"]] = None, + message: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class NameProperty(_Model): + """Name property for quota usage. + + :ivar value: Name value. + :vartype value: str + :ivar localized_value: Localized name. 
+ :vartype localized_value: str + """ + + value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name value.""" + localized_value: Optional[str] = rest_field( + name="localizedValue", visibility=["read", "create", "update", "delete", "query"] + ) + """Localized name.""" + + @overload + def __init__( + self, + *, + value: Optional[str] = None, + localized_value: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Network(_Model): + """Network properties of a server. + + :ivar public_network_access: Indicates if public network access is enabled or not. This is only + supported for servers that are not integrated into a virtual network which is owned and + provided by customer when server is deployed. Known values are: "Enabled" and "Disabled". + :vartype public_network_access: str or + ~azure.mgmt.postgresql.models.ServerPublicNetworkAccessState + :ivar delegated_subnet_resource_id: Resource identifier of the delegated subnet. Required + during creation of a new server, in case you want the server to be integrated into your own + virtual network. For an update operation, you only have to provide this property if you want to + change the value assigned for the private DNS zone. + :vartype delegated_subnet_resource_id: str + :ivar private_dns_zone_arm_resource_id: Identifier of the private DNS zone. Required during + creation of a new server, in case you want the server to be integrated into your own virtual + network. For an update operation, you only have to provide this property if you want to change + the value assigned for the private DNS zone. 
+ :vartype private_dns_zone_arm_resource_id: str + """ + + public_network_access: Optional[Union[str, "_models.ServerPublicNetworkAccessState"]] = rest_field( + name="publicNetworkAccess", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if public network access is enabled or not. This is only supported for servers that + are not integrated into a virtual network which is owned and provided by customer when server + is deployed. Known values are: \"Enabled\" and \"Disabled\".""" + delegated_subnet_resource_id: Optional[str] = rest_field( + name="delegatedSubnetResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Resource identifier of the delegated subnet. Required during creation of a new server, in case + you want the server to be integrated into your own virtual network. For an update operation, + you only have to provide this property if you want to change the value assigned for the private + DNS zone.""" + private_dns_zone_arm_resource_id: Optional[str] = rest_field( + name="privateDnsZoneArmResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the private DNS zone. Required during creation of a new server, in case you want + the server to be integrated into your own virtual network. For an update operation, you only + have to provide this property if you want to change the value assigned for the private DNS + zone.""" + + @overload + def __init__( + self, + *, + public_network_access: Optional[Union[str, "_models.ServerPublicNetworkAccessState"]] = None, + delegated_subnet_resource_id: Optional[str] = None, + private_dns_zone_arm_resource_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ObjectRecommendation(ProxyResource): + """Object recommendation properties. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar kind: Always empty. + :vartype kind: str + :ivar properties: Properties of an object recommendation. + :vartype properties: ~azure.mgmt.postgresql.models.ObjectRecommendationProperties + """ + + kind: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Always empty.""" + properties: Optional["_models.ObjectRecommendationProperties"] = rest_field(visibility=["read"]) + """Properties of an object recommendation.""" + + __flattened_items = [ + "initial_recommended_time", + "last_recommended_time", + "times_recommended", + "improved_query_ids", + "recommendation_reason", + "current_state", + "recommendation_type", + "implementation_details", + "analyzed_workload", + "estimated_impact", + "details", + ] + + @overload + def __init__( + self, + *, + kind: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class ObjectRecommendationDetails(_Model): + """Recommendation details for the recommended action. + + :ivar database_name: Database name. + :vartype database_name: str + :ivar schema: Schema name. + :vartype schema: str + :ivar table: Table name. + :vartype table: str + :ivar index_type: Index type. + :vartype index_type: str + :ivar index_name: Index name. + :vartype index_name: str + :ivar index_columns: Index columns. + :vartype index_columns: list[str] + :ivar included_columns: Index included columns. 
+ :vartype included_columns: list[str] + """ + + database_name: Optional[str] = rest_field( + name="databaseName", visibility=["read", "create", "update", "delete", "query"] + ) + """Database name.""" + schema: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Schema name.""" + table: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Table name.""" + index_type: Optional[str] = rest_field(name="indexType", visibility=["read", "create", "update", "delete", "query"]) + """Index type.""" + index_name: Optional[str] = rest_field(name="indexName", visibility=["read", "create", "update", "delete", "query"]) + """Index name.""" + index_columns: Optional[list[str]] = rest_field( + name="indexColumns", visibility=["read", "create", "update", "delete", "query"] + ) + """Index columns.""" + included_columns: Optional[list[str]] = rest_field( + name="includedColumns", visibility=["read", "create", "update", "delete", "query"] + ) + """Index included columns.""" + + @overload + def __init__( + self, + *, + database_name: Optional[str] = None, + schema: Optional[str] = None, + table: Optional[str] = None, + index_type: Optional[str] = None, + index_name: Optional[str] = None, + index_columns: Optional[list[str]] = None, + included_columns: Optional[list[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ObjectRecommendationProperties(_Model): + """Object recommendation properties. + + :ivar initial_recommended_time: Creation time (UTC) of this recommendation. + :vartype initial_recommended_time: ~datetime.datetime + :ivar last_recommended_time: Last time (UTC) that this recommendation was produced. 
+ :vartype last_recommended_time: ~datetime.datetime + :ivar times_recommended: Number of times this recommendation has been produced. + :vartype times_recommended: int + :ivar improved_query_ids: List of identifiers for all queries identified as targets for + improvement if the recommendation is applied. The list is only populated for CREATE INDEX + recommendations. + :vartype improved_query_ids: list[int] + :ivar recommendation_reason: Reason for this recommendation. + :vartype recommendation_reason: str + :ivar current_state: Current state. + :vartype current_state: str + :ivar recommendation_type: Type for this recommendation. Known values are: "CreateIndex", + "DropIndex", "ReIndex", and "AnalyzeTable". + :vartype recommendation_type: str or ~azure.mgmt.postgresql.models.RecommendationTypeEnum + :ivar implementation_details: Implementation details for the recommended action. + :vartype implementation_details: + ~azure.mgmt.postgresql.models.ObjectRecommendationPropertiesImplementationDetails + :ivar analyzed_workload: Workload information for the recommended action. + :vartype analyzed_workload: + ~azure.mgmt.postgresql.models.ObjectRecommendationPropertiesAnalyzedWorkload + :ivar estimated_impact: Estimated impact of this recommended action. + :vartype estimated_impact: list[~azure.mgmt.postgresql.models.ImpactRecord] + :ivar details: Recommendation details for the recommended action. 
+ :vartype details: ~azure.mgmt.postgresql.models.ObjectRecommendationDetails + """ + + initial_recommended_time: Optional[datetime.datetime] = rest_field( + name="initialRecommendedTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Creation time (UTC) of this recommendation.""" + last_recommended_time: Optional[datetime.datetime] = rest_field( + name="lastRecommendedTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Last time (UTC) that this recommendation was produced.""" + times_recommended: Optional[int] = rest_field( + name="timesRecommended", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of times this recommendation has been produced.""" + improved_query_ids: Optional[list[int]] = rest_field( + name="improvedQueryIds", visibility=["read", "create", "update", "delete", "query"] + ) + """List of identifiers for all queries identified as targets for improvement if the recommendation + is applied. The list is only populated for CREATE INDEX recommendations.""" + recommendation_reason: Optional[str] = rest_field( + name="recommendationReason", visibility=["read", "create", "update", "delete", "query"] + ) + """Reason for this recommendation.""" + current_state: Optional[str] = rest_field( + name="currentState", visibility=["read", "create", "update", "delete", "query"] + ) + """Current state.""" + recommendation_type: Optional[Union[str, "_models.RecommendationTypeEnum"]] = rest_field( + name="recommendationType", visibility=["read", "create", "update", "delete", "query"] + ) + """Type for this recommendation. 
Known values are: \"CreateIndex\", \"DropIndex\", \"ReIndex\", + and \"AnalyzeTable\".""" + implementation_details: Optional["_models.ObjectRecommendationPropertiesImplementationDetails"] = rest_field( + name="implementationDetails", visibility=["read", "create", "update", "delete", "query"] + ) + """Implementation details for the recommended action.""" + analyzed_workload: Optional["_models.ObjectRecommendationPropertiesAnalyzedWorkload"] = rest_field( + name="analyzedWorkload", visibility=["read", "create", "update", "delete", "query"] + ) + """Workload information for the recommended action.""" + estimated_impact: Optional[list["_models.ImpactRecord"]] = rest_field(name="estimatedImpact", visibility=["read"]) + """Estimated impact of this recommended action.""" + details: Optional["_models.ObjectRecommendationDetails"] = rest_field(visibility=["read"]) + """Recommendation details for the recommended action.""" + + @overload + def __init__( + self, + *, + initial_recommended_time: Optional[datetime.datetime] = None, + last_recommended_time: Optional[datetime.datetime] = None, + times_recommended: Optional[int] = None, + improved_query_ids: Optional[list[int]] = None, + recommendation_reason: Optional[str] = None, + current_state: Optional[str] = None, + recommendation_type: Optional[Union[str, "_models.RecommendationTypeEnum"]] = None, + implementation_details: Optional["_models.ObjectRecommendationPropertiesImplementationDetails"] = None, + analyzed_workload: Optional["_models.ObjectRecommendationPropertiesAnalyzedWorkload"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ObjectRecommendationPropertiesAnalyzedWorkload(_Model): # pylint: disable=name-too-long + """Workload information for the recommended action. 
+ + :ivar start_time: Start time (UTC) of the workload analyzed. + :vartype start_time: ~datetime.datetime + :ivar end_time: End time (UTC) of the workload analyzed. + :vartype end_time: ~datetime.datetime + :ivar query_count: Number of queries from the workload that were examined to produce this + recommendation. For DROP INDEX recommendations it's 0 (zero). + :vartype query_count: int + """ + + start_time: Optional[datetime.datetime] = rest_field( + name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start time (UTC) of the workload analyzed.""" + end_time: Optional[datetime.datetime] = rest_field( + name="endTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time (UTC) of the workload analyzed.""" + query_count: Optional[int] = rest_field( + name="queryCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of queries from the workload that were examined to produce this recommendation. For DROP + INDEX recommendations it's 0 (zero).""" + + @overload + def __init__( + self, + *, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + query_count: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ObjectRecommendationPropertiesImplementationDetails(_Model): # pylint: disable=name-too-long + """Implementation details for the recommended action. + + :ivar method: Method of implementation for recommended action. + :vartype method: str + :ivar script: Implementation script for the recommended action. 
+ :vartype script: str + """ + + method: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Method of implementation for recommended action.""" + script: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Implementation script for the recommended action.""" + + @overload + def __init__( + self, + *, + method: Optional[str] = None, + script: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Operation(_Model): + """REST API operation definition. + + :ivar name: Name of the operation being performed on this particular object. + :vartype name: str + :ivar display: Localized display information for this particular operation or action. + :vartype display: ~azure.mgmt.postgresql.models.OperationDisplay + :ivar is_data_action: Indicates if the operation is a data action. + :vartype is_data_action: bool + :ivar origin: Intended executor of the operation. Known values are: "NotSpecified", "user", and + "system". + :vartype origin: str or ~azure.mgmt.postgresql.models.OperationOrigin + :ivar properties: Additional descriptions for the operation. 
+ :vartype properties: dict[str, any] + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of the operation being performed on this particular object.""" + display: Optional["_models.OperationDisplay"] = rest_field(visibility=["read"]) + """Localized display information for this particular operation or action.""" + is_data_action: Optional[bool] = rest_field( + name="isDataAction", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if the operation is a data action.""" + origin: Optional[Union[str, "_models.OperationOrigin"]] = rest_field(visibility=["read"]) + """Intended executor of the operation. Known values are: \"NotSpecified\", \"user\", and + \"system\".""" + properties: Optional[dict[str, Any]] = rest_field(visibility=["read"]) + """Additional descriptions for the operation.""" + + @overload + def __init__( + self, + *, + is_data_action: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class OperationDisplay(_Model): + """Display metadata associated with the operation. + + :ivar provider: Name of the resource provider. + :vartype provider: str + :ivar resource: Type of resource on which the operation is performed. + :vartype resource: str + :ivar operation: Name of the operation. + :vartype operation: str + :ivar description: Description of the operation. 
+ :vartype description: str + """ + + provider: Optional[str] = rest_field(visibility=["read"]) + """Name of the resource provider.""" + resource: Optional[str] = rest_field(visibility=["read"]) + """Type of resource on which the operation is performed.""" + operation: Optional[str] = rest_field(visibility=["read"]) + """Name of the operation.""" + description: Optional[str] = rest_field(visibility=["read"]) + """Description of the operation.""" + + +class PrivateEndpoint(_Model): + """The private endpoint resource. + + :ivar id: The resource identifier of the private endpoint. + :vartype id: str + """ + + id: Optional[str] = rest_field(visibility=["read"]) + """The resource identifier of the private endpoint.""" + + +class PrivateEndpointConnection(Resource): + """The private endpoint connection resource. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Resource properties. + :vartype properties: ~azure.mgmt.postgresql.models.PrivateEndpointConnectionProperties + """ + + properties: Optional["_models.PrivateEndpointConnectionProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resource properties.""" + + __flattened_items = ["group_ids", "private_endpoint", "private_link_service_connection_state", "provisioning_state"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.PrivateEndpointConnectionProperties"] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class PrivateEndpointConnectionProperties(_Model): + """Properties of the private endpoint connection. + + :ivar group_ids: The group ids for the private endpoint resource. + :vartype group_ids: list[str] + :ivar private_endpoint: The private endpoint resource. + :vartype private_endpoint: ~azure.mgmt.postgresql.models.PrivateEndpoint + :ivar private_link_service_connection_state: A collection of information about the state of the + connection between service consumer and provider. Required. + :vartype private_link_service_connection_state: + ~azure.mgmt.postgresql.models.PrivateLinkServiceConnectionState + :ivar provisioning_state: The provisioning state of the private endpoint connection resource. + Known values are: "Succeeded", "Creating", "Deleting", and "Failed". 
+ :vartype provisioning_state: str or + ~azure.mgmt.postgresql.models.PrivateEndpointConnectionProvisioningState + """ + + group_ids: Optional[list[str]] = rest_field(name="groupIds", visibility=["read"]) + """The group ids for the private endpoint resource.""" + private_endpoint: Optional["_models.PrivateEndpoint"] = rest_field( + name="privateEndpoint", visibility=["read", "create", "update", "delete", "query"] + ) + """The private endpoint resource.""" + private_link_service_connection_state: "_models.PrivateLinkServiceConnectionState" = rest_field( + name="privateLinkServiceConnectionState", visibility=["read", "create", "update", "delete", "query"] + ) + """A collection of information about the state of the connection between service consumer and + provider. Required.""" + provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = rest_field( + name="provisioningState", visibility=["read"] + ) + """The provisioning state of the private endpoint connection resource. Known values are: + \"Succeeded\", \"Creating\", \"Deleting\", and \"Failed\".""" + + @overload + def __init__( + self, + *, + private_link_service_connection_state: "_models.PrivateLinkServiceConnectionState", + private_endpoint: Optional["_models.PrivateEndpoint"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PrivateLinkResource(ProxyResource): + """A private link resource. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Resource properties. + :vartype properties: ~azure.mgmt.postgresql.models.PrivateLinkResourceProperties + """ + + properties: Optional["_models.PrivateLinkResourceProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resource properties.""" + + __flattened_items = ["group_id", "required_members", "required_zone_names"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.PrivateLinkResourceProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class PrivateLinkResourceProperties(_Model): + """Properties of a private link resource. + + :ivar group_id: The private link resource group id. + :vartype group_id: str + :ivar required_members: The private link resource required member names. 
+ :vartype required_members: list[str] + :ivar required_zone_names: The private link resource private link DNS zone name. + :vartype required_zone_names: list[str] + """ + + group_id: Optional[str] = rest_field(name="groupId", visibility=["read"]) + """The private link resource group id.""" + required_members: Optional[list[str]] = rest_field(name="requiredMembers", visibility=["read"]) + """The private link resource required member names.""" + required_zone_names: Optional[list[str]] = rest_field( + name="requiredZoneNames", visibility=["read", "create", "update", "delete", "query"] + ) + """The private link resource private link DNS zone name.""" + + @overload + def __init__( + self, + *, + required_zone_names: Optional[list[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PrivateLinkServiceConnectionState(_Model): + """A collection of information about the state of the connection between service consumer and + provider. + + :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner + of the service. Known values are: "Pending", "Approved", and "Rejected". + :vartype status: str or ~azure.mgmt.postgresql.models.PrivateEndpointServiceConnectionStatus + :ivar description: The reason for approval/rejection of the connection. + :vartype description: str + :ivar actions_required: A message indicating if changes on the service provider require any + updates on the consumer. + :vartype actions_required: str + """ + + status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates whether the connection has been Approved/Rejected/Removed by the owner of the + service. 
Known values are: \"Pending\", \"Approved\", and \"Rejected\".""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The reason for approval/rejection of the connection.""" + actions_required: Optional[str] = rest_field( + name="actionsRequired", visibility=["read", "create", "update", "delete", "query"] + ) + """A message indicating if changes on the service provider require any updates on the consumer.""" + + @overload + def __init__( + self, + *, + status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None, + description: Optional[str] = None, + actions_required: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class QuotaUsage(_Model): + """Quota usage for servers. + + :ivar name: Name of quota usage for servers. + :vartype name: ~azure.mgmt.postgresql.models.NameProperty + :ivar limit: Quota limit. + :vartype limit: int + :ivar unit: Quota unit. + :vartype unit: str + :ivar current_value: Current Quota usage value. + :vartype current_value: int + :ivar id: Fully qualified ARM resource Id. 
+ :vartype id: str + """ + + name: Optional["_models.NameProperty"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of quota usage for servers.""" + limit: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Quota limit.""" + unit: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Quota unit.""" + current_value: Optional[int] = rest_field( + name="currentValue", visibility=["read", "create", "update", "delete", "query"] + ) + """Current Quota usage value.""" + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Fully qualified ARM resource Id.""" + + @overload + def __init__( + self, + *, + name: Optional["_models.NameProperty"] = None, + limit: Optional[int] = None, + unit: Optional[str] = None, + current_value: Optional[int] = None, + id: Optional[str] = None, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Replica(_Model): + """Replica properties of a server. + + :ivar role: Role of the server in a replication set. Known values are: "None", "Primary", + "AsyncReplica", and "GeoAsyncReplica". + :vartype role: str or ~azure.mgmt.postgresql.models.ReplicationRole + :ivar capacity: Maximum number of read replicas allowed for a server. + :vartype capacity: int + :ivar replication_state: Indicates the replication state of a read replica. This property is + returned only when the target server is a read replica. Possible values are Active, Broken, + Catchup, Provisioning, Reconfiguring, and Updating. Known values are: "Active", "Catchup", + "Provisioning", "Updating", "Broken", and "Reconfiguring". 
+ :vartype replication_state: str or ~azure.mgmt.postgresql.models.ReplicationState + :ivar promote_mode: Type of operation to apply on the read replica. This property is write + only. Standalone means that the read replica will be promoted to a standalone server, and will + become a completely independent entity from the replication set. Switchover means that the read + replica will switch roles with the primary server. Known values are: "Standalone" and "Switchover". + :vartype promote_mode: str or ~azure.mgmt.postgresql.models.ReadReplicaPromoteMode + :ivar promote_option: Data synchronization option to use when processing the operation + specified in the promoteMode property. This property is write only. Known values are: "Planned" + and "Forced". + :vartype promote_option: str or ~azure.mgmt.postgresql.models.ReadReplicaPromoteOption + """ + + role: Optional[Union[str, "_models.ReplicationRole"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Role of the server in a replication set. Known values are: \"None\", \"Primary\", + \"AsyncReplica\", and \"GeoAsyncReplica\".""" + capacity: Optional[int] = rest_field(visibility=["read"]) + """Maximum number of read replicas allowed for a server.""" + replication_state: Optional[Union[str, "_models.ReplicationState"]] = rest_field( + name="replicationState", visibility=["read"] + ) + """Indicates the replication state of a read replica. This property is returned only when the + target server is a read replica. Possible values are Active, Broken, Catchup, Provisioning, + Reconfiguring, and Updating. Known values are: \"Active\", \"Catchup\", \"Provisioning\", + \"Updating\", \"Broken\", and \"Reconfiguring\".""" + promote_mode: Optional[Union[str, "_models.ReadReplicaPromoteMode"]] = rest_field( + name="promoteMode", visibility=["update"] + ) + """Type of operation to apply on the read replica. This property is write only. 
Standalone means + that the read replica will be promoted to a standalone server, and will become a completely + independent entity from the replication set. Switchover means that the read replica will switch roles + with the primary server. Known values are: \"Standalone\" and \"Switchover\".""" + promote_option: Optional[Union[str, "_models.ReadReplicaPromoteOption"]] = rest_field( + name="promoteOption", visibility=["update"] + ) + """Data synchronization option to use when processing the operation specified in the promoteMode + property. This property is write only. Known values are: \"Planned\" and \"Forced\".""" + + @overload + def __init__( + self, + *, + role: Optional[Union[str, "_models.ReplicationRole"]] = None, + promote_mode: Optional[Union[str, "_models.ReadReplicaPromoteMode"]] = None, + promote_option: Optional[Union[str, "_models.ReadReplicaPromoteOption"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RestartParameter(_Model): + """PostgreSQL database engine restart parameters. + + :ivar restart_with_failover: Indicates if restarting the PostgreSQL database engine should + fail over or switch over from primary to standby. This only works if server has high + availability enabled. + :vartype restart_with_failover: bool + :ivar failover_mode: Failover mode. Known values are: "PlannedFailover", "ForcedFailover", + "PlannedSwitchover", and "ForcedSwitchover". + :vartype failover_mode: str or ~azure.mgmt.postgresql.models.FailoverMode + """ + + restart_with_failover: Optional[bool] = rest_field( + name="restartWithFailover", visibility=["read", "create", "update", "delete", "query"] + ) + """Indicates if restarting the PostgreSQL database engine should fail over or switch over from primary + to standby. 
This only works if server has high availability enabled.""" + failover_mode: Optional[Union[str, "_models.FailoverMode"]] = rest_field( + name="failoverMode", visibility=["read", "create", "update", "delete", "query"] + ) + """Failover mode. Known values are: \"PlannedFailover\", \"ForcedFailover\", + \"PlannedSwitchover\", and \"ForcedSwitchover\".""" + + @overload + def __init__( + self, + *, + restart_with_failover: Optional[bool] = None, + failover_mode: Optional[Union[str, "_models.FailoverMode"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Server(TrackedResource): + """Properties of a server. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar location: The geo-location where the resource lives. Required. + :vartype location: str + :ivar properties: Properties of a server. + :vartype properties: ~azure.mgmt.postgresql.models.ServerProperties + :ivar sku: Compute tier and size of a server. + :vartype sku: ~azure.mgmt.postgresql.models.Sku + :ivar identity: User assigned managed identities assigned to the server. 
+ :vartype identity: ~azure.mgmt.postgresql.models.UserAssignedIdentity + """ + + properties: Optional["_models.ServerProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of a server.""" + sku: Optional["_models.Sku"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Compute tier and size of a server.""" + identity: Optional["_models.UserAssignedIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """User assigned managed identities assigned to the server.""" + + __flattened_items = [ + "administrator_login", + "administrator_login_password", + "version", + "minor_version", + "state", + "fully_qualified_domain_name", + "storage", + "auth_config", + "data_encryption", + "backup", + "network", + "high_availability", + "maintenance_window", + "source_server_resource_id", + "point_in_time_utc", + "availability_zone", + "replication_role", + "replica_capacity", + "replica", + "create_mode", + "private_endpoint_connections", + "cluster", + ] + + @overload + def __init__( + self, + *, + location: str, + tags: Optional[dict[str, str]] = None, + properties: Optional["_models.ServerProperties"] = None, + sku: Optional["_models.Sku"] = None, + identity: Optional["_models.UserAssignedIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class ServerEditionCapability(CapabilityBase): + """Capabilities in terms of compute tier. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar name: Name of compute tier. + :vartype name: str + :ivar default_sku_name: Default compute name (SKU) for this compute tier. + :vartype default_sku_name: str + :ivar supported_storage_editions: List of storage editions supported by this compute tier and + compute name. + :vartype supported_storage_editions: + list[~azure.mgmt.postgresql.models.StorageEditionCapability] + :ivar supported_server_skus: List of supported compute names (SKUs). 
+ :vartype supported_server_skus: list[~azure.mgmt.postgresql.models.ServerSkuCapability] + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of compute tier.""" + default_sku_name: Optional[str] = rest_field(name="defaultSkuName", visibility=["read"]) + """Default compute name (SKU) for this compute tier.""" + supported_storage_editions: Optional[list["_models.StorageEditionCapability"]] = rest_field( + name="supportedStorageEditions", visibility=["read"] + ) + """List of storage editions supported by this compute tier and compute name.""" + supported_server_skus: Optional[list["_models.ServerSkuCapability"]] = rest_field( + name="supportedServerSkus", visibility=["read"] + ) + """List of supported compute names (SKUs).""" + + +class ServerForPatch(_Model): + """Represents a server to be updated. + + :ivar sku: Compute tier and size of a server. + :vartype sku: ~azure.mgmt.postgresql.models.SkuForPatch + :ivar identity: Describes the identity of the application. + :vartype identity: ~azure.mgmt.postgresql.models.UserAssignedIdentity + :ivar properties: Properties of the server. + :vartype properties: ~azure.mgmt.postgresql.models.ServerPropertiesForPatch + :ivar tags: Application-specific metadata in the form of key-value pairs. 
+ :vartype tags: dict[str, str] + """ + + sku: Optional["_models.SkuForPatch"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Compute tier and size of a server.""" + identity: Optional["_models.UserAssignedIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Describes the identity of the application.""" + properties: Optional["_models.ServerPropertiesForPatch"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of the server.""" + tags: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Application-specific metadata in the form of key-value pairs.""" + + __flattened_items = [ + "administrator_login", + "administrator_login_password", + "version", + "storage", + "backup", + "high_availability", + "maintenance_window", + "auth_config", + "data_encryption", + "availability_zone", + "create_mode", + "replication_role", + "replica", + "network", + "cluster", + ] + + @overload + def __init__( + self, + *, + sku: Optional["_models.SkuForPatch"] = None, + identity: Optional["_models.UserAssignedIdentity"] = None, + properties: Optional["_models.ServerPropertiesForPatch"] = None, + tags: Optional[dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class ServerProperties(_Model): + """Properties of a server. + + :ivar administrator_login: Name of the login designated as the first password based + administrator assigned to your instance of PostgreSQL. Must be specified the first time that + you enable password based authentication on a server. Once set to a given value, it cannot be + changed for the rest of the life of a server. If you disable password based authentication on a + server which had it enabled, this password based role isn't deleted. + :vartype administrator_login: str + :ivar administrator_login_password: Password assigned to the administrator login. As long as + password authentication is enabled, this password can be changed at any time. + :vartype administrator_login_password: str + :ivar version: Major version of PostgreSQL database engine. Known values are: "18", "17", "16", + "15", "14", "13", "12", and "11". + :vartype version: str or ~azure.mgmt.postgresql.models.PostgresMajorVersion + :ivar minor_version: Minor version of PostgreSQL database engine. + :vartype minor_version: str + :ivar state: Possible states of a server. 
Known values are: "Ready", "Dropping", "Disabled", + "Starting", "Stopping", "Stopped", "Updating", "Restarting", "Inaccessible", and + "Provisioning". + :vartype state: str or ~azure.mgmt.postgresql.models.ServerState + :ivar fully_qualified_domain_name: Fully qualified domain name of a server. + :vartype fully_qualified_domain_name: str + :ivar storage: Storage properties of a server. + :vartype storage: ~azure.mgmt.postgresql.models.Storage + :ivar auth_config: Authentication configuration properties of a server. + :vartype auth_config: ~azure.mgmt.postgresql.models.AuthConfig + :ivar data_encryption: Data encryption properties of a server. + :vartype data_encryption: ~azure.mgmt.postgresql.models.DataEncryption + :ivar backup: Backup properties of a server. + :vartype backup: ~azure.mgmt.postgresql.models.Backup + :ivar network: Network properties of a server. Only required if you want your server to be + integrated into a virtual network provided by customer. + :vartype network: ~azure.mgmt.postgresql.models.Network + :ivar high_availability: High availability properties of a server. + :vartype high_availability: ~azure.mgmt.postgresql.models.HighAvailability + :ivar maintenance_window: Maintenance window properties of a server. + :vartype maintenance_window: ~azure.mgmt.postgresql.models.MaintenanceWindow + :ivar source_server_resource_id: Identifier of the server to be used as the source of the new + server. Required when 'createMode' is 'PointInTimeRestore', 'GeoRestore', 'Replica', or + 'ReviveDropped'. This property is returned only when the target server is a read replica. + :vartype source_server_resource_id: str + :ivar point_in_time_utc: Creation time (in ISO8601 format) of the backup which you want to + restore in the new server. It's required when 'createMode' is 'PointInTimeRestore', + 'GeoRestore', or 'ReviveDropped'. + :vartype point_in_time_utc: ~datetime.datetime + :ivar availability_zone: Availability zone of a server. 
+ :vartype availability_zone: str + :ivar replication_role: Role of the server in a replication set. Known values are: "None", + "Primary", "AsyncReplica", and "GeoAsyncReplica". + :vartype replication_role: str or ~azure.mgmt.postgresql.models.ReplicationRole + :ivar replica_capacity: Maximum number of read replicas allowed for a server. + :vartype replica_capacity: int + :ivar replica: Read replica properties of a server. Required only in case that you want to + promote a server. + :vartype replica: ~azure.mgmt.postgresql.models.Replica + :ivar create_mode: Creation mode of a new server. Known values are: "Default", "Create", + "Update", "PointInTimeRestore", "GeoRestore", "Replica", and "ReviveDropped". + :vartype create_mode: str or ~azure.mgmt.postgresql.models.CreateMode + :ivar private_endpoint_connections: List of private endpoint connections associated with the + specified server. + :vartype private_endpoint_connections: + list[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :ivar cluster: Cluster properties of a server. + :vartype cluster: ~azure.mgmt.postgresql.models.Cluster + """ + + administrator_login: Optional[str] = rest_field(name="administratorLogin", visibility=["read", "create"]) + """Name of the login designated as the first password based administrator assigned to your + instance of PostgreSQL. Must be specified the first time that you enable password based + authentication on a server. Once set to a given value, it cannot be changed for the rest of the + life of a server. If you disable password based authentication on a server which had it + enabled, this password based role isn't deleted.""" + administrator_login_password: Optional[str] = rest_field( + name="administratorLoginPassword", visibility=["create", "update"] + ) + """Password assigned to the administrator login. 
As long as password authentication is enabled, + this password can be changed at any time.""" + version: Optional[Union[str, "_models.PostgresMajorVersion"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Major version of PostgreSQL database engine. Known values are: \"18\", \"17\", \"16\", \"15\", + \"14\", \"13\", \"12\", and \"11\".""" + minor_version: Optional[str] = rest_field(name="minorVersion", visibility=["read"]) + """Minor version of PostgreSQL database engine.""" + state: Optional[Union[str, "_models.ServerState"]] = rest_field(visibility=["read"]) + """Possible states of a server. Known values are: \"Ready\", \"Dropping\", \"Disabled\", + \"Starting\", \"Stopping\", \"Stopped\", \"Updating\", \"Restarting\", \"Inaccessible\", and + \"Provisioning\".""" + fully_qualified_domain_name: Optional[str] = rest_field(name="fullyQualifiedDomainName", visibility=["read"]) + """Fully qualified domain name of a server.""" + storage: Optional["_models.Storage"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Storage properties of a server.""" + auth_config: Optional["_models.AuthConfig"] = rest_field( + name="authConfig", visibility=["read", "create", "update", "delete", "query"] + ) + """Authentication configuration properties of a server.""" + data_encryption: Optional["_models.DataEncryption"] = rest_field( + name="dataEncryption", visibility=["read", "create", "update", "delete", "query"] + ) + """Data encryption properties of a server.""" + backup: Optional["_models.Backup"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Backup properties of a server.""" + network: Optional["_models.Network"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Network properties of a server. 
Only required if you want your server to be integrated into a + virtual network provided by customer.""" + high_availability: Optional["_models.HighAvailability"] = rest_field( + name="highAvailability", visibility=["read", "create", "update", "delete", "query"] + ) + """High availability properties of a server.""" + maintenance_window: Optional["_models.MaintenanceWindow"] = rest_field( + name="maintenanceWindow", visibility=["read", "update"] + ) + """Maintenance window properties of a server.""" + source_server_resource_id: Optional[str] = rest_field(name="sourceServerResourceId", visibility=["read", "create"]) + """Identifier of the server to be used as the source of the new server. Required when 'createMode' + is 'PointInTimeRestore', 'GeoRestore', 'Replica', or 'ReviveDropped'. This property is returned + only when the target server is a read replica.""" + point_in_time_utc: Optional[datetime.datetime] = rest_field( + name="pointInTimeUTC", visibility=["create"], format="rfc3339" + ) + """Creation time (in ISO8601 format) of the backup which you want to restore in the new server. + It's required when 'createMode' is 'PointInTimeRestore', 'GeoRestore', or 'ReviveDropped'.""" + availability_zone: Optional[str] = rest_field(name="availabilityZone", visibility=["read", "create"]) + """Availability zone of a server.""" + replication_role: Optional[Union[str, "_models.ReplicationRole"]] = rest_field( + name="replicationRole", visibility=["read", "create", "update", "delete", "query"] + ) + """Role of the server in a replication set. Known values are: \"None\", \"Primary\", + \"AsyncReplica\", and \"GeoAsyncReplica\".""" + replica_capacity: Optional[int] = rest_field(name="replicaCapacity", visibility=["read"]) + """Maximum number of read replicas allowed for a server.""" + replica: Optional["_models.Replica"] = rest_field(visibility=["read", "update"]) + """Read replica properties of a server. 
Required only in case that you want to promote a server.""" + create_mode: Optional[Union[str, "_models.CreateMode"]] = rest_field( + name="createMode", visibility=["create", "update"] + ) + """Creation mode of a new server. Known values are: \"Default\", \"Create\", \"Update\", + \"PointInTimeRestore\", \"GeoRestore\", \"Replica\", and \"ReviveDropped\".""" + private_endpoint_connections: Optional[list["_models.PrivateEndpointConnection"]] = rest_field( + name="privateEndpointConnections", visibility=["read"] + ) + """List of private endpoint connections associated with the specified server.""" + cluster: Optional["_models.Cluster"] = rest_field(visibility=["read", "create", "update"]) + """Cluster properties of a server.""" + + @overload + def __init__( + self, + *, + administrator_login: Optional[str] = None, + administrator_login_password: Optional[str] = None, + version: Optional[Union[str, "_models.PostgresMajorVersion"]] = None, + storage: Optional["_models.Storage"] = None, + auth_config: Optional["_models.AuthConfig"] = None, + data_encryption: Optional["_models.DataEncryption"] = None, + backup: Optional["_models.Backup"] = None, + network: Optional["_models.Network"] = None, + high_availability: Optional["_models.HighAvailability"] = None, + maintenance_window: Optional["_models.MaintenanceWindow"] = None, + source_server_resource_id: Optional[str] = None, + point_in_time_utc: Optional[datetime.datetime] = None, + availability_zone: Optional[str] = None, + replication_role: Optional[Union[str, "_models.ReplicationRole"]] = None, + replica: Optional["_models.Replica"] = None, + create_mode: Optional[Union[str, "_models.CreateMode"]] = None, + cluster: Optional["_models.Cluster"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ServerPropertiesForPatch(_Model): + """Properties of a server. + + :ivar administrator_login: Name of the login designated as the first password based + administrator assigned to your instance of PostgreSQL. Must be specified the first time that + you enable password based authentication on a server. Once set to a given value, it cannot be + changed for the rest of the life of a server. If you disable password based authentication on a + server which had it enabled, this password based role isn't deleted. + :vartype administrator_login: str + :ivar administrator_login_password: Password assigned to the administrator login. As long as + password authentication is enabled, this password can be changed at any time. + :vartype administrator_login_password: str + :ivar version: Major version of PostgreSQL database engine. Known values are: "18", "17", "16", + "15", "14", "13", "12", and "11". + :vartype version: str or ~azure.mgmt.postgresql.models.PostgresMajorVersion + :ivar storage: Storage properties of a server. + :vartype storage: ~azure.mgmt.postgresql.models.Storage + :ivar backup: Backup properties of a server. + :vartype backup: ~azure.mgmt.postgresql.models.BackupForPatch + :ivar high_availability: High availability properties of a server. + :vartype high_availability: ~azure.mgmt.postgresql.models.HighAvailabilityForPatch + :ivar maintenance_window: Maintenance window properties of a server. + :vartype maintenance_window: ~azure.mgmt.postgresql.models.MaintenanceWindowForPatch + :ivar auth_config: Authentication configuration properties of a server. + :vartype auth_config: ~azure.mgmt.postgresql.models.AuthConfigForPatch + :ivar data_encryption: Data encryption properties of a server. + :vartype data_encryption: ~azure.mgmt.postgresql.models.DataEncryption + :ivar availability_zone: Availability zone of a server. 
+ :vartype availability_zone: str + :ivar create_mode: Update mode of an existing server. Known values are: "Default" and "Update". + :vartype create_mode: str or ~azure.mgmt.postgresql.models.CreateModeForPatch + :ivar replication_role: Role of the server in a replication set. Known values are: "None", + "Primary", "AsyncReplica", and "GeoAsyncReplica". + :vartype replication_role: str or ~azure.mgmt.postgresql.models.ReplicationRole + :ivar replica: Read replica properties of a server. Required only in case that you want to + promote a server. + :vartype replica: ~azure.mgmt.postgresql.models.Replica + :ivar network: Network properties of a server. Only required if you want your server to be + integrated into a virtual network provided by customer. + :vartype network: ~azure.mgmt.postgresql.models.Network + :ivar cluster: Cluster properties of a server. + :vartype cluster: ~azure.mgmt.postgresql.models.Cluster + """ + + administrator_login: Optional[str] = rest_field(name="administratorLogin", visibility=["read"]) + """Name of the login designated as the first password based administrator assigned to your + instance of PostgreSQL. Must be specified the first time that you enable password based + authentication on a server. Once set to a given value, it cannot be changed for the rest of the + life of a server. If you disable password based authentication on a server which had it + enabled, this password based role isn't deleted.""" + administrator_login_password: Optional[str] = rest_field(name="administratorLoginPassword", visibility=["update"]) + """Password assigned to the administrator login. As long as password authentication is enabled, + this password can be changed at any time.""" + version: Optional[Union[str, "_models.PostgresMajorVersion"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Major version of PostgreSQL database engine. 
Known values are: \"18\", \"17\", \"16\", \"15\", + \"14\", \"13\", \"12\", and \"11\".""" + storage: Optional["_models.Storage"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Storage properties of a server.""" + backup: Optional["_models.BackupForPatch"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Backup properties of a server.""" + high_availability: Optional["_models.HighAvailabilityForPatch"] = rest_field( + name="highAvailability", visibility=["read", "create", "update", "delete", "query"] + ) + """High availability properties of a server.""" + maintenance_window: Optional["_models.MaintenanceWindowForPatch"] = rest_field( + name="maintenanceWindow", visibility=["read", "update"] + ) + """Maintenance window properties of a server.""" + auth_config: Optional["_models.AuthConfigForPatch"] = rest_field( + name="authConfig", visibility=["read", "create", "update", "delete", "query"] + ) + """Authentication configuration properties of a server.""" + data_encryption: Optional["_models.DataEncryption"] = rest_field( + name="dataEncryption", visibility=["read", "create", "update", "delete", "query"] + ) + """Data encryption properties of a server.""" + availability_zone: Optional[str] = rest_field( + name="availabilityZone", visibility=["read", "create", "update", "delete", "query"] + ) + """Availability zone of a server.""" + create_mode: Optional[Union[str, "_models.CreateModeForPatch"]] = rest_field( + name="createMode", visibility=["update"] + ) + """Update mode of an existing server. Known values are: \"Default\" and \"Update\".""" + replication_role: Optional[Union[str, "_models.ReplicationRole"]] = rest_field( + name="replicationRole", visibility=["read", "create", "update", "delete", "query"] + ) + """Role of the server in a replication set. 
Known values are: \"None\", \"Primary\", + \"AsyncReplica\", and \"GeoAsyncReplica\".""" + replica: Optional["_models.Replica"] = rest_field(visibility=["read", "update"]) + """Read replica properties of a server. Required only in case that you want to promote a server.""" + network: Optional["_models.Network"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Network properties of a server. Only required if you want your server to be integrated into a + virtual network provided by customer.""" + cluster: Optional["_models.Cluster"] = rest_field(visibility=["read", "update"]) + """Cluster properties of a server.""" + + @overload + def __init__( + self, + *, + administrator_login_password: Optional[str] = None, + version: Optional[Union[str, "_models.PostgresMajorVersion"]] = None, + storage: Optional["_models.Storage"] = None, + backup: Optional["_models.BackupForPatch"] = None, + high_availability: Optional["_models.HighAvailabilityForPatch"] = None, + maintenance_window: Optional["_models.MaintenanceWindowForPatch"] = None, + auth_config: Optional["_models.AuthConfigForPatch"] = None, + data_encryption: Optional["_models.DataEncryption"] = None, + availability_zone: Optional[str] = None, + create_mode: Optional[Union[str, "_models.CreateModeForPatch"]] = None, + replication_role: Optional[Union[str, "_models.ReplicationRole"]] = None, + replica: Optional["_models.Replica"] = None, + network: Optional["_models.Network"] = None, + cluster: Optional["_models.Cluster"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ServerSku(_Model): + """Compute information of a server. + + :ivar name: Compute tier and size of the database server. 
This object is empty for an Azure + Database for PostgreSQL single server. + :vartype name: str + :ivar tier: Tier of the compute assigned to a server. Known values are: "Burstable", + "GeneralPurpose", and "MemoryOptimized". + :vartype tier: str or ~azure.mgmt.postgresql.models.SkuTier + """ + + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Compute tier and size of the database server. This object is empty for an Azure Database for + PostgreSQL single server.""" + tier: Optional[Union[str, "_models.SkuTier"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Tier of the compute assigned to a server. Known values are: \"Burstable\", \"GeneralPurpose\", + and \"MemoryOptimized\".""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + tier: Optional[Union[str, "_models.SkuTier"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ServerSkuCapability(CapabilityBase): + """Capabilities in terms of compute. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar name: Name of the compute (SKU). + :vartype name: str + :ivar v_cores: vCores available for this compute. + :vartype v_cores: int + :ivar supported_iops: Maximum IOPS supported by this compute. + :vartype supported_iops: int + :ivar supported_memory_per_vcore_mb: Supported memory (in MB) per virtual core assigned to this + compute. 
+ :vartype supported_memory_per_vcore_mb: int + :ivar supported_zones: List of supported availability zones. E.g. '1', '2', '3'. + :vartype supported_zones: list[str] + :ivar supported_ha_mode: Modes of high availability supported for this compute. + :vartype supported_ha_mode: list[str or ~azure.mgmt.postgresql.models.HighAvailabilityMode] + :ivar supported_features: Features supported. + :vartype supported_features: list[~azure.mgmt.postgresql.models.SupportedFeature] + :ivar security_profile: Security profile of the compute. Indicates if it's a Confidential + Compute virtual machine. + :vartype security_profile: str + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of the compute (SKU).""" + v_cores: Optional[int] = rest_field(name="vCores", visibility=["read"]) + """vCores available for this compute.""" + supported_iops: Optional[int] = rest_field(name="supportedIops", visibility=["read"]) + """Maximum IOPS supported by this compute.""" + supported_memory_per_vcore_mb: Optional[int] = rest_field(name="supportedMemoryPerVcoreMb", visibility=["read"]) + """Supported memory (in MB) per virtual core assigned to this compute.""" + supported_zones: Optional[list[str]] = rest_field(name="supportedZones", visibility=["read"]) + """List of supported availability zones. E.g. '1', '2', '3'.""" + supported_ha_mode: Optional[list[Union[str, "_models.HighAvailabilityMode"]]] = rest_field( + name="supportedHaMode", visibility=["read"] + ) + """Modes of high availability supported for this compute.""" + supported_features: Optional[list["_models.SupportedFeature"]] = rest_field( + name="supportedFeatures", visibility=["read"] + ) + """Features supported.""" + security_profile: Optional[str] = rest_field(name="securityProfile", visibility=["read"]) + """Security profile of the compute. 
Indicates if it's a Confidential Compute virtual machine.""" + + +class ServerVersionCapability(CapabilityBase): + """Capabilities in terms of major versions of PostgreSQL database engine. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar name: Major version of PostgreSQL database engine. + :vartype name: str + :ivar supported_versions_to_upgrade: Major versions of PostgreSQL database engine to which this + version can be automatically upgraded. + :vartype supported_versions_to_upgrade: list[str] + :ivar supported_features: Features supported. + :vartype supported_features: list[~azure.mgmt.postgresql.models.SupportedFeature] + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Major version of PostgreSQL database engine.""" + supported_versions_to_upgrade: Optional[list[str]] = rest_field( + name="supportedVersionsToUpgrade", visibility=["read"] + ) + """Major versions of PostgreSQL database engine to which this version can be automatically + upgraded.""" + supported_features: Optional[list["_models.SupportedFeature"]] = rest_field( + name="supportedFeatures", visibility=["read"] + ) + """Features supported.""" + + +class Sku(_Model): + """Compute information of a server. + + :ivar name: Name by which is known a given compute size assigned to a server. Required. + :vartype name: str + :ivar tier: Tier of the compute assigned to a server. Required. Known values are: "Burstable", + "GeneralPurpose", and "MemoryOptimized". + :vartype tier: str or ~azure.mgmt.postgresql.models.SkuTier + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name by which is known a given compute size assigned to a server. 
Required.""" + tier: Union[str, "_models.SkuTier"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Tier of the compute assigned to a server. Required. Known values are: \"Burstable\", + \"GeneralPurpose\", and \"MemoryOptimized\".""" + + @overload + def __init__( + self, + *, + name: str, + tier: Union[str, "_models.SkuTier"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SkuForPatch(_Model): + """Compute information of a server. + + :ivar name: Name by which is known a given compute size assigned to a server. + :vartype name: str + :ivar tier: Tier of the compute assigned to a server. Known values are: "Burstable", + "GeneralPurpose", and "MemoryOptimized". + :vartype tier: str or ~azure.mgmt.postgresql.models.SkuTier + """ + + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name by which is known a given compute size assigned to a server.""" + tier: Optional[Union[str, "_models.SkuTier"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Tier of the compute assigned to a server. Known values are: \"Burstable\", \"GeneralPurpose\", + and \"MemoryOptimized\".""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + tier: Optional[Union[str, "_models.SkuTier"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Storage(_Model): + """Storage properties of a server. + + :ivar storage_size_gb: Size of storage assigned to a server. 
+ :vartype storage_size_gb: int + :ivar auto_grow: Flag to enable or disable the automatic growth of storage size of a server + when available space is nearing zero and conditions allow for automatically growing storage + size. Known values are: "Enabled" and "Disabled". + :vartype auto_grow: str or ~azure.mgmt.postgresql.models.StorageAutoGrow + :ivar tier: Storage tier of a server. Known values are: "P1", "P2", "P3", "P4", "P6", "P10", + "P15", "P20", "P30", "P40", "P50", "P60", "P70", and "P80". + :vartype tier: str or ~azure.mgmt.postgresql.models.AzureManagedDiskPerformanceTier + :ivar iops: Maximum IOPS supported for storage. Required when type of storage is PremiumV2_LRS + or UltraSSD_LRS. + :vartype iops: int + :ivar throughput: Maximum throughput supported for storage. Required when type of storage is + PremiumV2_LRS or UltraSSD_LRS. + :vartype throughput: int + :ivar type: Type of storage assigned to a server. Allowed values are Premium_LRS, + PremiumV2_LRS, or UltraSSD_LRS. If not specified, it defaults to Premium_LRS. Known values are: + "Premium_LRS", "PremiumV2_LRS", and "UltraSSD_LRS". + :vartype type: str or ~azure.mgmt.postgresql.models.StorageType + """ + + storage_size_gb: Optional[int] = rest_field( + name="storageSizeGB", visibility=["read", "create", "update", "delete", "query"] + ) + """Size of storage assigned to a server.""" + auto_grow: Optional[Union[str, "_models.StorageAutoGrow"]] = rest_field( + name="autoGrow", visibility=["read", "create", "update", "delete", "query"] + ) + """Flag to enable or disable the automatic growth of storage size of a server when available space + is nearing zero and conditions allow for automatically growing storage size. Known values are: + \"Enabled\" and \"Disabled\".""" + tier: Optional[Union[str, "_models.AzureManagedDiskPerformanceTier"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Storage tier of a server. 
Known values are: \"P1\", \"P2\", \"P3\", \"P4\", \"P6\", \"P10\", + \"P15\", \"P20\", \"P30\", \"P40\", \"P50\", \"P60\", \"P70\", and \"P80\".""" + iops: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Maximum IOPS supported for storage. Required when type of storage is PremiumV2_LRS or + UltraSSD_LRS.""" + throughput: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Maximum throughput supported for storage. Required when type of storage is PremiumV2_LRS or + UltraSSD_LRS.""" + type: Optional[Union[str, "_models.StorageType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Type of storage assigned to a server. Allowed values are Premium_LRS, PremiumV2_LRS, or + UltraSSD_LRS. If not specified, it defaults to Premium_LRS. Known values are: \"Premium_LRS\", + \"PremiumV2_LRS\", and \"UltraSSD_LRS\".""" + + @overload + def __init__( + self, + *, + storage_size_gb: Optional[int] = None, + auto_grow: Optional[Union[str, "_models.StorageAutoGrow"]] = None, + tier: Optional[Union[str, "_models.AzureManagedDiskPerformanceTier"]] = None, + iops: Optional[int] = None, + throughput: Optional[int] = None, + type: Optional[Union[str, "_models.StorageType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class StorageEditionCapability(CapabilityBase): + """Capabilities in terms of storage tier. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar name: Name of storage tier. 
+ :vartype name: str + :ivar default_storage_size_mb: Default storage size (in MB) for this storage tier. + :vartype default_storage_size_mb: int + :ivar supported_storage_mb: Configurations of storage supported for this storage tier. + :vartype supported_storage_mb: list[~azure.mgmt.postgresql.models.StorageMbCapability] + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of storage tier.""" + default_storage_size_mb: Optional[int] = rest_field(name="defaultStorageSizeMb", visibility=["read"]) + """Default storage size (in MB) for this storage tier.""" + supported_storage_mb: Optional[list["_models.StorageMbCapability"]] = rest_field( + name="supportedStorageMb", visibility=["read"] + ) + """Configurations of storage supported for this storage tier.""" + + +class StorageMbCapability(CapabilityBase): + """Storage size (in MB) capability. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar supported_iops: Minimum IOPS supported by the storage size. + :vartype supported_iops: int + :ivar supported_maximum_iops: Maximum IOPS supported by the storage size. + :vartype supported_maximum_iops: int + :ivar storage_size_mb: Minimum supported size (in MB) of storage. + :vartype storage_size_mb: int + :ivar maximum_storage_size_mb: Maximum supported size (in MB) of storage. + :vartype maximum_storage_size_mb: int + :ivar supported_throughput: Minimum supported throughput (in MB/s) of storage. + :vartype supported_throughput: int + :ivar supported_maximum_throughput: Maximum supported throughput (in MB/s) of storage. + :vartype supported_maximum_throughput: int + :ivar default_iops_tier: Default IOPS for this tier and storage size. 
+ :vartype default_iops_tier: str + :ivar supported_iops_tiers: List of all supported storage tiers for this tier and storage size. + :vartype supported_iops_tiers: list[~azure.mgmt.postgresql.models.StorageTierCapability] + """ + + supported_iops: Optional[int] = rest_field(name="supportedIops", visibility=["read"]) + """Minimum IOPS supported by the storage size.""" + supported_maximum_iops: Optional[int] = rest_field(name="supportedMaximumIops", visibility=["read"]) + """Maximum IOPS supported by the storage size.""" + storage_size_mb: Optional[int] = rest_field(name="storageSizeMb", visibility=["read"]) + """Minimum supported size (in MB) of storage.""" + maximum_storage_size_mb: Optional[int] = rest_field(name="maximumStorageSizeMb", visibility=["read"]) + """Maximum supported size (in MB) of storage.""" + supported_throughput: Optional[int] = rest_field(name="supportedThroughput", visibility=["read"]) + """Minimum supported throughput (in MB/s) of storage.""" + supported_maximum_throughput: Optional[int] = rest_field(name="supportedMaximumThroughput", visibility=["read"]) + """Maximum supported throughput (in MB/s) of storage.""" + default_iops_tier: Optional[str] = rest_field(name="defaultIopsTier", visibility=["read"]) + """Default IOPS for this tier and storage size.""" + supported_iops_tiers: Optional[list["_models.StorageTierCapability"]] = rest_field( + name="supportedIopsTiers", visibility=["read"] + ) + """List of all supported storage tiers for this tier and storage size.""" + + +class StorageTierCapability(CapabilityBase): + """Capability of a storage tier. + + :ivar status: The status of the capability. Known values are: "Visible", "Available", + "Default", and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.CapabilityStatus + :ivar reason: The reason for the capability not being available. + :vartype reason: str + :ivar name: Name of the storage tier. + :vartype name: str + :ivar iops: Supported IOPS for the storage tier. 
+ :vartype iops: int + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of the storage tier.""" + iops: Optional[int] = rest_field(visibility=["read"]) + """Supported IOPS for the storage tier.""" + + +class SupportedFeature(_Model): + """Features supported. + + :ivar name: Name of the feature. + :vartype name: str + :ivar status: Status of the feature. Indicates if the feature is enabled or not. Known values + are: "Enabled" and "Disabled". + :vartype status: str or ~azure.mgmt.postgresql.models.FeatureStatus + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """Name of the feature.""" + status: Optional[Union[str, "_models.FeatureStatus"]] = rest_field(visibility=["read"]) + """Status of the feature. Indicates if the feature is enabled or not. Known values are: + \"Enabled\" and \"Disabled\".""" + + +class SystemData(_Model): + """Metadata pertaining to creation and last modification of the resource. + + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The type of identity that created the resource. Known values are: + "User", "Application", "ManagedIdentity", and "Key". + :vartype created_by_type: str or ~azure.mgmt.postgresql.models.CreatedByType + :ivar created_at: The timestamp of resource creation (UTC). + :vartype created_at: ~datetime.datetime + :ivar last_modified_by: The identity that last modified the resource. + :vartype last_modified_by: str + :ivar last_modified_by_type: The type of identity that last modified the resource. Known values + are: "User", "Application", "ManagedIdentity", and "Key". + :vartype last_modified_by_type: str or ~azure.mgmt.postgresql.models.CreatedByType + :ivar last_modified_at: The timestamp of resource last modification (UTC). 
+ :vartype last_modified_at: ~datetime.datetime + """ + + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read", "create", "update", "delete", "query"]) + """The identity that created the resource.""" + created_by_type: Optional[Union[str, "_models.CreatedByType"]] = rest_field( + name="createdByType", visibility=["read", "create", "update", "delete", "query"] + ) + """The type of identity that created the resource. Known values are: \"User\", \"Application\", + \"ManagedIdentity\", and \"Key\".""" + created_at: Optional[datetime.datetime] = rest_field( + name="createdAt", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The timestamp of resource creation (UTC).""" + last_modified_by: Optional[str] = rest_field( + name="lastModifiedBy", visibility=["read", "create", "update", "delete", "query"] + ) + """The identity that last modified the resource.""" + last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = rest_field( + name="lastModifiedByType", visibility=["read", "create", "update", "delete", "query"] + ) + """The type of identity that last modified the resource. Known values are: \"User\", + \"Application\", \"ManagedIdentity\", and \"Key\".""" + last_modified_at: Optional[datetime.datetime] = rest_field( + name="lastModifiedAt", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The timestamp of resource last modification (UTC).""" + + @overload + def __init__( + self, + *, + created_by: Optional[str] = None, + created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + created_at: Optional[datetime.datetime] = None, + last_modified_by: Optional[str] = None, + last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None, + last_modified_at: Optional[datetime.datetime] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TuningOptions(ProxyResource): + """Tuning options available for a server. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + """ + + +class UserAssignedIdentity(_Model): + """Identities associated with a server. + + :ivar user_assigned_identities: Map of user assigned managed identities. + :vartype user_assigned_identities: dict[str, ~azure.mgmt.postgresql.models.UserIdentity] + :ivar principal_id: Identifier of the object of the service principal associated to the user + assigned managed identity. + :vartype principal_id: str + :ivar type: Types of identities associated with a server. Required. Known values are: "None", + "UserAssigned", "SystemAssigned", and "SystemAssigned,UserAssigned". + :vartype type: str or ~azure.mgmt.postgresql.models.IdentityType + :ivar tenant_id: Identifier of the tenant of a server.
+ :vartype tenant_id: str + """ + + user_assigned_identities: Optional[dict[str, "_models.UserIdentity"]] = rest_field( + name="userAssignedIdentities", visibility=["read", "create", "update", "delete", "query"] + ) + """Map of user assigned managed identities.""" + principal_id: Optional[str] = rest_field( + name="principalId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the object of the service principal associated to the user assigned managed + identity.""" + type: Union[str, "_models.IdentityType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Types of identities associated with a server. Required. Known values are: \"None\", + \"UserAssigned\", \"SystemAssigned\", and \"SystemAssigned,UserAssigned\".""" + tenant_id: Optional[str] = rest_field(name="tenantId", visibility=["read"]) + """Identifier of the tenant of a server.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.IdentityType"], + user_assigned_identities: Optional[dict[str, "_models.UserIdentity"]] = None, + principal_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UserIdentity(_Model): + """User assigned managed identity associated with a server. + + :ivar principal_id: Identifier of the object of the service principal associated to the user + assigned managed identity. + :vartype principal_id: str + :ivar client_id: Identifier of the client of the service principal associated to the user + assigned managed identity. 
+ :vartype client_id: str + """ + + principal_id: Optional[str] = rest_field( + name="principalId", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifier of the object of the service principal associated to the user assigned managed + identity.""" + client_id: Optional[str] = rest_field(name="clientId", visibility=["read", "create", "update", "delete", "query"]) + """Identifier of the client of the service principal associated to the user assigned managed + identity.""" + + @overload + def __init__( + self, + *, + principal_id: Optional[str] = None, + client_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ValidationDetails(_Model): + """Details for the validation for migration. + + :ivar status: Validation status for migration. Known values are: "Failed", "Succeeded", and + "Warning". + :vartype status: str or ~azure.mgmt.postgresql.models.ValidationState + :ivar validation_start_time_in_utc: Start time (UTC) for validation. + :vartype validation_start_time_in_utc: ~datetime.datetime + :ivar validation_end_time_in_utc: End time (UTC) for validation. + :vartype validation_end_time_in_utc: ~datetime.datetime + :ivar server_level_validation_details: Details of server level validations. + :vartype server_level_validation_details: + list[~azure.mgmt.postgresql.models.ValidationSummaryItem] + :ivar db_level_validation_details: Details of database level validations. + :vartype db_level_validation_details: + list[~azure.mgmt.postgresql.models.DbLevelValidationStatus] + """ + + status: Optional[Union[str, "_models.ValidationState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Validation status for migration.
Known values are: \"Failed\", \"Succeeded\", and \"Warning\".""" + validation_start_time_in_utc: Optional[datetime.datetime] = rest_field( + name="validationStartTimeInUtc", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start time (UTC) for validation.""" + validation_end_time_in_utc: Optional[datetime.datetime] = rest_field( + name="validationEndTimeInUtc", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """End time (UTC) for validation.""" + server_level_validation_details: Optional[list["_models.ValidationSummaryItem"]] = rest_field( + name="serverLevelValidationDetails", visibility=["read", "create", "update", "delete", "query"] + ) + """Details of server level validations.""" + db_level_validation_details: Optional[list["_models.DbLevelValidationStatus"]] = rest_field( + name="dbLevelValidationDetails", visibility=["read", "create", "update", "delete", "query"] + ) + """Details of database level validations.""" + + @overload + def __init__( + self, + *, + status: Optional[Union[str, "_models.ValidationState"]] = None, + validation_start_time_in_utc: Optional[datetime.datetime] = None, + validation_end_time_in_utc: Optional[datetime.datetime] = None, + server_level_validation_details: Optional[list["_models.ValidationSummaryItem"]] = None, + db_level_validation_details: Optional[list["_models.DbLevelValidationStatus"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ValidationMessage(_Model): + """Validation message object. + + :ivar state: Severity of validation message. Known values are: "Failed", "Succeeded", and + "Warning".
+ :vartype state: str or ~azure.mgmt.postgresql.models.ValidationState + :ivar message: Validation message string. + :vartype message: str + """ + + state: Optional[Union[str, "_models.ValidationState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Severity of validation message. Known values are: \"Failed\", \"Succeeded\", and \"Warning\".""" + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Validation message string.""" + + @overload + def __init__( + self, + *, + state: Optional[Union[str, "_models.ValidationState"]] = None, + message: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ValidationSummaryItem(_Model): + """Validation summary object. + + :ivar type: Validation type. + :vartype type: str + :ivar state: Validation status for migration. Known values are: "Failed", "Succeeded", and + "Warning". + :vartype state: str or ~azure.mgmt.postgresql.models.ValidationState + :ivar messages: Validation messages. + :vartype messages: list[~azure.mgmt.postgresql.models.ValidationMessage] + """ + + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Validation type.""" + state: Optional[Union[str, "_models.ValidationState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Validation status for migration. 
Known values are: \"Failed\", \"Succeeded\", and \"Warning\".""" + messages: Optional[list["_models.ValidationMessage"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Validation messages.""" + + @overload + def __init__( + self, + *, + type: Optional[str] = None, + state: Optional[Union[str, "_models.ValidationState"]] = None, + messages: Optional[list["_models.ValidationMessage"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VirtualEndpoint(ProxyResource): + """Pair of virtual endpoints for a server. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy + information. + :vartype system_data: ~azure.mgmt.postgresql.models.SystemData + :ivar properties: Properties of the pair of virtual endpoints. + :vartype properties: ~azure.mgmt.postgresql.models.VirtualEndpointResourceProperties + """ + + properties: Optional["_models.VirtualEndpointResourceProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of the pair of virtual endpoints.""" + + __flattened_items = ["endpoint_type", "members", "virtual_endpoints"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.VirtualEndpointResourceProperties"] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class VirtualEndpointResourceForPatch(_Model): + """Pair of virtual endpoints for a server. + + :ivar properties: Properties of the pair of virtual endpoints. + :vartype properties: ~azure.mgmt.postgresql.models.VirtualEndpointResourceProperties + """ + + properties: Optional["_models.VirtualEndpointResourceProperties"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Properties of the pair of virtual endpoints.""" + + __flattened_items = ["endpoint_type", "members", "virtual_endpoints"] + + @overload + def __init__( + self, + *, + properties: Optional["_models.VirtualEndpointResourceProperties"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + _flattened_input = {k: kwargs.pop(k) for k in kwargs.keys() & self.__flattened_items} + super().__init__(*args, **kwargs) + for k, v in _flattened_input.items(): + setattr(self, k, v) + + def __getattr__(self, name: str) -> Any: + if name in self.__flattened_items: + if self.properties is None: + return None + return getattr(self.properties, name) + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") + + def __setattr__(self, key: str, value: Any) -> None: + if key in self.__flattened_items: + if self.properties is None: + self.properties = self._attr_to_rest_field["properties"]._class_type() + setattr(self.properties, key, value) + else: + super().__setattr__(key, value) + + +class VirtualEndpointResourceProperties(_Model): + """Properties of a pair of virtual endpoints. + + :ivar endpoint_type: Type of endpoint for the virtual endpoints. "ReadWrite" + :vartype endpoint_type: str or ~azure.mgmt.postgresql.models.VirtualEndpointType + :ivar members: List of servers that one of the virtual endpoints can refer to. + :vartype members: list[str] + :ivar virtual_endpoints: List of virtual endpoints for a server. + :vartype virtual_endpoints: list[str] + """ + + endpoint_type: Optional[Union[str, "_models.VirtualEndpointType"]] = rest_field( + name="endpointType", visibility=["read", "create", "update", "delete", "query"] + ) + """Type of endpoint for the virtual endpoints. 
\"ReadWrite\"""" + members: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of servers that one of the virtual endpoints can refer to.""" + virtual_endpoints: Optional[list[str]] = rest_field(name="virtualEndpoints", visibility=["read"]) + """List of virtual endpoints for a server.""" + + @overload + def __init__( + self, + *, + endpoint_type: Optional[Union[str, "_models.VirtualEndpointType"]] = None, + members: Optional[list[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VirtualNetworkSubnetUsageModel(_Model): + """Virtual network subnet usage data. + + :ivar delegated_subnets_usage: List of delegated subnet usage entries. + :vartype delegated_subnets_usage: list[~azure.mgmt.postgresql.models.DelegatedSubnetUsage] + :ivar location: location of the delegated subnet usage. + :vartype location: str + :ivar subscription_id: subscriptionId of the delegated subnet usage. + :vartype subscription_id: str + """ + + delegated_subnets_usage: Optional[list["_models.DelegatedSubnetUsage"]] = rest_field( + name="delegatedSubnetsUsage", visibility=["read"] + ) + location: Optional[str] = rest_field(visibility=["read"]) + """location of the delegated subnet usage.""" + subscription_id: Optional[str] = rest_field(name="subscriptionId", visibility=["read"]) + """subscriptionId of the delegated subnet usage.""" + + +class VirtualNetworkSubnetUsageParameter(_Model): + """Virtual network subnet usage parameter. + + :ivar virtual_network_arm_resource_id: Virtual network resource id.
+ :vartype virtual_network_arm_resource_id: str + """ + + virtual_network_arm_resource_id: Optional[str] = rest_field( + name="virtualNetworkArmResourceId", visibility=["read", "create", "update", "delete", "query"] + ) + """Virtual network resource id.""" + + @overload + def __init__( + self, + *, + virtual_network_arm_resource_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_patch.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/models/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/__init__.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/__init__.py new file mode 100644 index 000000000000..ec5ea57d4214 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/__init__.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import Operations # type: ignore +from ._operations import MigrationsOperations # type: ignore +from ._operations import ServersOperations # type: ignore +from ._operations import ConfigurationsOperations # type: ignore +from ._operations import DatabasesOperations # type: ignore +from ._operations import FirewallRulesOperations # type: ignore +from ._operations import PrivateEndpointConnectionsOperations # type: ignore +from ._operations import PrivateLinkResourcesOperations # type: ignore +from ._operations import VirtualEndpointsOperations # type: ignore +from ._operations import AdministratorsMicrosoftEntraOperations # type: ignore +from ._operations import CapabilitiesByServerOperations # type: ignore +from ._operations import CapturedLogsOperations # type: ignore +from 
._operations import BackupsLongTermRetentionOperations # type: ignore +from ._operations import ReplicasOperations # type: ignore +from ._operations import AdvancedThreatProtectionSettingsOperations # type: ignore +from ._operations import ServerThreatProtectionSettingsOperations # type: ignore +from ._operations import BackupsAutomaticAndOnDemandOperations # type: ignore +from ._operations import TuningOptionsOperations # type: ignore +from ._operations import CapabilitiesByLocationOperations # type: ignore +from ._operations import NameAvailabilityOperations # type: ignore +from ._operations import PrivateDnsZoneSuffixOperations # type: ignore +from ._operations import QuotaUsagesOperations # type: ignore +from ._operations import VirtualNetworkSubnetUsageOperations # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Operations", + "MigrationsOperations", + "ServersOperations", + "ConfigurationsOperations", + "DatabasesOperations", + "FirewallRulesOperations", + "PrivateEndpointConnectionsOperations", + "PrivateLinkResourcesOperations", + "VirtualEndpointsOperations", + "AdministratorsMicrosoftEntraOperations", + "CapabilitiesByServerOperations", + "CapturedLogsOperations", + "BackupsLongTermRetentionOperations", + "ReplicasOperations", + "AdvancedThreatProtectionSettingsOperations", + "ServerThreatProtectionSettingsOperations", + "BackupsAutomaticAndOnDemandOperations", + "TuningOptionsOperations", + "CapabilitiesByLocationOperations", + "NameAvailabilityOperations", + "PrivateDnsZoneSuffixOperations", + "QuotaUsagesOperations", + "VirtualNetworkSubnetUsageOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_operations.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_operations.py new file mode 100644 
index 000000000000..04cf13ed2550 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_operations.py @@ -0,0 +1,11393 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +import json +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core import PipelineClient +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from .._configuration import PostgreSQLManagementClientConfiguration +from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._utils.serialization import Deserializer, Serializer +from .._validation import api_version_validation + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] +JSON = MutableMapping[str, Any] +List = list + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_operations_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/providers/Microsoft.DBforPostgreSQL/operations" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_get_request( + resource_group_name: str, server_name: str, migration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrations/{migrationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 
"str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "migrationName": _SERIALIZER.url("migration_name", migration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_create_request( + resource_group_name: str, server_name: str, migration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrations/{migrationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "migrationName": _SERIALIZER.url("migration_name", migration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, 
"str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_update_request( + resource_group_name: str, server_name: str, migration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrations/{migrationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "migrationName": _SERIALIZER.url("migration_name", migration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_cancel_request( + resource_group_name: str, server_name: str, migration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrations/{migrationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "migrationName": _SERIALIZER.url("migration_name", migration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_list_by_target_server_request( # pylint: disable=name-too-long + resource_group_name: str, + server_name: str, + subscription_id: str, + *, + migration_list_filter: Optional[Union[str, _models.MigrationListFilter]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrations" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": 
_SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if migration_list_filter is not None: + _params["migrationListFilter"] = _SERIALIZER.query("migration_list_filter", migration_list_filter, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_migrations_check_name_availability_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/checkMigrationNameAvailability" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") 
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_get_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_create_or_update_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_update_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = 
_SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_delete_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_servers_list_by_resource_group_request( # pylint: disable=name-too-long + resource_group_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) 
# type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_list_by_subscription_request( # pylint: disable=name-too-long + subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/flexibleServers" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_restart_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/restart" + path_format_arguments = { + 
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_servers_start_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/start" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="POST", url=_url, params=_params, **kwargs) + + +def build_servers_stop_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/stop" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="POST", url=_url, params=_params, **kwargs) + + +def build_servers_migrate_network_mode_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/migrateNetwork" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_configurations_get_request( + 
resource_group_name: str, server_name: str, configuration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/configurations/{configurationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "configurationName": _SERIALIZER.url("configuration_name", configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_configurations_put_request( + resource_group_name: str, server_name: str, configuration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/configurations/{configurationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "configurationName": _SERIALIZER.url("configuration_name", configuration_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_configurations_update_request( + resource_group_name: str, server_name: str, configuration_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/configurations/{configurationName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "configurationName": _SERIALIZER.url("configuration_name", configuration_name, "str"), + } + + 
_url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_configurations_list_by_server_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/configurations" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_databases_get_request( + resource_group_name: str, server_name: str, database_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/databases/{databaseName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "databaseName": _SERIALIZER.url("database_name", database_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_databases_create_request( + resource_group_name: str, server_name: str, database_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/databases/{databaseName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "databaseName": 
_SERIALIZER.url("database_name", database_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_databases_delete_request( + resource_group_name: str, server_name: str, database_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/databases/{databaseName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "databaseName": _SERIALIZER.url("database_name", database_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_databases_list_by_server_request( + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", 
"application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/databases" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_firewall_rules_get_request( + resource_group_name: str, server_name: str, firewall_rule_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, 
"str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_firewall_rules_create_or_update_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, firewall_rule_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_firewall_rules_delete_request( + resource_group_name: str, server_name: str, firewall_rule_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/firewallRules/{firewallRuleName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_firewall_rules_list_by_server_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/firewallRules" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_endpoint_connections_get_request( # pylint: disable=name-too-long + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_endpoint_connections_update_request( # pylint: disable=name-too-long + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_endpoint_connections_delete_request( # pylint: disable=name-too-long + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", 
subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_private_endpoint_connections_list_by_server_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateEndpointConnections" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_link_resources_get_request( + resource_group_name: str, server_name: str, group_name: str, 
subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateLinkResources/{groupName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "groupName": _SERIALIZER.url("group_name", group_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_link_resources_list_by_server_request( # pylint: disable=name-too-long + resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/privateLinkResources" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 
"str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_virtual_endpoints_get_request( + resource_group_name: str, server_name: str, virtual_endpoint_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/virtualendpoints/{virtualEndpointName}" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "serverName": _SERIALIZER.url("server_name", server_name, "str"), + "virtualEndpointName": _SERIALIZER.url("virtual_endpoint_name", virtual_endpoint_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_virtual_endpoints_create_request( + resource_group_name: str, 
def build_virtual_endpoints_update_request(
    resource_group_name: str, server_name: str, virtual_endpoint_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates a virtual endpoint of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/virtualendpoints/{virtualEndpointName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "virtualEndpointName": _SERIALIZER.url("virtual_endpoint_name", virtual_endpoint_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")

    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)


def build_virtual_endpoints_delete_request(
    resource_group_name: str, server_name: str, virtual_endpoint_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a virtual endpoint of a flexible server."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/virtualendpoints/{virtualEndpointName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "virtualEndpointName": _SERIALIZER.url("virtual_endpoint_name", virtual_endpoint_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)


def build_virtual_endpoints_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all virtual endpoints of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/virtualendpoints"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_administrators_microsoft_entra_get_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, object_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single Microsoft Entra administrator of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/administrators/{objectId}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "objectId": _SERIALIZER.url("object_id", object_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_administrators_microsoft_entra_create_or_update_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, object_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a Microsoft Entra administrator."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/administrators/{objectId}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "objectId": _SERIALIZER.url("object_id", object_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")

    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)


def build_administrators_microsoft_entra_delete_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, object_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a Microsoft Entra administrator of a flexible server."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/administrators/{objectId}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "objectId": _SERIALIZER.url("object_id", object_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)


def build_administrators_microsoft_entra_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all Microsoft Entra administrators of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/administrators"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_capabilities_by_server_list_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists the capabilities of a specific flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/capabilities"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_captured_logs_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists the captured log files of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/logFiles"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_backups_long_term_retention_check_prerequisites_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that checks the prerequisites for a long-term-retention backup."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/ltrPreBackup"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_backups_long_term_retention_start_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that starts a long-term-retention backup of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/startLtrBackup"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)


def build_backups_long_term_retention_get_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, backup_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single long-term-retention backup operation."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/ltrBackupOperations/{backupName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "backupName": _SERIALIZER.url("backup_name", backup_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_backups_long_term_retention_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all long-term-retention backup operations of a server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/ltrBackupOperations"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_replicas_list_by_server_request(
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all read replicas of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/replicas"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_advanced_threat_protection_settings_get_request(  # pylint: disable=name-too-long
    resource_group_name: str,
    server_name: str,
    threat_protection_name: Union[str, _models.ThreatProtectionName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for one advanced threat protection setting of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/advancedThreatProtectionSettings/{threatProtectionName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "threatProtectionName": _SERIALIZER.url("threat_protection_name", threat_protection_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_advanced_threat_protection_settings_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all advanced threat protection settings of a server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/advancedThreatProtectionSettings"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_server_threat_protection_settings_create_or_update_request(  # pylint: disable=name-too-long
    resource_group_name: str,
    server_name: str,
    threat_protection_name: Union[str, _models.ThreatProtectionName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a server threat protection setting."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/advancedThreatProtectionSettings/{threatProtectionName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "threatProtectionName": _SERIALIZER.url("threat_protection_name", threat_protection_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")

    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)


def build_backups_automatic_and_on_demand_get_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, backup_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single automatic or on-demand backup of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/backups/{backupName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "backupName": _SERIALIZER.url("backup_name", backup_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_backups_automatic_and_on_demand_create_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, backup_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates an on-demand backup of a flexible server."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/backups/{backupName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "backupName": _SERIALIZER.url("backup_name", backup_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="PUT", url=_url, params=_params, **kwargs)


def build_backups_automatic_and_on_demand_delete_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, backup_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for an automatic or on-demand backup of a flexible server."""
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/backups/{backupName}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "backupName": _SERIALIZER.url("backup_name", backup_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_backups_automatic_and_on_demand_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all automatic and on-demand backups of a server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/backups"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_tuning_options_get_request(
    resource_group_name: str,
    server_name: str,
    tuning_option: Union[str, _models.TuningOptionParameterEnum],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single tuning option of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/tuningOptions/{tuningOption}"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "tuningOption": _SERIALIZER.url("tuning_option", tuning_option, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


def build_tuning_options_list_by_server_request(  # pylint: disable=name-too-long
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all tuning options of a flexible server."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/tuningOptions"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)


# NOTE(review): build_tuning_options_list_recommendations_request continues past the
# end of this chunk; only its visible head is reproduced below (it breaks off after
# the "# Construct headers" comment).
def build_tuning_options_list_recommendations_request(  # pylint: disable=name-too-long
    resource_group_name: str,
    server_name: str,
    tuning_option: Union[str, _models.TuningOptionParameterEnum],
    subscription_id: str,
    *,
    recommendation_type: Optional[Union[str, _models.RecommendationTypeParameterEnum]] = None,
    **kwargs: Any
) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/tuningOptions/{tuningOption}/recommendations"
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "tuningOption": _SERIALIZER.url("tuning_option", tuning_option, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if recommendation_type is not None:
        _params["recommendationType"] = _SERIALIZER.query("recommendation_type", recommendation_type, "str")

    # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_capabilities_by_location_list_request( # pylint: disable=name-too-long + location_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/capabilities" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "locationName": _SERIALIZER.url("location_name", location_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_name_availability_check_globally_request( # pylint: disable=name-too-long + subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/checkNameAvailability" + path_format_arguments = { + "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_name_availability_check_with_location_request( # pylint: disable=name-too-long + location_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/checkNameAvailability" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "locationName": _SERIALIZER.url("location_name", location_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_private_dns_zone_suffix_get_request(**kwargs: Any) -> HttpRequest: # pylint: 
disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/providers/Microsoft.DBforPostgreSQL/getPrivateDnsZoneSuffix" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_quota_usages_list_request(location_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/resourceType/flexibleServers/usages" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "locationName": _SERIALIZER.url("location_name", location_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_virtual_network_subnet_usage_list_request( # pylint: disable=name-too-long + location_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-01-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/checkVirtualNetworkSubnetUsage" + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "locationName": _SERIALIZER.url("location_name", location_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class Operations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`operations` attribute. 
    """

    def __init__(self, *args, **kwargs) -> None:
        # Internal plumbing shared by every operation group: when constructed
        # by the service client the positional arguments are, in order,
        # (client, config, serializer, deserializer); otherwise they arrive
        # as the corresponding keyword arguments.
        input_args = list(args)
        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            input_args.pop(0) if input_args else kwargs.pop("config")
        )
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> ItemPaged["_models.Operation"]:
        """Lists all available REST API operations.

        :return: An iterator like instance of Operation
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        # Optional response-transform hook applied to each page of results.
        cls: ClsType[List[_models.Operation]] = kwargs.pop("cls", None)

        # Map well-known status codes to azure-core exception types; callers
        # may extend/override via the error_map kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request. Later pages:
            # re-issue the service-provided nextLink, re-applying the client's
            # api-version because nextLink may omit or disagree on it.
            if not next_link:

                _request = build_operations_list_request(
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-quote each query value so the rebuilt URL stays valid.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        def extract_data(pipeline_response):
            # One page of results: "value" holds the items, "nextLink" (if
            # present and non-empty) drives the next fetch.
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.Operation], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, iter(list_of_elem)

        def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)


class MigrationsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s
        :attr:`migrations` attribute.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Same construction contract as the other operation groups:
        # positionally (client, config, serializer, deserializer), with
        # keyword fallbacks for each.
        input_args = list(args)
        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: PostgreSQLManagementClientConfiguration = (
            input_args.pop(0) if input_args else kwargs.pop("config")
        )
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def get(self, resource_group_name: str, server_name: str, migration_name: str, **kwargs: Any) -> _models.Migration:
        """Gets information about a migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code → exception mapping; extendable via the error_map kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.Migration] = kwargs.pop("cls", None)

        _request = build_migrations_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            migration_name=migration_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Prefix the relative ARM path with the configured cloud endpoint.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        # stream=True defers body download; the caller then owns iter_bytes().
        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.Migration, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def create(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: _models.Migration,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Creates a new migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required for creating a migration. Required.
        :type parameters: ~azure.mgmt.postgresql.models.Migration
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Creates a new migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required for creating a migration. Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Creates a new migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required for creating a migration. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: Union[_models.Migration, JSON, IO[bytes]],
        **kwargs: Any
    ) -> _models.Migration:
        """Creates a new migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required for creating a migration. Is one of the following types:
         Migration, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.Migration or JSON or IO[bytes]
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Migration] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        # File-like/bytes payloads are sent as-is; model/dict payloads are
        # JSON-encoded with read-only properties stripped.
        _content = None
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_migrations_create_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            migration_name=migration_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 200 = updated existing, 201 = created new.
        if response.status_code not in [200, 201]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.Migration, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def update(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: _models.MigrationResourceForPatch,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Updates an existing migration. The request body can contain one to many of the mutable
        properties present in the migration definition. Certain property updates initiate migration
        state transitions.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required to update an existing migration. Required.
        :type parameters: ~azure.mgmt.postgresql.models.MigrationResourceForPatch
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: JSON,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Updates an existing migration. The request body can contain one to many of the mutable
        properties present in the migration definition. Certain property updates initiate migration
        state transitions.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required to update an existing migration. Required.
        :type parameters: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Migration:
        """Updates an existing migration. The request body can contain one to many of the mutable
        properties present in the migration definition. Certain property updates initiate migration
        state transitions.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required to update an existing migration. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        server_name: str,
        migration_name: str,
        parameters: Union[_models.MigrationResourceForPatch, JSON, IO[bytes]],
        **kwargs: Any
    ) -> _models.Migration:
        """Updates an existing migration. The request body can contain one to many of the mutable
        properties present in the migration definition. Certain property updates initiate migration
        state transitions.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :param parameters: Parameters required to update an existing migration. Is one of the following
         types: MigrationResourceForPatch, JSON, IO[bytes] Required.
        :type parameters: ~azure.mgmt.postgresql.models.MigrationResourceForPatch or JSON or IO[bytes]
        :return: Migration. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Migration] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        # Same payload handling as create(): raw bytes/stream pass through,
        # everything else is JSON-encoded without read-only properties.
        _content = None
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_migrations_update_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            migration_name=migration_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.Migration, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def cancel(
        self, resource_group_name: str, server_name: str, migration_name: str, **kwargs: Any
    ) -> Optional[_models.Migration]:
        """Cancels an active migration.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param migration_name: Name of migration. Required.
        :type migration_name: str
        :return: Migration or None. The Migration is compatible with MutableMapping
        :rtype: ~azure.mgmt.postgresql.models.Migration or None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[Optional[_models.Migration]] = kwargs.pop("cls", None)

        _request = build_migrations_cancel_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            migration_name=migration_name,
            subscription_id=self._config.subscription_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 204 means the cancel was accepted with no body — hence Optional return.
        if response.status_code not in [200, 204]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = _failsafe_deserialize(
                _models.ErrorResponse,
                response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            if _stream:
                deserialized = response.iter_bytes()
            else:
                deserialized = _deserialize(_models.Migration, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def list_by_target_server(
        self,
        resource_group_name: str,
        server_name: str,
        *,
        migration_list_filter: Optional[Union[str, _models.MigrationListFilter]] = None,
        **kwargs: Any
    ) -> ItemPaged["_models.Migration"]:
        """Lists all migrations of a target flexible server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :keyword migration_list_filter: Migration list filter. Indicates if the request should retrieve
         only active migrations or all migrations. Defaults to Active. Known values are: "Active" and
         "All". Default value is None.
        :paramtype migration_list_filter: str or ~azure.mgmt.postgresql.models.MigrationListFilter
        :return: An iterator like instance of Migration
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Migration]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[List[_models.Migration]] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: canonical list request (with the optional filter).
            # Later pages: replay the nextLink with the client's api-version.
            if not next_link:

                _request = build_migrations_list_by_target_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    migration_list_filter=migration_list_filter,
                    api_version=self._config.api_version,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.base_url", self._config.base_url, "str", skip_quote=True
                    ),
                }
                _request.url = self._client.format_url(_request.url, **path_format_arguments)

            return _request

        def extract_data(pipeline_response):
            # "value" carries the page items; "nextLink" continues pagination.
            deserialized = pipeline_response.http_response.json()
            list_of_elem = _deserialize(List[_models.Migration], deserialized.get("value", []))
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.get("nextLink") or None, iter(list_of_elem)

        def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = _failsafe_deserialize(
                    _models.ErrorResponse,
                    response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    @overload
    def check_name_availability(
        self,
        resource_group_name: str,
        server_name: str,
        parameters: _models.MigrationNameAvailability,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.MigrationNameAvailability:
        """Check the validity and
availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: MigrationNameAvailability. 
The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. + Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def check_name_availability( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.MigrationNameAvailability, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.MigrationNameAvailability: + """Check the validity and availability of the given name, to assign it to a new migration. + + Checks if a proposed migration name is valid and available. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param parameters: Parameters required to check if a migration name is valid and available. Is + one of the following types: MigrationNameAvailability, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.MigrationNameAvailability or JSON or IO[bytes] + :return: MigrationNameAvailability. The MigrationNameAvailability is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.MigrationNameAvailability + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.MigrationNameAvailability] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_migrations_check_name_availability_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.MigrationNameAvailability, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ServersOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`servers` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, resource_group_name: str, server_name: str, **kwargs: Any) -> _models.Server: + """Gets information about an existing server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: Server. 
The Server is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Server + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + + _request = build_servers_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Server, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + parameters: 
Union[_models.Server, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_servers_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + 
response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: _models.Server, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: ~azure.mgmt.postgresql.models.Server + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.Server, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Creates a new server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. 
Required. + :type server_name: str + :param parameters: Parameters required to create a new server or to update an existing server. + Is one of the following types: Server, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.Server or JSON or IO[bytes] + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Server, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + 
"endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Server].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Server]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _update_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.ServerForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_servers_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": 
self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: _models.ServerForPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. 
+ :type parameters: ~azure.mgmt.postgresql.models.ServerForPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Server. The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.ServerForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.Server]: + """Updates an existing server. The request body can contain one or multiple of the properties + present in the normal server definition. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters required to update a server. Is one of the following types: + ServerForPatch, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.ServerForPatch or JSON or IO[bytes] + :return: An instance of LROPoller that returns Server. 
The Server is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Server] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Server, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + 
else: + polling_method = polling + if cont_token: + return LROPoller[_models.Server].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Server]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if 
response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete(self, resource_group_name: str, server_name: str, **kwargs: Any) -> LROPoller[None]: + """Deletes or drops an existing server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", 
skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> ItemPaged["_models.Server"]: + """Lists all servers in a resource group. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :return: An iterator like instance of Server + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_servers_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_by_subscription(self, **kwargs: Any) -> ItemPaged["_models.Server"]: + """Lists all servers in a subscription. 
+ + :return: An iterator like instance of Server + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_servers_list_by_subscription_request( + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: 
ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + def _restart_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[Union[_models.RestartParameter, JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + content_type = content_type if parameters else None + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" if parameters else None + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + if parameters is not None: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_servers_restart_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + 
api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[_models.RestartParameter] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. + :type parameters: ~azure.mgmt.postgresql.models.RestartParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[JSON] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[IO[bytes]] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Default value is None. 
+ :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_restart( + self, + resource_group_name: str, + server_name: str, + parameters: Optional[Union[_models.RestartParameter, JSON, IO[bytes]]] = None, + **kwargs: Any + ) -> LROPoller[None]: + """Restarts PostgreSQL database engine in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Parameters to restart a server. Is one of the following types: + RestartParameter, JSON, IO[bytes] Default value is None. + :type parameters: ~azure.mgmt.postgresql.models.RestartParameter or JSON or IO[bytes] + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + content_type = content_type if parameters else None + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._restart_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + 
params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _start_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_start_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: 
PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_start(self, resource_group_name: str, server_name: str, **kwargs: Any) -> LROPoller[None]: + """Starts a stopped server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._start_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _stop_initial(self, resource_group_name: str, server_name: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + 
error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_stop_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_stop(self, resource_group_name: str, server_name: str, **kwargs: Any) -> LROPoller[None]: + """Stops a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._stop_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @api_version_validation( + method_added_on="2026-01-01-preview", + 
params_added_on={ + "2026-01-01-preview": ["api_version", "subscription_id", "resource_group_name", "server_name", "accept"] + }, + api_versions_list=["2026-01-01-preview"], + ) + def _migrate_network_mode_initial( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_servers_migrate_network_mode_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if response.status_code == 202: + 
response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2026-01-01-preview", + params_added_on={ + "2026-01-01-preview": ["api_version", "subscription_id", "resource_group_name", "server_name", "accept"] + }, + api_versions_list=["2026-01-01-preview"], + ) + def begin_migrate_network_mode( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> LROPoller[_models.MigrateNetworkStatus]: + """Migrates the network configuration of a PostgreSQL flexible server from customer owned VNET to + Microsoft owned VNET with support for private endpoints, or from Microsoft owned VNET with no + support for private endpoints to Microsoft owned VNET with support for private endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An instance of LROPoller that returns MigrateNetworkStatus. 
The MigrateNetworkStatus + is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.MigrateNetworkStatus] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.MigrateNetworkStatus] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._migrate_network_mode_initial( + resource_group_name=resource_group_name, + server_name=server_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = _deserialize(_models.MigrateNetworkStatus, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.MigrateNetworkStatus].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return 
LROPoller[_models.MigrateNetworkStatus]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + +class ConfigurationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`configurations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, configuration_name: str, **kwargs: Any + ) -> _models.Configuration: + """Gets information about a specific configuration (also known as server parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :return: Configuration. 
The Configuration is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Configuration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Configuration] = kwargs.pop("cls", None) + + _request = build_configurations_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Configuration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _put_initial( + self, + 
resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_configurations_put_request( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + 
response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_put( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: _models.ConfigurationForUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known + as server parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Required. + :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. 
The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_put( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known + as server parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_put( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known + as server parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. 
+ Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_put( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates, using Put verb, the value assigned to a specific modifiable configuration (also known + as server parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Is one of the following types: + ConfigurationForUpdate, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate or JSON or IO[bytes] + :return: An instance of LROPoller that returns Configuration. 
The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Configuration] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._put_initial( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Configuration, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling 
is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Configuration].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Configuration]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _update_initial( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_configurations_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + 
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: _models.ConfigurationForUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates the value assigned to a specific modifiable configuration (also known as server + parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). 
Required. + :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates the value assigned to a specific modifiable configuration (also known as server + parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. 
The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates the value assigned to a specific modifiable configuration (also known as server + parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Configuration. The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + server_name: str, + configuration_name: str, + parameters: Union[_models.ConfigurationForUpdate, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.Configuration]: + """Updates the value assigned to a specific modifiable configuration (also known as server + parameter) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param configuration_name: Name of the configuration (also known as server parameter). + Required. + :type configuration_name: str + :param parameters: Parameters required to update the value of a specific modifiable + configuration (also known as server parameter). Is one of the following types: + ConfigurationForUpdate, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.ConfigurationForUpdate or JSON or IO[bytes] + :return: An instance of LROPoller that returns Configuration. The Configuration is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Configuration] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + configuration_name=configuration_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", 
response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Configuration, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Configuration].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Configuration]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.Configuration"]: + """Lists all configurations (also known as server parameters) of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :return: An iterator like instance of Configuration + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Configuration] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Configuration]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_configurations_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = 
_deserialize(List[_models.Configuration], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class DatabasesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`databases` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, resource_group_name: str, server_name: str, database_name: str, **kwargs: Any) -> _models.Database: + """Gets information about an existing database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. + :type database_name: str + :return: Database. The Database is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.Database + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Database] = kwargs.pop("cls", None) + + _request = build_databases_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + 
deserialized = _deserialize(_models.Database, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_initial( + self, + resource_group_name: str, + server_name: str, + database_name: str, + parameters: Union[_models.Database, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_databases_create_request( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + database_name: str, + parameters: _models.Database, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Database]: + """Creates a new database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. + :type database_name: str + :param parameters: Parameters required to create a new database. Required. + :type parameters: ~azure.mgmt.postgresql.models.Database + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Database. 
The Database is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + database_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Database]: + """Creates a new database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. + :type database_name: str + :param parameters: Parameters required to create a new database. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Database. The Database is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + database_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Database]: + """Creates a new database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). 
Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. + :type database_name: str + :param parameters: Parameters required to create a new database. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns Database. The Database is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create( + self, + resource_group_name: str, + server_name: str, + database_name: str, + parameters: Union[_models.Database, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.Database]: + """Creates a new database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. + :type database_name: str + :param parameters: Parameters required to create a new database. Is one of the following types: + Database, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.Database or JSON or IO[bytes] + :return: An instance of LROPoller that returns Database. 
The Database is compatible with + MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Database] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_initial( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.Database, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method 
= cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Database].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Database]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, database_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_databases_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, database_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes an existing database. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param database_name: Name of the database (case-sensitive). Exact database names can be + retrieved by getting the list of all existing databases in a server. Required. 
+ :type database_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + database_name=database_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.Database"]: + """Lists all databases in a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Database + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Database] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Database]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_databases_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Database], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class FirewallRulesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`firewall_rules` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> _models.FirewallRule: + """Gets information about a firewall rule in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :return: FirewallRule. 
The FirewallRule is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.FirewallRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FirewallRule] = kwargs.pop("cls", None) + + _request = build_firewall_rules_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FirewallRule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _create_or_update_initial( + self, + 
resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: Union[_models.FirewallRule, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_firewall_rules_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + 
response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: _models.FirewallRule, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: ~azure.mgmt.postgresql.models.FirewallRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns FirewallRule. 
The FirewallRule is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns FirewallRule. The FirewallRule is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. 
+ :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns FirewallRule. The FirewallRule is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + firewall_rule_name: str, + parameters: Union[_models.FirewallRule, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.FirewallRule]: + """Creates a new firewall rule or updates an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. + :type firewall_rule_name: str + :param parameters: Parameters required for creating or updating a firewall rule. Is one of the + following types: FirewallRule, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.FirewallRule or JSON or IO[bytes] + :return: An instance of LROPoller that returns FirewallRule. 
The FirewallRule is compatible + with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.FirewallRule] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.FirewallRule, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif 
polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.FirewallRule].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.FirewallRule]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_firewall_rules_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, firewall_rule_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes an existing firewall rule. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param firewall_rule_name: Name of the firewall rule. Required. 
+ :type firewall_rule_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + firewall_rule_name=firewall_rule_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.FirewallRule"]: + """Lists information about all firewall rules in a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of FirewallRule + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.FirewallRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.FirewallRule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_firewall_rules_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.FirewallRule], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class PrivateEndpointConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`private_endpoint_connections` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :return: PrivateEndpointConnection. 
The PrivateEndpointConnection is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + _request = build_private_endpoint_connections_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PrivateEndpointConnection, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # 
type: ignore + + return deserialized # type: ignore + + def _update_initial( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_private_endpoint_connections_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass 
+ map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: ~azure.mgmt.postgresql.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns PrivateEndpointConnection. 
The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns PrivateEndpointConnection. The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns PrivateEndpointConnection. The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + server_name: str, + private_endpoint_connection_name: str, + parameters: Union[_models.PrivateEndpointConnection, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approves or rejects a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. + :type private_endpoint_connection_name: str + :param parameters: Parameters required to update a private endpoint connection. Is one of the + following types: PrivateEndpointConnection, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.PrivateEndpointConnection or JSON or IO[bytes] + :return: An instance of LROPoller that returns PrivateEndpointConnection. 
The + PrivateEndpointConnection is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.PrivateEndpointConnection, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, 
ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.PrivateEndpointConnection].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.PrivateEndpointConnection]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_private_endpoint_connections_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a private endpoint connection. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection associated + with the Azure resource. Required. 
+ :type private_endpoint_connection_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + private_endpoint_connection_name=private_endpoint_connection_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.PrivateEndpointConnection"]: + """Lists all 
private endpoint connections on a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of PrivateEndpointConnection + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_private_endpoint_connections_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + 
"self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.PrivateEndpointConnection], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class PrivateLinkResourcesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`private_link_resources` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, group_name: str, **kwargs: Any + ) -> _models.PrivateLinkResource: + """Gets a private link resource for PostgreSQL server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param group_name: The name of the private link resource. Required. + :type group_name: str + :return: PrivateLinkResource. 
The PrivateLinkResource is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.PrivateLinkResource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None) + + _request = build_private_link_resources_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + group_name=group_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.PrivateLinkResource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + 
def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.PrivateLinkResource"]: + """Gets the private link resources for PostgreSQL server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of PrivateLinkResource + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.PrivateLinkResource] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.PrivateLinkResource]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_private_link_resources_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, 
_parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.PrivateLinkResource], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class VirtualEndpointsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`virtual_endpoints` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any + ) -> _models.VirtualEndpoint: + """Gets information about a pair of virtual endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :return: VirtualEndpoint. 
The VirtualEndpoint is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualEndpoint + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None) + + _request = build_virtual_endpoints_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VirtualEndpoint, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def 
_create_initial( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: Union[_models.VirtualEndpoint, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_virtual_endpoints_create_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: _models.VirtualEndpoint, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Creates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to create or update a pair of virtual endpoints. + Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpoint + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. 
The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Creates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to create or update a pair of virtual endpoints. + Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Creates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. 
+ :type virtual_endpoint_name: str + :param parameters: Parameters required to create or update a pair of virtual endpoints. + Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: Union[_models.VirtualEndpoint, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Creates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to create or update a pair of virtual endpoints. Is one + of the following types: VirtualEndpoint, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpoint or JSON or IO[bytes] + :return: An instance of LROPoller that returns VirtualEndpoint. 
The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_initial( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.VirtualEndpoint, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + 
) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.VirtualEndpoint].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.VirtualEndpoint]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _update_initial( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: Union[_models.VirtualEndpointResourceForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_virtual_endpoints_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: _models.VirtualEndpointResourceForPatch, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Updates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to update a pair of virtual endpoints. Required. 
+ :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpointResourceForPatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Updates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to update a pair of virtual endpoints. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Updates a pair of virtual endpoints for a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to update a pair of virtual endpoints. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns VirtualEndpoint. The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + server_name: str, + virtual_endpoint_name: str, + parameters: Union[_models.VirtualEndpointResourceForPatch, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.VirtualEndpoint]: + """Updates a pair of virtual endpoints for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. + :type virtual_endpoint_name: str + :param parameters: Parameters required to update a pair of virtual endpoints. Is one of the + following types: VirtualEndpointResourceForPatch, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualEndpointResourceForPatch or JSON or + IO[bytes] + :return: An instance of LROPoller that returns VirtualEndpoint. 
The VirtualEndpoint is + compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VirtualEndpoint] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.VirtualEndpoint, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + 
) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.VirtualEndpoint].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.VirtualEndpoint]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_virtual_endpoints_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
_failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, virtual_endpoint_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a pair of virtual endpoints. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param virtual_endpoint_name: Base name of the virtual endpoints. Required. 
+ :type virtual_endpoint_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + virtual_endpoint_name=virtual_endpoint_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.VirtualEndpoint"]: + """Lists pair of virtual endpoints associated to a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of VirtualEndpoint + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.VirtualEndpoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.VirtualEndpoint]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_virtual_endpoints_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url 
= self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.VirtualEndpoint], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class AdministratorsMicrosoftEntraOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`administrators_microsoft_entra` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> _models.AdministratorMicrosoftEntra: + """Gets information about a server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :return: AdministratorMicrosoftEntra. 
The AdministratorMicrosoftEntra is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AdministratorMicrosoftEntra] = kwargs.pop("cls", None) + + _request = build_administrators_microsoft_entra_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AdministratorMicrosoftEntra, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized 
# type: ignore + + def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: Union[_models.AdministratorMicrosoftEntraAdd, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_administrators_microsoft_entra_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: _models.AdministratorMicrosoftEntraAdd, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraAdd + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdministratorMicrosoftEntra. 
The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdministratorMicrosoftEntra. The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdministratorMicrosoftEntra. The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + object_id: str, + parameters: Union[_models.AdministratorMicrosoftEntraAdd, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.AdministratorMicrosoftEntra]: + """Creates a new server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. + :type object_id: str + :param parameters: Required parameters for adding a server administrator associated to a + Microsoft Entra principal. Is one of the following types: AdministratorMicrosoftEntraAdd, JSON, + IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.AdministratorMicrosoftEntraAdd or JSON or + IO[bytes] + :return: An instance of LROPoller that returns AdministratorMicrosoftEntra. 
The + AdministratorMicrosoftEntra is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AdministratorMicrosoftEntra] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.AdministratorMicrosoftEntra, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, 
path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.AdministratorMicrosoftEntra].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.AdministratorMicrosoftEntra]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_administrators_microsoft_entra_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, object_id: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes an existing server administrator associated to a Microsoft Entra principal. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param object_id: Object identifier of the Microsoft Entra principal. Required. 
+ :type object_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + object_id=object_id, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.AdministratorMicrosoftEntra"]: + """List all server administrators associated to a Microsoft Entra principal. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of AdministratorMicrosoftEntra + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.AdministratorMicrosoftEntra] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AdministratorMicrosoftEntra]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_administrators_microsoft_entra_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", 
self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.AdministratorMicrosoftEntra], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class CapabilitiesByServerOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`capabilities_by_server` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, resource_group_name: str, server_name: str, **kwargs: Any) -> ItemPaged["_models.Capability"]: + """Lists the capabilities available for a given server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Capability + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Capability] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Capability]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_capabilities_by_server_list_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Capability], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class CapturedLogsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`captured_logs` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.CapturedLog"]: + """Lists all captured logs for download in a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of CapturedLog + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.CapturedLog] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.CapturedLog]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_captured_logs_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.CapturedLog], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class BackupsLongTermRetentionOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`backups_long_term_retention` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: _models.LtrPreBackupRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: ~azure.mgmt.postgresql.models.LtrPreBackupRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. 
The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: LtrPreBackupResponse. 
The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def check_prerequisites( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.LtrPreBackupRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.LtrPreBackupResponse: + """Performs all checks required for a long term retention backup operation to succeed. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Is one of the following types: + LtrPreBackupRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.LtrPreBackupRequest or JSON or IO[bytes] + :return: LtrPreBackupResponse. The LtrPreBackupResponse is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.LtrPreBackupResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.LtrPreBackupResponse] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_backups_long_term_retention_check_prerequisites_request( + 
resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.LtrPreBackupResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + def _start_initial( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.BackupsLongTermRetentionRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_backups_long_term_retention_start_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + 
response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: _models.BackupsLongTermRetentionRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns BackupsLongTermRetentionResponse. The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. 
+ Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns BackupsLongTermRetentionResponse. The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns BackupsLongTermRetentionResponse. 
The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_start( + self, + resource_group_name: str, + server_name: str, + parameters: Union[_models.BackupsLongTermRetentionRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.BackupsLongTermRetentionResponse]: + """Initiates a long term retention backup. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param parameters: Request body for operation. Is one of the following types: + BackupsLongTermRetentionRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionRequest or JSON or + IO[bytes] + :return: An instance of LROPoller that returns BackupsLongTermRetentionResponse. 
The + BackupsLongTermRetentionResponse is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.BackupsLongTermRetentionResponse] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.BackupsLongTermRetentionResponse] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._start_initial( + resource_group_name=resource_group_name, + server_name=server_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + + deserialized = _deserialize(_models.BackupsLongTermRetentionResponse, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return 
LROPoller[_models.BackupsLongTermRetentionResponse].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.BackupsLongTermRetentionResponse]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> _models.BackupsLongTermRetentionOperation: + """Gets the results of a long retention backup operation for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param backup_name: The name of the backup. Required. + :type backup_name: str + :return: BackupsLongTermRetentionOperation. The BackupsLongTermRetentionOperation is compatible + with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.BackupsLongTermRetentionOperation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BackupsLongTermRetentionOperation] = kwargs.pop("cls", None) + + _request = build_backups_long_term_retention_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url 
= self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BackupsLongTermRetentionOperation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.BackupsLongTermRetentionOperation"]: + """Lists the results of the long term retention backup operations for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. 
+ :type server_name: str + :return: An iterator like instance of BackupsLongTermRetentionOperation + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.BackupsLongTermRetentionOperation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BackupsLongTermRetentionOperation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_backups_long_term_retention_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + 
deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BackupsLongTermRetentionOperation], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class ReplicasOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`replicas` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list_by_server(self, resource_group_name: str, server_name: str, **kwargs: Any) -> ItemPaged["_models.Server"]: + """Lists all read replicas of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. 
+ :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of Server + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Server] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Server]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_replicas_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized 
= pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Server], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class AdvancedThreatProtectionSettingsOperations: # pylint: disable=name-too-long + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`advanced_threat_protection_settings` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + **kwargs: Any + ) -> _models.AdvancedThreatProtectionSettingsModel: + """Gets state of advanced threat protection settings for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. + :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :return: AdvancedThreatProtectionSettingsModel. 
The AdvancedThreatProtectionSettingsModel is + compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AdvancedThreatProtectionSettingsModel] = kwargs.pop("cls", None) + + _request = build_advanced_threat_protection_settings_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + threat_protection_name=threat_protection_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AdvancedThreatProtectionSettingsModel, response.json()) + + if cls: + return 
cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.AdvancedThreatProtectionSettingsModel"]: + """Lists state of advanced threat protection settings for a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of AdvancedThreatProtectionSettingsModel + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.AdvancedThreatProtectionSettingsModel]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_advanced_threat_protection_settings_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + 
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize( + List[_models.AdvancedThreatProtectionSettingsModel], deserialized.get("value", []) + ) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class ServerThreatProtectionSettingsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`server_threat_protection_settings` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + def _create_or_update_initial( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: Union[_models.AdvancedThreatProtectionSettingsModel, JSON, IO[bytes]], + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_server_threat_protection_settings_create_or_update_request( + resource_group_name=resource_group_name, + server_name=server_name, + threat_protection_name=threat_protection_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", 
skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: _models.AdvancedThreatProtectionSettingsModel, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdvancedThreatProtectionSettingsModel]: + """Creates or updates a server's Advanced Threat Protection settings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. 
+ :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :param parameters: The Advanced Threat Protection state for the server. Required. + :type parameters: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdvancedThreatProtectionSettingsModel. The + AdvancedThreatProtectionSettingsModel is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: JSON, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdvancedThreatProtectionSettingsModel]: + """Creates or updates a server's Advanced Threat Protection settings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. + :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :param parameters: The Advanced Threat Protection state for the server. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdvancedThreatProtectionSettingsModel. 
The + AdvancedThreatProtectionSettingsModel is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: IO[bytes], + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AdvancedThreatProtectionSettingsModel]: + """Creates or updates a server's Advanced Threat Protection settings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. + :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :param parameters: The Advanced Threat Protection state for the server. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns AdvancedThreatProtectionSettingsModel. 
The + AdvancedThreatProtectionSettingsModel is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + server_name: str, + threat_protection_name: Union[str, _models.ThreatProtectionName], + parameters: Union[_models.AdvancedThreatProtectionSettingsModel, JSON, IO[bytes]], + **kwargs: Any + ) -> LROPoller[_models.AdvancedThreatProtectionSettingsModel]: + """Creates or updates a server's Advanced Threat Protection settings. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param threat_protection_name: Name of the advanced threat protection settings. "Default" + Required. + :type threat_protection_name: str or ~azure.mgmt.postgresql.models.ThreatProtectionName + :param parameters: The Advanced Threat Protection state for the server. Is one of the following + types: AdvancedThreatProtectionSettingsModel, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel or JSON + or IO[bytes] + :return: An instance of LROPoller that returns AdvancedThreatProtectionSettingsModel. 
The + AdvancedThreatProtectionSettingsModel is compatible with MutableMapping + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.AdvancedThreatProtectionSettingsModel] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AdvancedThreatProtectionSettingsModel] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + server_name=server_name, + threat_protection_name=threat_protection_name, + parameters=parameters, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.AdvancedThreatProtectionSettingsModel, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = 
cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.AdvancedThreatProtectionSettingsModel].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.AdvancedThreatProtectionSettingsModel]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + +class BackupsAutomaticAndOnDemandOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`backups_automatic_and_on_demand` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> _models.BackupAutomaticAndOnDemand: + """Gets information of an on demand backup, given its name. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param backup_name: Name of the backup. Required. + :type backup_name: str + :return: BackupAutomaticAndOnDemand. 
The BackupAutomaticAndOnDemand is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BackupAutomaticAndOnDemand] = kwargs.pop("cls", None) + + _request = build_backups_automatic_and_on_demand_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.BackupAutomaticAndOnDemand, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return 
deserialized # type: ignore + + def _create_initial( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_backups_automatic_and_on_demand_create_request( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", 
response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_create( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> LROPoller[_models.BackupAutomaticAndOnDemand]: + """Creates an on demand backup of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param backup_name: Name of the backup. Required. + :type backup_name: str + :return: An instance of LROPoller that returns BackupAutomaticAndOnDemand. The + BackupAutomaticAndOnDemand is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.BackupAutomaticAndOnDemand] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_initial( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + 
response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = _deserialize(_models.BackupAutomaticAndOnDemand, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.BackupAutomaticAndOnDemand].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.BackupAutomaticAndOnDemand]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + def _delete_initial( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_backups_automatic_and_on_demand_delete_request( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + 
path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202, 204]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + response_headers = {} + if response.status_code == 202: + response_headers["Azure-AsyncOperation"] = self._deserialize( + "str", response.headers.get("Azure-AsyncOperation") + ) + response_headers["Location"] = self._deserialize("str", response.headers.get("Location")) + response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_delete( + self, resource_group_name: str, server_name: str, backup_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a specific backup, given its name. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param backup_name: Name of the backup. Required. 
+ :type backup_name: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + server_name=server_name, + backup_name=backup_name, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + @distributed_trace + def list_by_server( + self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.BackupAutomaticAndOnDemand"]: + """Lists all available backups of a server. 
+ + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of BackupAutomaticAndOnDemand + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.BackupAutomaticAndOnDemand] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.BackupAutomaticAndOnDemand]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_backups_automatic_and_on_demand_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, 
"str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.BackupAutomaticAndOnDemand], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class TuningOptionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`tuning_options` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get( + self, + resource_group_name: str, + server_name: str, + tuning_option: Union[str, _models.TuningOptionParameterEnum], + **kwargs: Any + ) -> _models.TuningOptions: + """Gets the tuning options of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param tuning_option: The name of the tuning option. Known values are: "index" and "table". + Required. + :type tuning_option: str or ~azure.mgmt.postgresql.models.TuningOptionParameterEnum + :return: TuningOptions. 
The TuningOptions is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.TuningOptions + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TuningOptions] = kwargs.pop("cls", None) + + _request = build_tuning_options_get_request( + resource_group_name=resource_group_name, + server_name=server_name, + tuning_option=tuning_option, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TuningOptions, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_by_server( + 
self, resource_group_name: str, server_name: str, **kwargs: Any + ) -> ItemPaged["_models.TuningOptions"]: + """Lists the tuning options of a server. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :return: An iterator like instance of TuningOptions + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.TuningOptions] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.TuningOptions]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_tuning_options_list_by_server_request( + resource_group_name=resource_group_name, + server_name=server_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + 
path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.TuningOptions], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_recommendations( + self, + resource_group_name: str, + server_name: str, + tuning_option: Union[str, _models.TuningOptionParameterEnum], + *, + recommendation_type: Optional[Union[str, _models.RecommendationTypeParameterEnum]] = None, + **kwargs: Any + ) -> ItemPaged["_models.ObjectRecommendation"]: + """Lists available object recommendations. + + :param resource_group_name: The name of the resource group. The name is case insensitive. + Required. + :type resource_group_name: str + :param server_name: The name of the server. Required. + :type server_name: str + :param tuning_option: The name of the tuning option. Known values are: "index" and "table". + Required. 
+ :type tuning_option: str or ~azure.mgmt.postgresql.models.TuningOptionParameterEnum + :keyword recommendation_type: Recommendations list filter. Retrieves recommendations based on + type. Known values are: "CreateIndex", "DropIndex", "ReIndex", and "AnalyzeTable". Default + value is None. + :paramtype recommendation_type: str or + ~azure.mgmt.postgresql.models.RecommendationTypeParameterEnum + :return: An iterator like instance of ObjectRecommendation + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.ObjectRecommendation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.ObjectRecommendation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_tuning_options_list_recommendations_request( + resource_group_name=resource_group_name, + server_name=server_name, + tuning_option=tuning_option, + subscription_id=self._config.subscription_id, + recommendation_type=recommendation_type, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = 
HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.ObjectRecommendation], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class CapabilitiesByLocationOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`capabilities_by_location` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, location_name: str, **kwargs: Any) -> ItemPaged["_models.Capability"]: + """Lists the capabilities available in a given location for a specific subscription. + + :param location_name: The name of the location. Required. + :type location_name: str + :return: An iterator like instance of Capability + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.Capability] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.Capability]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_capabilities_by_location_list_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = 
case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Capability], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class NameAvailabilityOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`name_availability` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def check_globally( + self, parameters: _models.CheckNameAvailabilityRequest, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_globally( + self, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_globally( + self, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def check_globally( + self, parameters: Union[_models.CheckNameAvailabilityRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Checks the validity and availability of the given name, to assign it to a new server or to use + it as the base name of a new pair of virtual endpoints. + + :param parameters: The request body. Is one of the following types: + CheckNameAvailabilityRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest or JSON or + IO[bytes] + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NameAvailabilityModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_name_availability_check_globally_request( + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NameAvailabilityModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def check_with_location( + self, + location_name: str, + parameters: _models.CheckNameAvailabilityRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_with_location( + self, location_name: str, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def check_with_location( + self, location_name: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: NameAvailabilityModel. The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def check_with_location( + self, + location_name: str, + parameters: Union[_models.CheckNameAvailabilityRequest, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.NameAvailabilityModel: + """Check the availability of name for resource. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Is one of the following types: + CheckNameAvailabilityRequest, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.CheckNameAvailabilityRequest or JSON or + IO[bytes] + :return: NameAvailabilityModel. 
The NameAvailabilityModel is compatible with MutableMapping + :rtype: ~azure.mgmt.postgresql.models.NameAvailabilityModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NameAvailabilityModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_name_availability_check_with_location_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + 
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NameAvailabilityModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class PrivateDnsZoneSuffixOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`private_dns_zone_suffix` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, **kwargs: Any) -> str: + """Gets the private DNS zone suffix. 
+ + :return: str + :rtype: str + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[str] = kwargs.pop("cls", None) + + _request = build_private_dns_zone_suffix_get_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(str, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class QuotaUsagesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`quota_usages` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, location_name: str, **kwargs: Any) -> ItemPaged["_models.QuotaUsage"]: + """Get quota usages at specified location in a given subscription. + + :param location_name: The name of the location. Required. + :type location_name: str + :return: An iterator like instance of QuotaUsage + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.postgresql.models.QuotaUsage] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.QuotaUsage]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_quota_usages_list_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: 
[urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.base_url", self._config.base_url, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.QuotaUsage], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + _models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + +class VirtualNetworkSubnetUsageOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.postgresql.PostgreSQLManagementClient`'s + :attr:`virtual_network_subnet_usage` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: PostgreSQLManagementClientConfiguration = ( + input_args.pop(0) if input_args else kwargs.pop("config") + ) + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def list( + self, + location_name: str, + parameters: _models.VirtualNetworkSubnetUsageParameter, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageParameter + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def list( + self, location_name: str, parameters: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def list( + self, location_name: str, parameters: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Required. + :type parameters: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VirtualNetworkSubnetUsageModel. The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def list( + self, + location_name: str, + parameters: Union[_models.VirtualNetworkSubnetUsageParameter, JSON, IO[bytes]], + **kwargs: Any + ) -> _models.VirtualNetworkSubnetUsageModel: + """Lists the virtual network subnet usage for a given virtual network. + + :param location_name: The name of the location. Required. + :type location_name: str + :param parameters: The request body. Is one of the following types: + VirtualNetworkSubnetUsageParameter, JSON, IO[bytes] Required. + :type parameters: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageParameter or JSON or + IO[bytes] + :return: VirtualNetworkSubnetUsageModel. 
The VirtualNetworkSubnetUsageModel is compatible with + MutableMapping + :rtype: ~azure.mgmt.postgresql.models.VirtualNetworkSubnetUsageModel + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VirtualNetworkSubnetUsageModel] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(parameters, (IOBase, bytes)): + _content = parameters + else: + _content = json.dumps(parameters, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_virtual_network_subnet_usage_list_request( + location_name=location_name, + subscription_id=self._config.subscription_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.base_url", self._config.base_url, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize( + 
_models.ErrorResponse, + response, + ) + raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VirtualNetworkSubnetUsageModel, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_patch.py b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_patch.py new file mode 100644 index 000000000000..87676c65a8f0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" + + +__all__: list[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/py.typed b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/azure/mgmt/postgresql/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/dev_requirements.txt b/sdk/postgresql/azure-mgmt-postgresql/dev_requirements.txt new file mode 100644 index 000000000000..ece056fe0984 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/dev_requirements.txt @@ -0,0 +1,5 @@ +-e ../../../eng/tools/azure-sdk-tools +../../core/azure-core +../../identity/azure-identity +../../core/azure-mgmt-core +aiohttp \ No newline at end of file diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_add.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_add.py new file mode 100644 index 000000000000..9939ac57cf7f --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_add.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python administrators_microsoft_entra_add.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.administrators_microsoft_entra.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + object_id="oooooooo-oooo-oooo-oooo-oooooooooooo", + parameters={ + "properties": { + "principalName": "exampleuser@contoso.com", + "principalType": "User", + "tenantId": "tttttttt-tttt-tttt-tttt-tttttttttttt", + } + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/AdministratorsMicrosoftEntraAdd.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_delete.py new file mode 100644 index 000000000000..d4c2dca24c76 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_delete.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python administrators_microsoft_entra_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.administrators_microsoft_entra.begin_delete( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + object_id="oooooooo-oooo-oooo-oooo-oooooooooooo", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/AdministratorsMicrosoftEntraDelete.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_get.py new file mode 100644 index 000000000000..6ecc494ea9a7 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python administrators_microsoft_entra_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.administrators_microsoft_entra.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + object_id="oooooooo-oooo-oooo-oooo-oooooooooooo", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/AdministratorsMicrosoftEntraGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_list_by_server.py new file mode 100644 index 000000000000..c76a0470d1d9 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/administrators_microsoft_entra_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python administrators_microsoft_entra_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.administrators_microsoft_entra.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/AdministratorsMicrosoftEntraListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_create_or_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_create_or_update.py new file mode 100644 index 000000000000..70359db69b4e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_create_or_update.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python advanced_threat_protection_settings_create_or_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.server_threat_protection_settings.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + threat_protection_name="Default", + parameters={"properties": {"state": "Enabled"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/AdvancedThreatProtectionSettingsCreateOrUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_get.py new file mode 100644 index 000000000000..5d88cc6115fe --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python advanced_threat_protection_settings_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.advanced_threat_protection_settings.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + threat_protection_name="Default", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/AdvancedThreatProtectionSettingsGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_list_by_server.py new file mode 100644 index 000000000000..508ac238ae62 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/advanced_threat_protection_settings_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python advanced_threat_protection_settings_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.advanced_threat_protection_settings.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/AdvancedThreatProtectionSettingsListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_create.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_create.py new file mode 100644 index 000000000000..3a8bf726ba73 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_create.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_automatic_and_on_demand_create.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_automatic_and_on_demand.begin_create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + backup_name="ondemandbackup-20250601T183022", + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/BackupsAutomaticAndOnDemandCreate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_delete.py new file mode 100644 index 000000000000..84ad1b516181 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_delete.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_automatic_and_on_demand_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.backups_automatic_and_on_demand.begin_delete( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + backup_name="ondemandbackup-20250601T183022", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/BackupsAutomaticAndOnDemandDelete.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_get.py new file mode 100644 index 000000000000..cfc60e395b0c --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_automatic_and_on_demand_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_automatic_and_on_demand.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + backup_name="backup_638830782181266873", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/BackupsAutomaticAndOnDemandGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_list_by_server.py new file mode 100644 index 000000000000..9a2c21cb2502 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_automatic_and_on_demand_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_automatic_and_on_demand_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_automatic_and_on_demand.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/BackupsAutomaticAndOnDemandListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_check_prerequisites.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_check_prerequisites.py new file mode 100644 index 000000000000..e733da91fae8 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_check_prerequisites.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_long_term_retention_check_prerequisites.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_long_term_retention.check_prerequisites( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"backupSettings": {"backupName": "exampleltrbackup"}}, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/BackupsLongTermRetentionCheckPrerequisites.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_get.py new file mode 100644 index 000000000000..d5896922867e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_long_term_retention_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_long_term_retention.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + backup_name="exampleltrbackup", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/BackupsLongTermRetentionGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_list_by_server.py new file mode 100644 index 000000000000..249f9ce4f32b --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_long_term_retention_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_long_term_retention.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/BackupsLongTermRetentionListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_start.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_start.py new file mode 100644 index 000000000000..a28e1d8130a8 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/backups_long_term_retention_start.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python backups_long_term_retention_start.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.backups_long_term_retention.begin_start( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"backupSettings": {"backupName": "exampleltrbackup"}, "targetDetails": {"sasUriList": ["sasuri"]}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/BackupsLongTermRetentionStart.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_location_list.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_location_list.py new file mode 100644 index 000000000000..f3c3f6bc4243 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_location_list.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python capabilities_by_location_list.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.capabilities_by_location.list( + location_name="eastus", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/CapabilitiesByLocationList.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_server_list.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_server_list.py new file mode 100644 index 000000000000..98cd67fd041c --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/capabilities_by_server_list.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python capabilities_by_server_list.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.capabilities_by_server.list( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/CapabilitiesByServerList.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/captured_logs_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/captured_logs_list_by_server.py new file mode 100644 index 000000000000..6fc8f38def6d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/captured_logs_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python captured_logs_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.captured_logs.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/CapturedLogsListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_get.py new file mode 100644 index 000000000000..fe9e8bcdca27 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python configurations_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.configurations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + configuration_name="array_nulls", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ConfigurationsGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_list_by_server.py new file mode 100644 index 000000000000..1d4d435ca0b4 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python configurations_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.configurations.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/ConfigurationsListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update.py new file mode 100644 index 000000000000..8430c0e4dbbe --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python configurations_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.configurations.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + configuration_name="constraint_exclusion", + parameters={"properties": {"source": "user-override", "value": "on"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ConfigurationsUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update_using_put.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update_using_put.py new file mode 100644 index 000000000000..df92a1827500 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/configurations_update_using_put.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python configurations_update_using_put.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.configurations.begin_put( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + configuration_name="constraint_exclusion", + parameters={"properties": {"source": "user-override", "value": "on"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ConfigurationsUpdateUsingPut.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_create.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_create.py new file mode 100644 index 000000000000..09b0cd177718 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_create.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python databases_create.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.databases.begin_create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + database_name="exampledatabase", + parameters={"properties": {"charset": "utf8", "collation": "en_US.utf8"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/DatabasesCreate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_delete.py new file mode 100644 index 000000000000..ad4ba40c5687 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_delete.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python databases_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.databases.begin_delete( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + database_name="exampledatabase", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/DatabasesDelete.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_get.py new file mode 100644 index 000000000000..c40ccfd61218 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python databases_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.databases.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + database_name="exampledatabase", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/DatabasesGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_list_by_server.py new file mode 100644 index 000000000000..2e2047a9eb57 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/databases_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python databases_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.databases.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/DatabasesListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_create_or_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_create_or_update.py new file mode 100644 index 000000000000..a87230c50a5a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_create_or_update.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python firewall_rules_create_or_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.firewall_rules.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + firewall_rule_name="examplefirewallrule", + parameters={"properties": {"endIpAddress": "255.255.255.255", "startIpAddress": "0.0.0.0"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/FirewallRulesCreateOrUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_delete.py new file mode 100644 index 000000000000..2d75b657fcc6 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_delete.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python firewall_rules_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.firewall_rules.begin_delete( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + firewall_rule_name="examplefirewallrule", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/FirewallRulesDelete.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_get.py new file mode 100644 index 000000000000..d10d086b5f7d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python firewall_rules_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.firewall_rules.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + firewall_rule_name="examplefirewallrule", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/FirewallRulesGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_list_by_server.py new file mode 100644 index 000000000000..8d3e32d87a9e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/firewall_rules_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python firewall_rules_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.firewall_rules.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/FirewallRulesListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_cancel.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_cancel.py new file mode 100644 index 000000000000..9524098198f1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_cancel.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_cancel.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.cancel( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCancel.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_check_name_availability.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_check_name_availability.py new file mode 100644 index 000000000000..9eba900ee341 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_check_name_availability.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_check_name_availability.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.check_name_availability( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"name": "examplemigration", "type": "Microsoft.DBforPostgreSQL/flexibleServers/migrations"}, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCheckNameAvailability.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create.py new file mode 100644 index 000000000000..286a4cbde972 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create.py @@ -0,0 +1,59 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationMode": "Offline", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + } + }, + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_other_source_types_validate_migrate.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_other_source_types_validate_migrate.py new 
file mode 100644 index 000000000000..62770a36db43 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_other_source_types_validate_migrate.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_other_source_types_validate_migrate.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationMode": "Offline", + "migrationOption": "ValidateAndMigrate", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + } + }, + "sourceDbServerResourceId": "examplesource:5432@exampleuser", + "sourceType": "OnPremises", + "sslMode": "Prefer", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateOtherSourceTypesValidateMigrate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_validate_only.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_validate_only.py new file mode 100644 index 000000000000..366b1e296eae --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_validate_only.py @@ -0,0 +1,60 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_validate_only.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationMode": "Offline", + "migrationOption": "Validate", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + } + }, + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateValidateOnly.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_fully_qualified_domain_name.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_fully_qualified_domain_name.py new file mode 100644 index 
000000000000..e6ddfc84508e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_fully_qualified_domain_name.py @@ -0,0 +1,58 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_with_fully_qualified_domain_name.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationMode": "Offline", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": {"sourceServerPassword": "xxxxxxxx", "targetServerPassword": "xxxxxxxx"} + }, + "sourceDbServerFullyQualifiedDomainName": "examplesource.contoso.com", + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + "targetDbServerFullyQualifiedDomainName": "exampletarget.contoso.com", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateWithFullyQualifiedDomainName.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_other_users.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_other_users.py new file mode 100644 index 000000000000..2582f2006d61 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_other_users.py @@ -0,0 +1,60 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_with_other_users.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationMode": "Offline", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + }, + "sourceServerUsername": "newadmin@examplesource", + "targetServerUsername": "targetadmin", + }, + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateWithOtherUsers.json +if __name__ == "__main__": + main() diff --git 
a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_private_endpoint_servers.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_private_endpoint_servers.py new file mode 100644 index 000000000000..005efe8837fa --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_private_endpoint_servers.py @@ -0,0 +1,60 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_with_private_endpoint_servers.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrationInstanceResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/flexibleServers/examplesourcemigration", + "migrationMode": "Offline", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + } + }, + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateWithPrivateEndpointServers.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_roles.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_roles.py new file mode 100644 index 000000000000..67d90c78c42d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_create_with_roles.py @@ -0,0 +1,60 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_create_with_roles.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.create( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={ + "location": "eastus", + "properties": { + "dbsToMigrate": ["exampledatabase1", "exampledatabase2", "exampledatabase3", "exampledatabase4"], + "migrateRoles": "True", + "migrationMode": "Offline", + "overwriteDbsInTarget": "True", + "secretParameters": { + "adminCredentials": { + "sourceServerPassword": "examplesourcepassword", + "targetServerPassword": "exampletargetpassword", + } + }, + "sourceDbServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBForPostgreSql/servers/examplesource", + }, + }, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsCreateWithRoles.json +if __name__ == "__main__": + main() diff --git 
a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get.py new file mode 100644 index 000000000000..85f3fff2789d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_and_migration.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_and_migration.py new file mode 100644 index 000000000000..8a541a21df25 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_and_migration.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_get_migration_with_successful_validation_and_migration.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsGetMigrationWithSuccessfulValidationAndMigration.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_but_migration_failure.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_but_migration_failure.py new file mode 100644 index 000000000000..d40d26544ae0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_but_migration_failure.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_get_migration_with_successful_validation_but_migration_failure.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsGetMigrationWithSuccessfulValidationButMigrationFailure.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_only.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_only.py new file mode 100644 index 000000000000..1b343700d7e2 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_successful_validation_only.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_get_migration_with_successful_validation_only.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsGetMigrationWithSuccessfulValidationOnly.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_validation_failures.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_validation_failures.py new file mode 100644 index 000000000000..f4936658226e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_get_migration_with_validation_failures.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_get_migration_with_validation_failures.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsGetMigrationWithValidationFailures.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_list_by_target_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_list_by_target_server.py new file mode 100644 index 000000000000..b78e98fd4e82 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_list_by_target_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_list_by_target_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.list_by_target_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsListByTargetServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_update.py new file mode 100644 index 000000000000..3ef0ca17d5f1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/migrations_update.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python migrations_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.migrations.update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + migration_name="examplemigration", + parameters={"properties": {"setupLogicalReplicationOnSourceDbIfNeeded": "True"}}, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/MigrationsUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_globally.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_globally.py new file mode 100644 index 000000000000..faf57543e6e4 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_globally.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python name_availability_check_globally.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.name_availability.check_globally( + parameters={"name": "exampleserver", "type": "Microsoft.DBforPostgreSQL/flexibleServers"}, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/NameAvailabilityCheckGlobally.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_with_location.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_with_location.py new file mode 100644 index 000000000000..eeeae0f765f5 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/name_availability_check_with_location.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python name_availability_check_with_location.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.name_availability.check_with_location( + location_name="eastus", + parameters={"name": "exampleserver", "type": "Microsoft.DBforPostgreSQL/flexibleServers"}, + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/NameAvailabilityCheckWithLocation.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/operations_list.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/operations_list.py new file mode 100644 index 000000000000..e99577c4782d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/operations_list.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python operations_list.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.operations.list() + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/OperationsList.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_dns_zone_suffix_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_dns_zone_suffix_get.py new file mode 100644 index 000000000000..89ee6feb4762 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_dns_zone_suffix_get.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python private_dns_zone_suffix_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.private_dns_zone_suffix.get() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/PrivateDnsZoneSuffixGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_delete.py new file mode 100644 index 000000000000..933d69cd3cea --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_delete.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python private_endpoint_connections_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.private_endpoint_connections.begin_delete( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + private_endpoint_connection_name="private-endpoint-connection-name.1fa229cd-bf3f-47f0-8c49-afb36723997e", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/PrivateEndpointConnectionsDelete.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_get.py new file mode 100644 index 000000000000..46a774a8f38e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python private_endpoint_connections_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.private_endpoint_connections.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + private_endpoint_connection_name="private-endpoint-connection-name.1fa229cd-bf3f-47f0-8c49-afb36723997e", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/PrivateEndpointConnectionsGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_list.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_list.py new file mode 100644 index 000000000000..f9e8c5a57419 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/private_endpoint_connections_list.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
def main():
    """List every private endpoint connection of a PostgreSQL flexible server and print each one."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # list_by_server returns a pageable iterator; iterate it to fetch all pages.
    connections = client.private_endpoint_connections.list_by_server(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
    )
    for connection in connections:
        print(connection)
def main():
    """Approve (update) a private endpoint connection on a PostgreSQL flexible server.

    A long-running operation: the poller is awaited and the updated resource printed.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    client = PostgreSQLManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="SUBSCRIPTION_ID",
    )

    response = client.private_endpoint_connections.begin_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        private_endpoint_connection_name="private-endpoint-connection-name.1fa229cd-bf3f-47f0-8c49-afb36723997e",
        parameters={
            "properties": {
                "privateLinkServiceConnectionState": {
                    # Fix: the generated sample leaked reStructuredText link markup
                    # ("Approved by `johndoe@contoso.com `_") into this runtime
                    # payload string; the intended value is the plain email text.
                    "description": "Approved by johndoe@contoso.com",
                    "status": "Approved",
                }
            }
        },
    ).result()
    print(response)
def main():
    """Fetch one private link resource group for a PostgreSQL flexible server and print it."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    resource = client.private_link_resources.get(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        group_name="exampleprivatelink",
    )
    print(resource)
def main():
    """List the private link resources of a PostgreSQL flexible server and print each one."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # list_by_server returns a pageable iterator; iterate it to fetch all pages.
    resources = client.private_link_resources.list_by_server(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
    )
    for resource in resources:
        print(resource)
def main():
    """List quota usages for PostgreSQL flexible servers in a location and print each entry."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # The quota usage list is pageable; iterate it to fetch all pages.
    usages = client.quota_usages.list(
        location_name="eastus",
    )
    for usage in usages:
        print(usage)
def main():
    """List the read replicas of a PostgreSQL flexible server and print each one."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # list_by_server returns a pageable iterator; iterate it to fetch all pages.
    replicas = client.replicas.list_by_server(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
    )
    for replica in replicas:
        print(replica)
def main():
    """Create an elastic-cluster PostgreSQL flexible server (long-running operation)."""
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the cluster create; hoisted into a local for readability.
    payload = {
        "location": "eastus",
        "properties": {
            "administratorLogin": "examplelogin",
            "administratorLoginPassword": "examplepassword",
            "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Disabled"},
            "cluster": {"clusterSize": 2, "defaultDatabaseName": "clusterdb"},
            "createMode": "Create",
            "highAvailability": {"mode": "Disabled"},
            "network": {"publicNetworkAccess": "Disabled"},
            "storage": {"autoGrow": "Disabled", "storageSizeGB": 256, "tier": "P15"},
            "version": "16",
        },
        "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"},
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    # Wait for provisioning to finish, then print the created server resource.
    print(poller.result())
b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled.py @@ -0,0 +1,65 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_geo_restore_with_data_encryption_enabled.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
def main():
    """Geo-restore a PostgreSQL flexible server with customer-managed-key data encryption.

    Long-running operation; waits for completion and prints the restored server.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the geo-restore; hoisted into a local for readability.
    payload = {
        "identity": {
            "type": "UserAssigned",
            "userAssignedIdentities": {
                "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity": {},
                "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {},
            },
        },
        "location": "eastus",
        "properties": {
            "createMode": "GeoRestore",
            "dataEncryption": {
                "geoBackupKeyURI": "https://examplegeoredundantkeyvault.vault.azure.net/keys/examplekey/yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy",
                "geoBackupUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity",
                "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity",
                "type": "AzureKeyVault",
            },
            "pointInTimeUTC": "2025-06-01T18:35:22.123456Z",
            "sourceServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/examplesourceserver",
        },
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
2026-01-01-preview/ServersCreateGeoRestoreWithDataEncryptionEnabled.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled_auto_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled_auto_update.py new file mode 100644 index 000000000000..804e441c6167 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_geo_restore_with_data_encryption_enabled_auto_update.py @@ -0,0 +1,65 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_geo_restore_with_data_encryption_enabled_auto_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
def main():
    """Geo-restore a PostgreSQL flexible server with auto-updating (versionless) key URIs.

    Long-running operation; waits for completion and prints the restored server.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the geo-restore; key URIs omit a version segment so the
    # service always encrypts with the latest key version.
    payload = {
        "identity": {
            "type": "UserAssigned",
            "userAssignedIdentities": {
                "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity": {},
                "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {},
            },
        },
        "location": "eastus",
        "properties": {
            "createMode": "GeoRestore",
            "dataEncryption": {
                "geoBackupKeyURI": "https://examplegeoredundantkeyvault.vault.azure.net/keys/examplekey",
                "geoBackupUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity",
                "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey",
                "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity",
                "type": "AzureKeyVault",
            },
            "pointInTimeUTC": "2025-06-01T18:35:22.123456Z",
            "sourceServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/examplesourceserver",
        },
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
2026-01-01-preview/ServersCreateGeoRestoreWithDataEncryptionEnabledAutoUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_in_microsoft_owned_virtual_network_with_zone_redundant_high_availability.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_in_microsoft_owned_virtual_network_with_zone_redundant_high_availability.py new file mode 100644 index 000000000000..559f119ecacd --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_in_microsoft_owned_virtual_network_with_zone_redundant_high_availability.py @@ -0,0 +1,59 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_in_microsoft_owned_virtual_network_with_zone_redundant_high_availability.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
def main():
    """Create a zone-redundant HA PostgreSQL flexible server in a Microsoft-owned virtual network.

    Long-running operation; waits for completion and prints the created server.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the create; hoisted into a local for readability.
    payload = {
        "location": "eastus",
        "properties": {
            "administratorLogin": "exampleadministratorlogin",
            "administratorLoginPassword": "examplepassword",
            "availabilityZone": "1",
            "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Enabled"},
            "createMode": "Create",
            "highAvailability": {"mode": "ZoneRedundant"},
            "network": {"publicNetworkAccess": "Enabled"},
            "storage": {"autoGrow": "Disabled", "storageSizeGB": 512, "tier": "P20"},
            "version": "17",
        },
        "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"},
        "tags": {"InCustomerVnet": "false", "InMicrosoftVnet": "true"},
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
def main():
    """Create a same-zone HA PostgreSQL flexible server injected into a customer-owned virtual network.

    Long-running operation; waits for completion and prints the created server.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the create; the network section delegates the server
    # into the customer's subnet and links a private DNS zone.
    payload = {
        "location": "eastus",
        "properties": {
            "administratorLogin": "exampleadministratorlogin",
            "administratorLoginPassword": "examplepassword",
            "availabilityZone": "1",
            "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Enabled"},
            "createMode": "Create",
            "highAvailability": {"mode": "SameZone"},
            "network": {
                "delegatedSubnetResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/virtualNetworks/examplevirtualnetwork/subnets/examplesubnet",
                "privateDnsZoneArmResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/privateDnsZones/exampleprivatednszone.private.postgres.database",
            },
            "storage": {"autoGrow": "Disabled", "storageSizeGB": 512, "tier": "P20"},
            "version": "17",
        },
        "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"},
        "tags": {"InCustomerVnet": "true", "InMicrosoftVnet": "false"},
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
def main():
    """Restore a PostgreSQL flexible server to a point in time from a source server.

    Long-running operation; waits for completion and prints the restored server.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the restore; hoisted into a local for readability.
    payload = {
        "location": "eastus",
        "properties": {
            "createMode": "PointInTimeRestore",
            "pointInTimeUTC": "2025-06-01T18:35:22.123456Z",
            "sourceServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/examplesourceserver",
        },
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
def main():
    """Create a read replica of a data-encryption-enabled PostgreSQL flexible server.

    Long-running operation; waits for completion and prints the created replica.
    """
    # Authenticate with Microsoft Entra ID and build the management client.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )

    # Full request body for the replica create; the empty geo-backup fields mirror
    # the source example (geo-redundant backup is not configured for the replica).
    payload = {
        "identity": {
            "type": "UserAssigned",
            "userAssignedIdentities": {
                "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {}
            },
        },
        "location": "eastus",
        "properties": {
            "createMode": "Replica",
            "dataEncryption": {
                "geoBackupKeyURI": "",
                "geoBackupUserAssignedIdentityId": "",
                "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity",
                "type": "AzureKeyVault",
            },
            "pointInTimeUTC": "2025-06-01T18:35:22.123456Z",
            "sourceServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/examplesourceserver",
        },
    }

    poller = client.servers.begin_create_or_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        parameters=payload,
    )
    print(poller.result())
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "location": "eastus", + "properties": { + "createMode": "ReviveDropped", + "pointInTimeUTC": "2025-06-01T18:30:22.123456Z", + "sourceServerResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.DBforPostgreSQL/flexibleServers/exampledeletedserver", + }, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersCreateReviveDropped.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled.py new file mode 100644 index 000000000000..eac3f1ad9391 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled.py @@ -0,0 +1,74 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_with_data_encryption_enabled.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {} + }, + }, + "location": "eastus", + "properties": { + "administratorLogin": "exampleadministratorlogin", + "administratorLoginPassword": "examplepassword", + "availabilityZone": "1", + "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Disabled"}, + "createMode": "Create", + "dataEncryption": { + "geoBackupKeyURI": "", + "geoBackupUserAssignedIdentityId": "", + "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity", + "type": "AzureKeyVault", + }, + "highAvailability": 
{"mode": "ZoneRedundant"}, + "network": { + "delegatedSubnetResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/virtualNetworks/examplevirtualnetwork/subnets/examplesubnet", + "privateDnsZoneArmResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourcegroups/exampleresourcegroup/providers/Microsoft.Network/privateDnsZones/exampleprivatednszone.postgres.database.azure.com", + }, + "storage": {"autoGrow": "Disabled", "storageSizeGB": 512, "tier": "P20"}, + "version": "17", + }, + "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersCreateWithDataEncryptionEnabled.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled_auto_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled_auto_update.py new file mode 100644 index 000000000000..b035af1676a4 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_data_encryption_enabled_auto_update.py @@ -0,0 +1,74 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_with_data_encryption_enabled_auto_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {} + }, + }, + "location": "eastus", + "properties": { + "administratorLogin": "exampleadministratorlogin", + "administratorLoginPassword": "examplepassword", + "availabilityZone": "1", + "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Disabled"}, + "createMode": "Create", + "dataEncryption": { + "geoBackupKeyURI": "", + "geoBackupUserAssignedIdentityId": "", + "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey", + "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity", + "type": "AzureKeyVault", + }, + "highAvailability": {"mode": "ZoneRedundant"}, + 
"network": { + "delegatedSubnetResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/virtualNetworks/examplevirtualnetwork/subnets/examplesubnet", + "privateDnsZoneArmResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourcegroups/exampleresourcegroup/providers/Microsoft.Network/privateDnsZones/exampleprivatednszone.postgres.database.azure.com", + }, + "storage": {"autoGrow": "Disabled", "storageSizeGB": 512, "tier": "P20"}, + "version": "17", + }, + "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersCreateWithDataEncryptionEnabledAutoUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_microsoft_entra_enabled_in_your_own_virtual_network_without_high_availability.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_microsoft_entra_enabled_in_your_own_virtual_network_without_high_availability.py new file mode 100644 index 000000000000..2b7d46b45a40 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_create_with_microsoft_entra_enabled_in_your_own_virtual_network_without_high_availability.py @@ -0,0 +1,67 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_create_with_microsoft_entra_enabled_in_your_own_virtual_network_without_high_availability.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_create_or_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "location": "eastus", + "properties": { + "administratorLogin": "exampleadministratorlogin", + "administratorLoginPassword": "examplepassword", + "authConfig": { + "activeDirectoryAuth": "Enabled", + "passwordAuth": "Enabled", + "tenantId": "tttttt-tttt-tttt-tttt-tttttttttttt", + }, + "availabilityZone": "1", + "backup": {"backupRetentionDays": 7, "geoRedundantBackup": "Disabled"}, + "createMode": "Create", + "dataEncryption": {"type": "SystemManaged"}, + "highAvailability": {"mode": "Disabled"}, + "network": { + "delegatedSubnetResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/virtualNetworks/examplevirtualnetwork/subnets/examplesubnet", + "privateDnsZoneArmResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourcegroups/exampleresourcegroup/providers/Microsoft.Network/privateDnsZones/exampleprivatednszone.postgres.database.azure.com", + }, + 
"storage": {"autoGrow": "Disabled", "storageSizeGB": 512, "tier": "P20"}, + "version": "17", + }, + "sku": {"name": "Standard_D4ds_v5", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersCreateWithMicrosoftEntraEnabledInYourOwnVirtualNetworkWithoutHighAvailability.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_delete.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_delete.py new file mode 100644 index 000000000000..eff696c7c097 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_delete.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_delete.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see:
+    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+    client = PostgreSQLManagementClient(
+        credential=DefaultAzureCredential(),
+        subscription_id="SUBSCRIPTION_ID",
+    )
+
+    client.servers.begin_delete(
+        resource_group_name="exampleresourcegroup",
+        server_name="exampleserver",
+    ).result()
+
+
+# x-ms-original-file: 2026-01-01-preview/ServersDelete.json
+if __name__ == "__main__":
+    main()
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get.py
new file mode 100644
index 000000000000..ec4feda11eca
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.postgresql import PostgreSQLManagementClient
+
+"""
+# PREREQUISITES
+    pip install azure-identity
+    pip install azure-mgmt-postgresql
+# USAGE
+    python servers_get.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see:
+    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+    client = PostgreSQLManagementClient(
+        credential=DefaultAzureCredential(),
+        subscription_id="SUBSCRIPTION_ID",
+    )
+
+    response = client.servers.get(
+        resource_group_name="exampleresourcegroup",
+        server_name="exampleserver",
+    )
+    print(response)
+
+
+# x-ms-original-file: 2026-01-01-preview/ServersGet.json
+if __name__ == "__main__":
+    main()
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_private_endpoints.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_private_endpoints.py
new file mode 100644
index 000000000000..d4a60789a13c
--- /dev/null
+++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_private_endpoints.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.mgmt.postgresql import PostgreSQLManagementClient
+
+"""
+# PREREQUISITES
+    pip install azure-identity
+    pip install azure-mgmt-postgresql
+# USAGE
+    python servers_get_with_private_endpoints.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersGetWithPrivateEndpoints.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_vnet.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_vnet.py new file mode 100644 index 000000000000..2840852415ab --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_get_with_vnet.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_get_with_vnet.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersGetWithVnet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_resource_group.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_resource_group.py new file mode 100644 index 000000000000..c54d9ecee65a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_resource_group.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_list_by_resource_group.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.list_by_resource_group( + resource_group_name="exampleresourcegroup", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/ServersListByResourceGroup.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_subscription.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_subscription.py new file mode 100644 index 000000000000..2aa1083bab8b --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_list_by_subscription.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_list_by_subscription.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.list_by_subscription() + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/ServersListBySubscription.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_migrate_network_mode.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_migrate_network_mode.py new file mode 100644 index 000000000000..bfec2fd32828 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_migrate_network_mode.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_migrate_network_mode.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_migrate_network_mode( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersMigrateNetworkMode.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_standalone_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_standalone_server.py new file mode 100644 index 000000000000..a9914f775696 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_standalone_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_promote_replica_as_forced_standalone_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"properties": {"replica": {"promoteMode": "Standalone", "promoteOption": "Forced"}}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersPromoteReplicaAsForcedStandaloneServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_switchover.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_switchover.py new file mode 100644 index 000000000000..7c5889332b28 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_forced_switchover.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_promote_replica_as_forced_switchover.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"properties": {"replica": {"promoteMode": "Switchover", "promoteOption": "Forced"}}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersPromoteReplicaAsForcedSwitchover.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_standalone_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_standalone_server.py new file mode 100644 index 000000000000..ac019f7d4add --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_standalone_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_promote_replica_as_planned_standalone_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"properties": {"replica": {"promoteMode": "Standalone", "promoteOption": "Planned"}}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersPromoteReplicaAsPlannedStandaloneServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_switchover.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_switchover.py new file mode 100644 index 000000000000..fa4a3d36f1a2 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_promote_replica_as_planned_switchover.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_promote_replica_as_planned_switchover.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"properties": {"replica": {"promoteMode": "Switchover", "promoteOption": "Planned"}}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersPromoteReplicaAsPlannedSwitchover.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart.py new file mode 100644 index 000000000000..564e62aaf233 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_restart.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.servers.begin_restart( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/ServersRestart.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart_with_failover.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart_with_failover.py new file mode 100644 index 000000000000..eb8328838828 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_restart_with_failover.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_restart_with_failover.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.servers.begin_restart( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/ServersRestartWithFailover.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_start.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_start.py new file mode 100644 index 000000000000..bb732d81ea43 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_start.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_start.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.servers.begin_start( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/ServersStart.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_stop.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_stop.py new file mode 100644 index 000000000000..3b74e6a5a6e0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_stop.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_stop.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + client.servers.begin_stop( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ).result() + + +# x-ms-original-file: 2026-01-01-preview/ServersStop.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update.py new file mode 100644 index 000000000000..183d4371a91a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "properties": { + "administratorLoginPassword": "examplenewpassword", + "backup": {"backupRetentionDays": 20}, + "createMode": "Update", + "storage": {"autoGrow": "Enabled", "storageSizeGB": 1024, "tier": "P30"}, + }, + "sku": {"name": "Standard_D8s_v3", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_custom_maintenance_window.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_custom_maintenance_window.py new file mode 100644 index 000000000000..e30d6d35a662 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_custom_maintenance_window.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update_with_custom_maintenance_window.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "properties": { + "createMode": "Update", + "maintenanceWindow": {"customWindow": "Enabled", "dayOfWeek": 0, "startHour": 8, "startMinute": 0}, + } + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdateWithCustomMaintenanceWindow.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled.py new file mode 100644 index 000000000000..00a6af1571ee --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled.py @@ -0,0 +1,65 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update_with_data_encryption_enabled.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity": {}, + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {}, + }, + }, + "properties": { + "administratorLoginPassword": "examplenewpassword", + "backup": {"backupRetentionDays": 20}, + "createMode": "Update", + "dataEncryption": { + "geoBackupKeyURI": 
"https://examplegeoredundantkeyvault.vault.azure.net/keys/examplekey/yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy", + "geoBackupUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity", + "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity", + "type": "AzureKeyVault", + }, + }, + "sku": {"name": "Standard_D8s_v3", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdateWithDataEncryptionEnabled.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled_auto_update.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled_auto_update.py new file mode 100644 index 000000000000..9b716d457b76 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_data_encryption_enabled_auto_update.py @@ -0,0 +1,65 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update_with_data_encryption_enabled_auto_update.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity": {}, + "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity": {}, + }, + }, + "properties": { + "administratorLoginPassword": "examplenewpassword", + "backup": {"backupRetentionDays": 20}, + "createMode": "Update", + "dataEncryption": { + "geoBackupKeyURI": "https://examplegeoredundantkeyvault.vault.azure.net/keys/examplekey", + "geoBackupUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/examplegeoredundantidentity", + "primaryKeyURI": "https://exampleprimarykeyvault.vault.azure.net/keys/examplekey", + 
"primaryUserAssignedIdentityId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/exampleprimaryidentity", + "type": "AzureKeyVault", + }, + }, + "sku": {"name": "Standard_D8s_v3", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdateWithDataEncryptionEnabledAutoUpdate.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_major_version_upgrade.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_major_version_upgrade.py new file mode 100644 index 000000000000..9ae40ae202c0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_major_version_upgrade.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update_with_major_version_upgrade.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={"properties": {"createMode": "Update", "version": "17"}}, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdateWithMajorVersionUpgrade.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_microsoft_entra_enabled.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_microsoft_entra_enabled.py new file mode 100644 index 000000000000..b5d840e49133 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/servers_update_with_microsoft_entra_enabled.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python servers_update_with_microsoft_entra_enabled.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.servers.begin_update( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + parameters={ + "properties": { + "administratorLoginPassword": "examplenewpassword", + "authConfig": { + "activeDirectoryAuth": "Enabled", + "passwordAuth": "Enabled", + "tenantId": "tttttt-tttt-tttt-tttt-tttttttttttt", + }, + "backup": {"backupRetentionDays": 20}, + "createMode": "Update", + "storage": {"autoGrow": "Disabled", "storageSizeGB": 1024, "tier": "P30"}, + }, + "sku": {"name": "Standard_D8s_v3", "tier": "GeneralPurpose"}, + }, + ).result() + print(response) + + +# x-ms-original-file: 2026-01-01-preview/ServersUpdateWithMicrosoftEntraEnabled.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_get.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_get.py new file mode 100644 index 000000000000..935efaf8acb7 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_get.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python tuning_options_get.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.tuning_options.get( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + tuning_option="index", + ) + print(response) + + +# x-ms-original-file: 2026-01-01-preview/TuningOptionsGet.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_by_server.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_by_server.py new file mode 100644 index 000000000000..505b03daa62b --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_by_server.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python tuning_options_list_by_server.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.tuning_options.list_by_server( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/TuningOptionsListByServer.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations.py new file mode 100644 index 000000000000..b9a5c485d9fc --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python tuning_options_list_index_recommendations.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.tuning_options.list_recommendations( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + tuning_option="index", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/TuningOptionsListIndexRecommendations.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations_filtered_for_create_index.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations_filtered_for_create_index.py new file mode 100644 index 000000000000..f73fd96e426f --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_index_recommendations_filtered_for_create_index.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python tuning_options_list_index_recommendations_filtered_for_create_index.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.tuning_options.list_recommendations( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + tuning_option="index", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/TuningOptionsListIndexRecommendationsFilteredForCreateIndex.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations.py new file mode 100644 index 000000000000..e6222c227a28 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.mgmt.postgresql import PostgreSQLManagementClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-mgmt-postgresql +# USAGE + python tuning_options_list_table_recommendations.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = PostgreSQLManagementClient( + credential=DefaultAzureCredential(), + subscription_id="SUBSCRIPTION_ID", + ) + + response = client.tuning_options.list_recommendations( + resource_group_name="exampleresourcegroup", + server_name="exampleserver", + tuning_option="table", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2026-01-01-preview/TuningOptionsListTableRecommendations.json +if __name__ == "__main__": + main() diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations_filtered_for_analyze_table.py b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations_filtered_for_analyze_table.py new file mode 100644 index 000000000000..e6f05394fb2e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_samples/tuning_options_list_table_recommendations_filtered_for_analyze_table.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
# See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python tuning_options_list_table_recommendations_filtered_for_analyze_table.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """List the "table" tuning recommendations (ANALYZE TABLE scenario) and print each one."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    recommendations = client.tuning_options.list_recommendations(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        tuning_option="table",
    )
    for recommendation in recommendations:
        print(recommendation)


# x-ms-original-file: 2026-01-01-preview/TuningOptionsListTableRecommendationsFilteredForAnalyzeTable.json
if __name__ == "__main__":
    main()
# See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_endpoint_create.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """Create a virtual endpoint on a flexible server and print the final resource."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    # begin_create returns a poller; .result() blocks until the long-running operation completes.
    poller = client.virtual_endpoints.begin_create(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        virtual_endpoint_name="examplebasename",
        parameters={"properties": {"endpointType": "ReadWrite", "members": ["exampleprimaryserver"]}},
    )
    print(poller.result())


# x-ms-original-file: 2026-01-01-preview/VirtualEndpointCreate.json
if __name__ == "__main__":
    main()
# See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_endpoint_delete.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """Delete a virtual endpoint and wait for the long-running operation to finish."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    # Deletion returns no payload; .result() only blocks until the operation completes.
    poller = client.virtual_endpoints.begin_delete(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        virtual_endpoint_name="examplebasename",
    )
    poller.result()


# x-ms-original-file: 2026-01-01-preview/VirtualEndpointDelete.json
if __name__ == "__main__":
    main()
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_endpoint_update.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """Update a virtual endpoint on a flexible server and print the final resource."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    # begin_update returns a poller; .result() blocks until the long-running operation completes.
    poller = client.virtual_endpoints.begin_update(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        virtual_endpoint_name="examplebasename",
        parameters={"properties": {"endpointType": "ReadWrite", "members": ["exampleprimaryserver"]}},
    )
    print(poller.result())


# x-ms-original-file: 2026-01-01-preview/VirtualEndpointUpdate.json
if __name__ == "__main__":
    main()
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_endpoints_get.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """Fetch a single virtual endpoint by name and print it."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    endpoint = client.virtual_endpoints.get(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
        virtual_endpoint_name="examplebasename",
    )
    print(endpoint)


# x-ms-original-file: 2026-01-01-preview/VirtualEndpointsGet.json
if __name__ == "__main__":
    main()
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_endpoints_list_by_server.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """List every virtual endpoint on a flexible server and print each one."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    endpoints = client.virtual_endpoints.list_by_server(
        resource_group_name="exampleresourcegroup",
        server_name="exampleserver",
    )
    for endpoint in endpoints:
        print(endpoint)


# x-ms-original-file: 2026-01-01-preview/VirtualEndpointsListByServer.json
if __name__ == "__main__":
    main()
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential

from azure.mgmt.postgresql import PostgreSQLManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-postgresql
# USAGE
    python virtual_network_subnet_usage_list.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    """Query subnet usage for a given virtual network in a location and print the result."""
    # Authenticate with environment-provided service-principal credentials.
    credential = DefaultAzureCredential()
    client = PostgreSQLManagementClient(credential=credential, subscription_id="SUBSCRIPTION_ID")

    usage = client.virtual_network_subnet_usage.list(
        location_name="eastus",
        parameters={
            "virtualNetworkArmResourceId": "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/exampleresourcegroup/providers/Microsoft.Network/virtualNetworks/examplevirtualnetwork"
        },
    )
    print(usage)


# x-ms-original-file: 2026-01-01-preview/VirtualNetworkSubnetUsageList.json
if __name__ == "__main__":
    main()
# --------------------------------------------------------------------------
import os

import pytest
from dotenv import load_dotenv
from devtools_testutils import (
    add_body_key_sanitizer,
    add_general_regex_sanitizer,
    add_header_regex_sanitizer,
    test_proxy,
)

load_dotenv()

# Value every sanitized identity string is rewritten to in recordings.
_PLACEHOLDER = "00000000-0000-0000-0000-000000000000"


# For security, please avoid record sensitive identity information in recordings
@pytest.fixture(scope="session", autouse=True)
def add_sanitizers(test_proxy):
    """Register test-proxy sanitizers so identity values never appear in recordings."""
    # Each identity env var (or its placeholder fallback) is scrubbed from recorded traffic.
    for env_name in (
        "AZURE_SUBSCRIPTION_ID",
        "AZURE_TENANT_ID",
        "AZURE_CLIENT_ID",
        "AZURE_CLIENT_SECRET",
    ):
        add_general_regex_sanitizer(regex=os.environ.get(env_name, _PLACEHOLDER), value=_PLACEHOLDER)

    # Cookies and bearer tokens are also stripped from recordings.
    add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
    add_header_regex_sanitizer(key="Cookie", value="cookie;")
    add_body_key_sanitizer(json_path="$..access_token", value="access_token")
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementAdministratorsMicrosoftEntraOperations(AzureMgmtRecordedTestCase):
    """Recorded tests for the administrators_microsoft_entra operation group."""

    def setup_method(self, method):
        # A fresh management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_administrators_microsoft_entra_get(self, resource_group):
        response = self.client.administrators_microsoft_entra.get(
            resource_group_name=resource_group.name, server_name="str", object_id="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_administrators_microsoft_entra_begin_create_or_update(self, resource_group):
        poller = self.client.administrators_microsoft_entra.begin_create_or_update(
            resource_group_name=resource_group.name,
            server_name="str",
            object_id="str",
            parameters={"properties": {"principalName": "str", "principalType": "str", "tenantId": "str"}},
        )
        response = poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_administrators_microsoft_entra_begin_delete(self, resource_group):
        poller = self.client.administrators_microsoft_entra.begin_delete(
            resource_group_name=resource_group.name, server_name="str", object_id="str"
        )
        response = poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_administrators_microsoft_entra_list_by_server(self, resource_group):
        response = self.client.administrators_microsoft_entra.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = list(response)
        # please add some check logic here by yourself
        # ...
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql.aio import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
from devtools_testutils.aio import recorded_by_proxy_async

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementAdministratorsMicrosoftEntraOperationsAsync(AzureMgmtRecordedTestCase):
    """Recorded async tests for the administrators_microsoft_entra operation group."""

    def setup_method(self, method):
        # A fresh async management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_administrators_microsoft_entra_get(self, resource_group):
        response = await self.client.administrators_microsoft_entra.get(
            resource_group_name=resource_group.name, server_name="str", object_id="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_administrators_microsoft_entra_begin_create_or_update(self, resource_group):
        poller = await self.client.administrators_microsoft_entra.begin_create_or_update(
            resource_group_name=resource_group.name,
            server_name="str",
            object_id="str",
            parameters={"properties": {"principalName": "str", "principalType": "str", "tenantId": "str"}},
        )
        response = await poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_administrators_microsoft_entra_begin_delete(self, resource_group):
        poller = await self.client.administrators_microsoft_entra.begin_delete(
            resource_group_name=resource_group.name, server_name="str", object_id="str"
        )
        response = await poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_administrators_microsoft_entra_list_by_server(self, resource_group):
        response = self.client.administrators_microsoft_entra.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = [item async for item in response]
        # please add some check logic here by yourself
        # ...
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementAdvancedThreatProtectionSettingsOperations(AzureMgmtRecordedTestCase):
    """Recorded tests for the advanced_threat_protection_settings operation group."""

    def setup_method(self, method):
        # A fresh management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_advanced_threat_protection_settings_get(self, resource_group):
        response = self.client.advanced_threat_protection_settings.get(
            resource_group_name=resource_group.name, server_name="str", threat_protection_name="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_advanced_threat_protection_settings_list_by_server(self, resource_group):
        response = self.client.advanced_threat_protection_settings.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = list(response)
        # please add some check logic here by yourself
        # ...
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql.aio import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
from devtools_testutils.aio import recorded_by_proxy_async

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementAdvancedThreatProtectionSettingsOperationsAsync(AzureMgmtRecordedTestCase):
    """Recorded async tests for the advanced_threat_protection_settings operation group."""

    def setup_method(self, method):
        # A fresh async management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_advanced_threat_protection_settings_get(self, resource_group):
        response = await self.client.advanced_threat_protection_settings.get(
            resource_group_name=resource_group.name, server_name="str", threat_protection_name="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_advanced_threat_protection_settings_list_by_server(self, resource_group):
        response = self.client.advanced_threat_protection_settings.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = [item async for item in response]
        # please add some check logic here by yourself
        # ...
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementBackupsAutomaticAndOnDemandOperations(AzureMgmtRecordedTestCase):
    """Recorded tests for the backups_automatic_and_on_demand operation group."""

    def setup_method(self, method):
        # A fresh management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_backups_automatic_and_on_demand_get(self, resource_group):
        response = self.client.backups_automatic_and_on_demand.get(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_backups_automatic_and_on_demand_begin_create(self, resource_group):
        poller = self.client.backups_automatic_and_on_demand.begin_create(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )
        response = poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_backups_automatic_and_on_demand_begin_delete(self, resource_group):
        poller = self.client.backups_automatic_and_on_demand.begin_delete(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )
        response = poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy
    def test_backups_automatic_and_on_demand_list_by_server(self, resource_group):
        response = self.client.backups_automatic_and_on_demand.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = list(response)
        # please add some check logic here by yourself
        # ...
# See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from azure.mgmt.postgresql.aio import PostgreSQLManagementClient

from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer
from devtools_testutils.aio import recorded_by_proxy_async

AZURE_LOCATION = "eastus"


@pytest.mark.skip("you may need to update the auto-generated test case before run it")
class TestPostgreSQLManagementBackupsAutomaticAndOnDemandOperationsAsync(AzureMgmtRecordedTestCase):
    """Recorded async tests for the backups_automatic_and_on_demand operation group."""

    def setup_method(self, method):
        # A fresh async management client per test method.
        self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True)

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_backups_automatic_and_on_demand_get(self, resource_group):
        response = await self.client.backups_automatic_and_on_demand.get(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_backups_automatic_and_on_demand_begin_create(self, resource_group):
        poller = await self.client.backups_automatic_and_on_demand.begin_create(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )
        response = await poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_backups_automatic_and_on_demand_begin_delete(self, resource_group):
        poller = await self.client.backups_automatic_and_on_demand.begin_delete(
            resource_group_name=resource_group.name, server_name="str", backup_name="str"
        )
        response = await poller.result()  # poll until the service returns the final result

        # please add some check logic here by yourself
        # ...

    @RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
    @recorded_by_proxy_async
    async def test_backups_automatic_and_on_demand_list_by_server(self, resource_group):
        response = self.client.backups_automatic_and_on_demand.list_by_server(
            resource_group_name=resource_group.name, server_name="str"
        )
        result = [item async for item in response]
        # please add some check logic here by yourself
        # ...
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementBackupsLongTermRetentionOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_backups_long_term_retention_check_prerequisites(self, resource_group): + response = self.client.backups_long_term_retention.check_prerequisites( + resource_group_name=resource_group.name, + server_name="str", + parameters={"backupSettings": {"backupName": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_backups_long_term_retention_begin_start(self, resource_group): + response = self.client.backups_long_term_retention.begin_start( + resource_group_name=resource_group.name, + server_name="str", + parameters={"backupSettings": {"backupName": "str"}, "targetDetails": {"sasUriList": ["str"]}}, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_backups_long_term_retention_get(self, resource_group): + response = self.client.backups_long_term_retention.get( + resource_group_name=resource_group.name, + server_name="str", + backup_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_backups_long_term_retention_list_by_server(self, resource_group): + response = self.client.backups_long_term_retention.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_long_term_retention_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_long_term_retention_operations_async.py new file mode 100644 index 000000000000..878312144e30 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_backups_long_term_retention_operations_async.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementBackupsLongTermRetentionOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_backups_long_term_retention_check_prerequisites(self, resource_group): + response = await self.client.backups_long_term_retention.check_prerequisites( + resource_group_name=resource_group.name, + server_name="str", + parameters={"backupSettings": {"backupName": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_backups_long_term_retention_begin_start(self, resource_group): + response = await ( + await self.client.backups_long_term_retention.begin_start( + resource_group_name=resource_group.name, + server_name="str", + parameters={"backupSettings": {"backupName": "str"}, "targetDetails": {"sasUriList": ["str"]}}, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_backups_long_term_retention_get(self, resource_group): + response = await self.client.backups_long_term_retention.get( + resource_group_name=resource_group.name, + server_name="str", + backup_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_backups_long_term_retention_list_by_server(self, resource_group): + response = self.client.backups_long_term_retention.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations.py new file mode 100644 index 000000000000..92281754f2e8 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapabilitiesByLocationOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_capabilities_by_location_list(self, resource_group): + response = self.client.capabilities_by_location.list( + location_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations_async.py new file mode 100644 index 000000000000..1d05d6cbcd0f --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_location_operations_async.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapabilitiesByLocationOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_capabilities_by_location_list(self, resource_group): + response = self.client.capabilities_by_location.list( + location_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations.py new file mode 100644 index 000000000000..2aeba26b39cf --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapabilitiesByServerOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_capabilities_by_server_list(self, resource_group): + response = self.client.capabilities_by_server.list( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations_async.py new file mode 100644 index 000000000000..b421c3515750 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_capabilities_by_server_operations_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapabilitiesByServerOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_capabilities_by_server_list(self, resource_group): + response = self.client.capabilities_by_server.list( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations.py new file mode 100644 index 000000000000..1cdb11bfaab8 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapturedLogsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_captured_logs_list_by_server(self, resource_group): + response = self.client.captured_logs.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations_async.py new file mode 100644 index 000000000000..4316ec83a9a6 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_captured_logs_operations_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementCapturedLogsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_captured_logs_list_by_server(self, resource_group): + response = self.client.captured_logs.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations.py new file mode 100644 index 000000000000..bba76f087d8f --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementConfigurationsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_configurations_get(self, resource_group): + response = self.client.configurations.get( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_configurations_begin_put(self, resource_group): + response = self.client.configurations.begin_put( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + parameters={ + "properties": { + "allowedValues": "str", + "dataType": "str", + "defaultValue": "str", + "description": "str", + "documentationLink": "str", + "isConfigPendingRestart": bool, + "isDynamicConfig": bool, + "isReadOnly": bool, + "source": "str", + "unit": "str", + "value": "str", + } + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_configurations_begin_update(self, resource_group): + response = self.client.configurations.begin_update( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + parameters={ + "properties": { + "allowedValues": "str", + "dataType": "str", + "defaultValue": "str", + "description": "str", + "documentationLink": "str", + "isConfigPendingRestart": bool, + "isDynamicConfig": bool, + "isReadOnly": bool, + "source": "str", + "unit": "str", + "value": "str", + } + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_configurations_list_by_server(self, resource_group): + response = self.client.configurations.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations_async.py new file mode 100644 index 000000000000..784417c8eda0 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_configurations_operations_async.py @@ -0,0 +1,101 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementConfigurationsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_configurations_get(self, resource_group): + response = await self.client.configurations.get( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_configurations_begin_put(self, resource_group): + response = await ( + await self.client.configurations.begin_put( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + parameters={ + "properties": { + "allowedValues": "str", + "dataType": "str", + "defaultValue": "str", + "description": "str", + "documentationLink": "str", + "isConfigPendingRestart": bool, + "isDynamicConfig": bool, + "isReadOnly": bool, + "source": "str", + "unit": "str", + "value": "str", + } + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_configurations_begin_update(self, resource_group): + response = await ( + await self.client.configurations.begin_update( + resource_group_name=resource_group.name, + server_name="str", + configuration_name="str", + parameters={ + "properties": { + "allowedValues": "str", + "dataType": "str", + "defaultValue": "str", + "description": "str", + "documentationLink": "str", + "isConfigPendingRestart": bool, + "isDynamicConfig": bool, + "isReadOnly": bool, + "source": "str", + "unit": "str", + "value": "str", + } + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_configurations_list_by_server(self, resource_group): + response = self.client.configurations.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations.py new file mode 100644 index 000000000000..a991115ab7a1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementDatabasesOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_databases_get(self, resource_group): + response = self.client.databases.get( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_databases_begin_create(self, resource_group): + response = self.client.databases.begin_create( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": {"charset": "str", "collation": "str"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_databases_begin_delete(self, resource_group): + response = self.client.databases.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_databases_list_by_server(self, resource_group): + response = self.client.databases.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations_async.py new file mode 100644 index 000000000000..3935552b8a0e --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_databases_operations_async.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementDatabasesOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_databases_get(self, resource_group): + response = await self.client.databases.get( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_databases_begin_create(self, resource_group): + response = await ( + await self.client.databases.begin_create( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": {"charset": "str", "collation": "str"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_databases_begin_delete(self, resource_group): + response = await ( + await self.client.databases.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + database_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_databases_list_by_server(self, resource_group): + response = self.client.databases.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations.py new file mode 100644 index 000000000000..9df9c2b6827a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementFirewallRulesOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_firewall_rules_get(self, resource_group): + response = self.client.firewall_rules.get( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_firewall_rules_begin_create_or_update(self, resource_group): + response = self.client.firewall_rules.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + parameters={ + "properties": {"endIpAddress": "str", "startIpAddress": "str"}, + "id": "str", + "name": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_firewall_rules_begin_delete(self, resource_group): + response = self.client.firewall_rules.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_firewall_rules_list_by_server(self, resource_group): + response = self.client.firewall_rules.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations_async.py new file mode 100644 index 000000000000..4f870aa4b812 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_firewall_rules_operations_async.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementFirewallRulesOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_firewall_rules_get(self, resource_group): + response = await self.client.firewall_rules.get( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_firewall_rules_begin_create_or_update(self, resource_group): + response = await ( + await self.client.firewall_rules.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + parameters={ + "properties": {"endIpAddress": "str", "startIpAddress": "str"}, + "id": "str", + "name": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_firewall_rules_begin_delete(self, resource_group): + response = await ( + await self.client.firewall_rules.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + firewall_rule_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_firewall_rules_list_by_server(self, resource_group): + response = self.client.firewall_rules.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations.py new file mode 100644 index 000000000000..98b859dcfb96 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementMigrationsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_get(self, resource_group): + response = self.client.migrations.get( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_create(self, resource_group): + response = self.client.migrations.create( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + parameters={ + "location": "str", + "id": "str", + "name": "str", + "properties": { + "cancel": "str", + "currentStatus": { + "currentSubStateDetails": { + "currentSubState": "str", + "dbDetails": { + "str": { + "appliedChanges": 0, + "cdcDeleteCounter": 0, + "cdcInsertCounter": 0, + "cdcUpdateCounter": 0, + "databaseName": "str", + "endedOn": "2020-02-20 00:00:00", + "fullLoadCompletedTables": 0, + "fullLoadErroredTables": 0, + "fullLoadLoadingTables": 0, + "fullLoadQueuedTables": 0, + "incomingChanges": 0, + "latency": 0, + "message": "str", + "migrationOperation": "str", + "migrationState": "str", + "startedOn": "2020-02-20 00:00:00", + } + }, + "validationDetails": { + "dbLevelValidationDetails": [ + { + "databaseName": "str", + "endedOn": "2020-02-20 00:00:00", + "startedOn": "2020-02-20 00:00:00", + "summary": [ + { + 
"messages": [{"message": "str", "state": "str"}], + "state": "str", + "type": "str", + } + ], + } + ], + "serverLevelValidationDetails": [ + {"messages": [{"message": "str", "state": "str"}], "state": "str", "type": "str"} + ], + "status": "str", + "validationEndTimeInUtc": "2020-02-20 00:00:00", + "validationStartTimeInUtc": "2020-02-20 00:00:00", + }, + }, + "error": "str", + "state": "str", + }, + "dbsToCancelMigrationOn": ["str"], + "dbsToMigrate": ["str"], + "dbsToTriggerCutoverOn": ["str"], + "migrateRoles": "str", + "migrationId": "str", + "migrationInstanceResourceId": "str", + "migrationMode": "str", + "migrationOption": "str", + "migrationWindowEndTimeInUtc": "2020-02-20 00:00:00", + "migrationWindowStartTimeInUtc": "2020-02-20 00:00:00", + "overwriteDbsInTarget": "str", + "secretParameters": { + "adminCredentials": {"sourceServerPassword": "str", "targetServerPassword": "str"}, + "sourceServerUsername": "str", + "targetServerUsername": "str", + }, + "setupLogicalReplicationOnSourceDbIfNeeded": "str", + "sourceDbServerFullyQualifiedDomainName": "str", + "sourceDbServerMetadata": { + "location": "str", + "sku": {"name": "str", "tier": "str"}, + "storageMb": 0, + "version": "str", + }, + "sourceDbServerResourceId": "str", + "sourceType": "str", + "sslMode": "str", + "startDataMigration": "str", + "targetDbServerFullyQualifiedDomainName": "str", + "targetDbServerMetadata": { + "location": "str", + "sku": {"name": "str", "tier": "str"}, + "storageMb": 0, + "version": "str", + }, + "targetDbServerResourceId": "str", + "triggerCutover": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "tags": {"str": "str"}, + "type": "str", + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_update(self, resource_group): + response = self.client.migrations.update( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + parameters={ + "properties": { + "cancel": "str", + "dbsToCancelMigrationOn": ["str"], + "dbsToMigrate": ["str"], + "dbsToTriggerCutoverOn": ["str"], + "migrateRoles": "str", + "migrationMode": "str", + "migrationWindowStartTimeInUtc": "2020-02-20 00:00:00", + "overwriteDbsInTarget": "str", + "secretParameters": { + "adminCredentials": {"sourceServerPassword": "str", "targetServerPassword": "str"}, + "sourceServerUsername": "str", + "targetServerUsername": "str", + }, + "setupLogicalReplicationOnSourceDbIfNeeded": "str", + "sourceDbServerFullyQualifiedDomainName": "str", + "sourceDbServerResourceId": "str", + "startDataMigration": "str", + "targetDbServerFullyQualifiedDomainName": "str", + "triggerCutover": "str", + }, + "tags": {"str": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_cancel(self, resource_group): + response = self.client.migrations.cancel( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_list_by_target_server(self, resource_group): + response = self.client.migrations.list_by_target_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_migrations_check_name_availability(self, resource_group): + response = self.client.migrations.check_name_availability( + resource_group_name=resource_group.name, + server_name="str", + parameters={"name": "str", "type": "str", "message": "str", "nameAvailable": bool, "reason": "str"}, + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations_async.py new file mode 100644 index 000000000000..808bc248071a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_migrations_operations_async.py @@ -0,0 +1,219 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementMigrationsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_get(self, resource_group): + response = await self.client.migrations.get( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_create(self, resource_group): + response = await self.client.migrations.create( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + parameters={ + "location": "str", + "id": "str", + "name": "str", + "properties": { + "cancel": "str", + "currentStatus": { + "currentSubStateDetails": { + "currentSubState": "str", + "dbDetails": { + "str": { + "appliedChanges": 0, + "cdcDeleteCounter": 0, + "cdcInsertCounter": 0, + "cdcUpdateCounter": 0, + "databaseName": "str", + "endedOn": "2020-02-20 00:00:00", + "fullLoadCompletedTables": 0, + "fullLoadErroredTables": 0, + "fullLoadLoadingTables": 0, + "fullLoadQueuedTables": 0, + "incomingChanges": 0, + "latency": 0, + "message": "str", + "migrationOperation": "str", + "migrationState": "str", + "startedOn": "2020-02-20 00:00:00", + } + }, + "validationDetails": { + "dbLevelValidationDetails": [ + { + "databaseName": 
"str", + "endedOn": "2020-02-20 00:00:00", + "startedOn": "2020-02-20 00:00:00", + "summary": [ + { + "messages": [{"message": "str", "state": "str"}], + "state": "str", + "type": "str", + } + ], + } + ], + "serverLevelValidationDetails": [ + {"messages": [{"message": "str", "state": "str"}], "state": "str", "type": "str"} + ], + "status": "str", + "validationEndTimeInUtc": "2020-02-20 00:00:00", + "validationStartTimeInUtc": "2020-02-20 00:00:00", + }, + }, + "error": "str", + "state": "str", + }, + "dbsToCancelMigrationOn": ["str"], + "dbsToMigrate": ["str"], + "dbsToTriggerCutoverOn": ["str"], + "migrateRoles": "str", + "migrationId": "str", + "migrationInstanceResourceId": "str", + "migrationMode": "str", + "migrationOption": "str", + "migrationWindowEndTimeInUtc": "2020-02-20 00:00:00", + "migrationWindowStartTimeInUtc": "2020-02-20 00:00:00", + "overwriteDbsInTarget": "str", + "secretParameters": { + "adminCredentials": {"sourceServerPassword": "str", "targetServerPassword": "str"}, + "sourceServerUsername": "str", + "targetServerUsername": "str", + }, + "setupLogicalReplicationOnSourceDbIfNeeded": "str", + "sourceDbServerFullyQualifiedDomainName": "str", + "sourceDbServerMetadata": { + "location": "str", + "sku": {"name": "str", "tier": "str"}, + "storageMb": 0, + "version": "str", + }, + "sourceDbServerResourceId": "str", + "sourceType": "str", + "sslMode": "str", + "startDataMigration": "str", + "targetDbServerFullyQualifiedDomainName": "str", + "targetDbServerMetadata": { + "location": "str", + "sku": {"name": "str", "tier": "str"}, + "storageMb": 0, + "version": "str", + }, + "targetDbServerResourceId": "str", + "triggerCutover": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "tags": {"str": "str"}, + "type": "str", + }, + ) + + # please add some check logic here by yourself + # 
... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_update(self, resource_group): + response = await self.client.migrations.update( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + parameters={ + "properties": { + "cancel": "str", + "dbsToCancelMigrationOn": ["str"], + "dbsToMigrate": ["str"], + "dbsToTriggerCutoverOn": ["str"], + "migrateRoles": "str", + "migrationMode": "str", + "migrationWindowStartTimeInUtc": "2020-02-20 00:00:00", + "overwriteDbsInTarget": "str", + "secretParameters": { + "adminCredentials": {"sourceServerPassword": "str", "targetServerPassword": "str"}, + "sourceServerUsername": "str", + "targetServerUsername": "str", + }, + "setupLogicalReplicationOnSourceDbIfNeeded": "str", + "sourceDbServerFullyQualifiedDomainName": "str", + "sourceDbServerResourceId": "str", + "startDataMigration": "str", + "targetDbServerFullyQualifiedDomainName": "str", + "triggerCutover": "str", + }, + "tags": {"str": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_cancel(self, resource_group): + response = await self.client.migrations.cancel( + resource_group_name=resource_group.name, + server_name="str", + migration_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_list_by_target_server(self, resource_group): + response = self.client.migrations.list_by_target_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_migrations_check_name_availability(self, resource_group): + response = await self.client.migrations.check_name_availability( + resource_group_name=resource_group.name, + server_name="str", + parameters={"name": "str", "type": "str", "message": "str", "nameAvailable": bool, "reason": "str"}, + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations.py new file mode 100644 index 000000000000..06218269765a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementNameAvailabilityOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_name_availability_check_globally(self, resource_group): + response = self.client.name_availability.check_globally( + parameters={"name": "str", "type": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_name_availability_check_with_location(self, resource_group): + response = self.client.name_availability.check_with_location( + location_name="str", + parameters={"name": "str", "type": "str"}, + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations_async.py new file mode 100644 index 000000000000..8f29e80e7024 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_name_availability_operations_async.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementNameAvailabilityOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_name_availability_check_globally(self, resource_group): + response = await self.client.name_availability.check_globally( + parameters={"name": "str", "type": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_name_availability_check_with_location(self, resource_group): + response = await self.client.name_availability.check_with_location( + location_name="str", + parameters={"name": "str", "type": "str"}, + ) + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations.py new file mode 100644 index 000000000000..45945d3be654 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_operations_list(self, resource_group): + response = self.client.operations.list() + result = [r for r in response] + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations_async.py new file mode 100644 index 000000000000..e14693598dae --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_operations_async.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_operations_list(self, resource_group): + response = self.client.operations.list() + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations.py new file mode 100644 index 000000000000..9458d2c20c52 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateDnsZoneSuffixOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_dns_zone_suffix_get(self, resource_group): + response = self.client.private_dns_zone_suffix.get() + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations_async.py new file mode 100644 index 000000000000..8311b8b28d18 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_dns_zone_suffix_operations_async.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateDnsZoneSuffixOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_dns_zone_suffix_get(self, resource_group): + response = await self.client.private_dns_zone_suffix.get() + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations.py new file mode 100644 index 000000000000..3c845e7b17df --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateEndpointConnectionsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_endpoint_connections_get(self, resource_group): + response = self.client.private_endpoint_connections.get( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_endpoint_connections_begin_update(self, resource_group): + response = self.client.private_endpoint_connections.begin_update( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": { + "privateLinkServiceConnectionState": { + "actionsRequired": "str", + "description": "str", + "status": "str", + }, + "groupIds": ["str"], + "privateEndpoint": {"id": "str"}, + "provisioningState": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_endpoint_connections_begin_delete(self, resource_group): + response = self.client.private_endpoint_connections.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_endpoint_connections_list_by_server(self, resource_group): + response = self.client.private_endpoint_connections.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations_async.py new file mode 100644 index 000000000000..41f82f08c2b1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_endpoint_connections_operations_async.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateEndpointConnectionsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_endpoint_connections_get(self, resource_group): + response = await self.client.private_endpoint_connections.get( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_endpoint_connections_begin_update(self, resource_group): + response = await ( + await self.client.private_endpoint_connections.begin_update( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": { + "privateLinkServiceConnectionState": { + "actionsRequired": "str", + "description": "str", + "status": "str", + }, + "groupIds": ["str"], + "privateEndpoint": {"id": "str"}, + "provisioningState": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_endpoint_connections_begin_delete(self, resource_group): + response = await ( + await self.client.private_endpoint_connections.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + private_endpoint_connection_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_endpoint_connections_list_by_server(self, resource_group): + response = self.client.private_endpoint_connections.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations.py new file mode 100644 index 000000000000..e049f406dc09 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateLinkResourcesOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_link_resources_get(self, resource_group): + response = self.client.private_link_resources.get( + resource_group_name=resource_group.name, + server_name="str", + group_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_private_link_resources_list_by_server(self, resource_group): + response = self.client.private_link_resources.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations_async.py new file mode 100644 index 000000000000..15036c85cfab --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_private_link_resources_operations_async.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementPrivateLinkResourcesOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_link_resources_get(self, resource_group): + response = await self.client.private_link_resources.get( + resource_group_name=resource_group.name, + server_name="str", + group_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_private_link_resources_list_by_server(self, resource_group): + response = self.client.private_link_resources.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations.py new file mode 100644 index 000000000000..b404e7102ae4 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementQuotaUsagesOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_quota_usages_list(self, resource_group): + response = self.client.quota_usages.list( + location_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations_async.py new file mode 100644 index 000000000000..0c46430eb46a --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_quota_usages_operations_async.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementQuotaUsagesOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_quota_usages_list(self, resource_group): + response = self.client.quota_usages.list( + location_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations.py new file mode 100644 index 000000000000..513bfdd8ca59 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementReplicasOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_replicas_list_by_server(self, resource_group): + response = self.client.replicas.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations_async.py new file mode 100644 index 000000000000..0aef6999ec87 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_replicas_operations_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementReplicasOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_replicas_list_by_server(self, resource_group): + response = self.client.replicas.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations.py new file mode 100644 index 000000000000..d0a3901935a3 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementServerThreatProtectionSettingsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_server_threat_protection_settings_begin_create_or_update(self, resource_group): + response = self.client.server_threat_protection_settings.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + threat_protection_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": {"state": "str", "creationTime": "2020-02-20 00:00:00"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations_async.py new file mode 100644 index 000000000000..870a1af38d29 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_server_threat_protection_settings_operations_async.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementServerThreatProtectionSettingsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_server_threat_protection_settings_begin_create_or_update(self, resource_group): + response = await ( + await self.client.server_threat_protection_settings.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + threat_protection_name="str", + parameters={ + "id": "str", + "name": "str", + 
"properties": {"state": "str", "creationTime": "2020-02-20 00:00:00"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations.py new file mode 100644 index 000000000000..630b2c56ba33 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations.py @@ -0,0 +1,280 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementServersOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_get(self, resource_group): + response = self.client.servers.get( + resource_group_name=resource_group.name, + server_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_create_or_update(self, resource_group): + response = self.client.servers.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + parameters={ + "location": "str", + "id": "str", + "identity": { + "type": "str", + "principalId": "str", + "tenantId": "str", + "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}}, + }, + "name": "str", + "properties": { + "administratorLogin": "str", + "administratorLoginPassword": "str", + "authConfig": {"activeDirectoryAuth": "str", "passwordAuth": "str", "tenantId": "str"}, + "availabilityZone": "str", + "backup": { + "backupRetentionDays": 0, + "earliestRestoreDate": "2020-02-20 00:00:00", + "geoRedundantBackup": "str", + }, + "cluster": {"clusterSize": 0, "defaultDatabaseName": "str"}, + "createMode": "str", + "dataEncryption": { + "geoBackupEncryptionKeyStatus": "str", + "geoBackupKeyURI": "str", + "geoBackupUserAssignedIdentityId": "str", + "primaryEncryptionKeyStatus": "str", + "primaryKeyURI": "str", + 
"primaryUserAssignedIdentityId": "str", + "type": "str", + }, + "fullyQualifiedDomainName": "str", + "highAvailability": {"mode": "str", "standbyAvailabilityZone": "str", "state": "str"}, + "maintenanceWindow": {"customWindow": "str", "dayOfWeek": 0, "startHour": 0, "startMinute": 0}, + "minorVersion": "str", + "network": { + "delegatedSubnetResourceId": "str", + "privateDnsZoneArmResourceId": "str", + "publicNetworkAccess": "str", + }, + "pointInTimeUTC": "2020-02-20 00:00:00", + "privateEndpointConnections": [ + { + "id": "str", + "name": "str", + "properties": { + "privateLinkServiceConnectionState": { + "actionsRequired": "str", + "description": "str", + "status": "str", + }, + "groupIds": ["str"], + "privateEndpoint": {"id": "str"}, + "provisioningState": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + } + ], + "replica": { + "capacity": 0, + "promoteMode": "str", + "promoteOption": "str", + "replicationState": "str", + "role": "str", + }, + "replicaCapacity": 0, + "replicationRole": "str", + "sourceServerResourceId": "str", + "state": "str", + "storage": { + "autoGrow": "str", + "iops": 0, + "storageSizeGB": 0, + "throughput": 0, + "tier": "str", + "type": "str", + }, + "version": "str", + }, + "sku": {"name": "str", "tier": "str"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "tags": {"str": "str"}, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_update(self, resource_group): + response = self.client.servers.begin_update( + resource_group_name=resource_group.name, + server_name="str", + parameters={ + "identity": { + "type": "str", + "principalId": "str", + "tenantId": "str", + "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}}, + }, + "properties": { + "administratorLogin": "str", + "administratorLoginPassword": "str", + "authConfig": {"activeDirectoryAuth": "str", "passwordAuth": "str", "tenantId": "str"}, + "availabilityZone": "str", + "backup": { + "backupRetentionDays": 0, + "earliestRestoreDate": "2020-02-20 00:00:00", + "geoRedundantBackup": "str", + }, + "cluster": {"clusterSize": 0, "defaultDatabaseName": "str"}, + "createMode": "str", + "dataEncryption": { + "geoBackupEncryptionKeyStatus": "str", + "geoBackupKeyURI": "str", + "geoBackupUserAssignedIdentityId": "str", + "primaryEncryptionKeyStatus": "str", + "primaryKeyURI": "str", + "primaryUserAssignedIdentityId": "str", + "type": "str", + }, + "highAvailability": {"mode": "str", "standbyAvailabilityZone": "str", "state": "str"}, + "maintenanceWindow": {"customWindow": "str", "dayOfWeek": 0, "startHour": 0, "startMinute": 0}, + "network": { + "delegatedSubnetResourceId": "str", + "privateDnsZoneArmResourceId": "str", + "publicNetworkAccess": "str", + }, + "replica": { + "capacity": 0, + "promoteMode": "str", + "promoteOption": "str", + "replicationState": "str", + "role": "str", + }, + "replicationRole": "str", + "storage": { + "autoGrow": "str", + "iops": 0, + "storageSizeGB": 0, + "throughput": 0, + "tier": "str", + "type": "str", + }, + "version": "str", + }, + "sku": {"name": "str", "tier": "str"}, + "tags": {"str": "str"}, + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_delete(self, resource_group): + response = self.client.servers.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_list_by_resource_group(self, resource_group): + response = self.client.servers.list_by_resource_group( + resource_group_name=resource_group.name, + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_list_by_subscription(self, resource_group): + response = self.client.servers.list_by_subscription() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_restart(self, resource_group): + response = self.client.servers.begin_restart( + resource_group_name=resource_group.name, + server_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_start(self, resource_group): + response = self.client.servers.begin_start( + resource_group_name=resource_group.name, + server_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_stop(self, resource_group): + response = self.client.servers.begin_stop( + resource_group_name=resource_group.name, + server_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_servers_begin_migrate_network_mode(self, resource_group): + response = self.client.servers.begin_migrate_network_mode( + resource_group_name=resource_group.name, + server_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations_async.py new file mode 100644 index 000000000000..1d937cd631b7 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_servers_operations_async.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementServersOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_get(self, resource_group): + response = await self.client.servers.get( + resource_group_name=resource_group.name, + server_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_create_or_update(self, resource_group): + response = await ( + await self.client.servers.begin_create_or_update( + resource_group_name=resource_group.name, + server_name="str", + parameters={ + "location": "str", + "id": "str", + "identity": { + "type": "str", + "principalId": "str", + "tenantId": "str", + "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}}, + }, + "name": "str", + "properties": { + "administratorLogin": "str", + "administratorLoginPassword": "str", + "authConfig": {"activeDirectoryAuth": "str", "passwordAuth": "str", "tenantId": "str"}, + "availabilityZone": "str", + "backup": { + "backupRetentionDays": 0, + "earliestRestoreDate": "2020-02-20 00:00:00", + "geoRedundantBackup": "str", + }, + "cluster": {"clusterSize": 0, "defaultDatabaseName": "str"}, + "createMode": "str", + "dataEncryption": { + "geoBackupEncryptionKeyStatus": "str", + "geoBackupKeyURI": "str", + 
"geoBackupUserAssignedIdentityId": "str", + "primaryEncryptionKeyStatus": "str", + "primaryKeyURI": "str", + "primaryUserAssignedIdentityId": "str", + "type": "str", + }, + "fullyQualifiedDomainName": "str", + "highAvailability": {"mode": "str", "standbyAvailabilityZone": "str", "state": "str"}, + "maintenanceWindow": {"customWindow": "str", "dayOfWeek": 0, "startHour": 0, "startMinute": 0}, + "minorVersion": "str", + "network": { + "delegatedSubnetResourceId": "str", + "privateDnsZoneArmResourceId": "str", + "publicNetworkAccess": "str", + }, + "pointInTimeUTC": "2020-02-20 00:00:00", + "privateEndpointConnections": [ + { + "id": "str", + "name": "str", + "properties": { + "privateLinkServiceConnectionState": { + "actionsRequired": "str", + "description": "str", + "status": "str", + }, + "groupIds": ["str"], + "privateEndpoint": {"id": "str"}, + "provisioningState": "str", + }, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + } + ], + "replica": { + "capacity": 0, + "promoteMode": "str", + "promoteOption": "str", + "replicationState": "str", + "role": "str", + }, + "replicaCapacity": 0, + "replicationRole": "str", + "sourceServerResourceId": "str", + "state": "str", + "storage": { + "autoGrow": "str", + "iops": 0, + "storageSizeGB": 0, + "throughput": 0, + "tier": "str", + "type": "str", + }, + "version": "str", + }, + "sku": {"name": "str", "tier": "str"}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "tags": {"str": "str"}, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_update(self, resource_group): + response = await ( + await self.client.servers.begin_update( + resource_group_name=resource_group.name, + server_name="str", + parameters={ + "identity": { + "type": "str", + "principalId": "str", + "tenantId": "str", + "userAssignedIdentities": {"str": {"clientId": "str", "principalId": "str"}}, + }, + "properties": { + "administratorLogin": "str", + "administratorLoginPassword": "str", + "authConfig": {"activeDirectoryAuth": "str", "passwordAuth": "str", "tenantId": "str"}, + "availabilityZone": "str", + "backup": { + "backupRetentionDays": 0, + "earliestRestoreDate": "2020-02-20 00:00:00", + "geoRedundantBackup": "str", + }, + "cluster": {"clusterSize": 0, "defaultDatabaseName": "str"}, + "createMode": "str", + "dataEncryption": { + "geoBackupEncryptionKeyStatus": "str", + "geoBackupKeyURI": "str", + "geoBackupUserAssignedIdentityId": "str", + "primaryEncryptionKeyStatus": "str", + "primaryKeyURI": "str", + "primaryUserAssignedIdentityId": "str", + "type": "str", + }, + "highAvailability": {"mode": "str", "standbyAvailabilityZone": "str", "state": "str"}, + "maintenanceWindow": {"customWindow": "str", "dayOfWeek": 0, "startHour": 0, "startMinute": 0}, + "network": { + "delegatedSubnetResourceId": "str", + "privateDnsZoneArmResourceId": "str", + "publicNetworkAccess": "str", + }, + "replica": { + "capacity": 0, + "promoteMode": "str", + "promoteOption": "str", + "replicationState": "str", + "role": "str", + }, + "replicationRole": "str", + "storage": { + "autoGrow": "str", + "iops": 0, + "storageSizeGB": 0, + "throughput": 0, + "tier": "str", + "type": "str", + }, + "version": "str", + }, + "sku": {"name": "str", "tier": "str"}, + "tags": {"str": "str"}, + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_delete(self, resource_group): + response = await ( + await self.client.servers.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_list_by_resource_group(self, resource_group): + response = self.client.servers.list_by_resource_group( + resource_group_name=resource_group.name, + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_list_by_subscription(self, resource_group): + response = self.client.servers.list_by_subscription() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_restart(self, resource_group): + response = await ( + await self.client.servers.begin_restart( + resource_group_name=resource_group.name, + server_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_start(self, resource_group): + response = await ( + await self.client.servers.begin_start( + resource_group_name=resource_group.name, + server_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_stop(self, resource_group): + response = await ( + await self.client.servers.begin_stop( + resource_group_name=resource_group.name, + server_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_servers_begin_migrate_network_mode(self, resource_group): + response = await ( + await self.client.servers.begin_migrate_network_mode( + resource_group_name=resource_group.name, + server_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations.py new file mode 100644 index 000000000000..22ecee813144 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementTuningOptionsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_tuning_options_get(self, resource_group): + response = self.client.tuning_options.get( + resource_group_name=resource_group.name, + server_name="str", + tuning_option="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_tuning_options_list_by_server(self, resource_group): + response = self.client.tuning_options.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_tuning_options_list_recommendations(self, resource_group): + response = self.client.tuning_options.list_recommendations( + resource_group_name=resource_group.name, + server_name="str", + tuning_option="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations_async.py new file mode 100644 index 000000000000..941a40cf735d --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_tuning_options_operations_async.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementTuningOptionsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_tuning_options_get(self, resource_group): + response = await self.client.tuning_options.get( + resource_group_name=resource_group.name, + server_name="str", + tuning_option="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_tuning_options_list_by_server(self, resource_group): + response = self.client.tuning_options.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_tuning_options_list_recommendations(self, resource_group): + response = self.client.tuning_options.list_recommendations( + resource_group_name=resource_group.name, + server_name="str", + tuning_option="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations.py new file mode 100644 index 000000000000..c5b540cc062c --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations.py @@ -0,0 +1,93 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementVirtualEndpointsOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_endpoints_get(self, resource_group): + response = self.client.virtual_endpoints.get( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + ) + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_endpoints_begin_create(self, resource_group): + response = self.client.virtual_endpoints.begin_create( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": {"endpointType": "str", "members": ["str"], "virtualEndpoints": ["str"]}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_endpoints_begin_update(self, resource_group): + response = self.client.virtual_endpoints.begin_update( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + parameters={"properties": {"endpointType": "str", "members": ["str"], "virtualEndpoints": ["str"]}}, + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_endpoints_begin_delete(self, resource_group): + response = self.client.virtual_endpoints.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_endpoints_list_by_server(self, resource_group): + response = self.client.virtual_endpoints.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations_async.py new file mode 100644 index 000000000000..dc4a5c49feb1 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_endpoints_operations_async.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementVirtualEndpointsOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_endpoints_get(self, resource_group): + response = await self.client.virtual_endpoints.get( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_endpoints_begin_create(self, resource_group): + response = await ( + await self.client.virtual_endpoints.begin_create( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + parameters={ + "id": "str", + "name": "str", + "properties": {"endpointType": "str", "members": ["str"], "virtualEndpoints": ["str"]}, + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedByType": "str", + }, + "type": "str", + }, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_endpoints_begin_update(self, resource_group): + response = await ( + await self.client.virtual_endpoints.begin_update( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + parameters={"properties": {"endpointType": "str", "members": ["str"], "virtualEndpoints": ["str"]}}, + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_endpoints_begin_delete(self, resource_group): + response = await ( + await self.client.virtual_endpoints.begin_delete( + resource_group_name=resource_group.name, + server_name="str", + virtual_endpoint_name="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
+ + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_endpoints_list_by_server(self, resource_group): + response = self.client.virtual_endpoints.list_by_server( + resource_group_name=resource_group.name, + server_name="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations.py new file mode 100644 index 000000000000..985369345ddc --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementVirtualNetworkSubnetUsageOperations(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy + def test_virtual_network_subnet_usage_list(self, resource_group): + response = self.client.virtual_network_subnet_usage.list( + location_name="str", + parameters={"virtualNetworkArmResourceId": "str"}, + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations_async.py b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations_async.py new file mode 100644 index 000000000000..348d23571b74 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/generated_tests/test_postgre_sql_management_virtual_network_subnet_usage_operations_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from azure.mgmt.postgresql.aio import PostgreSQLManagementClient + +from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer +from devtools_testutils.aio import recorded_by_proxy_async + +AZURE_LOCATION = "eastus" + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestPostgreSQLManagementVirtualNetworkSubnetUsageOperationsAsync(AzureMgmtRecordedTestCase): + def setup_method(self, method): + self.client = self.create_mgmt_client(PostgreSQLManagementClient, is_async=True) + + @RandomNameResourceGroupPreparer(location=AZURE_LOCATION) + @recorded_by_proxy_async + async def test_virtual_network_subnet_usage_list(self, resource_group): + response = await self.client.virtual_network_subnet_usage.list( + location_name="str", + parameters={"virtualNetworkArmResourceId": "str"}, + ) + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/postgresql/azure-mgmt-postgresql/pyproject.toml b/sdk/postgresql/azure-mgmt-postgresql/pyproject.toml new file mode 100644 index 000000000000..241199139805 --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/pyproject.toml @@ -0,0 +1,86 @@ +[build-system] +requires = [ + "setuptools>=77.0.3", + "wheel", +] +build-backend = "setuptools.build_meta" + +[project] +name = "azure-mgmt-postgresql" +authors = [ + { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, +] +description = "Microsoft Azure Postgresql Management Client Library for Python" +license = "MIT" +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +requires-python = ">=3.9" +keywords = [ + "azure", + "azure sdk", +] +dependencies = [ + "isodate>=0.6.1", + "azure-mgmt-core>=1.6.0", + "typing-extensions>=4.6.0", +] +dynamic = [ + "version", + "readme", +] + +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + +[tool.setuptools.dynamic.version] +attr = "azure.mgmt.postgresql._version.VERSION" + +[tool.setuptools.dynamic.readme] +file = [ + "README.md", + "CHANGELOG.md", +] +content-type = "text/markdown" + +[tool.setuptools.packages.find] +exclude = [ + "tests*", + "generated_tests*", + "samples*", + "generated_samples*", + "doc*", + "azure", + "azure.mgmt", +] + +[tool.setuptools.package-data] +pytyped = [ + "py.typed", +] + +[tool.azure-sdk-build] +breaking = false +pyright = false +mypy = false + +[packaging] +package_name = "azure-mgmt-postgresql" +package_nspkg = "azure-mgmt-nspkg" +package_pprint_name = "Postgresql Management" +package_doc_id = "" +is_stable = false +is_arm = true 
+need_msrestazure = false +need_azuremgmtcore = true +sample_link = "" +exclude_folders = "" +title = "PostgreSQLManagementClient" diff --git a/sdk/postgresql/azure-mgmt-postgresql/tsp-location.yaml b/sdk/postgresql/azure-mgmt-postgresql/tsp-location.yaml new file mode 100644 index 000000000000..bd26bc89e2ea --- /dev/null +++ b/sdk/postgresql/azure-mgmt-postgresql/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/postgresql/DBforPostgreSQL.Management +commit: 74fd6c09ef3546c7997e174cb67002b5b273f381 +repo: Azure/azure-rest-api-specs +additionalDirectories: diff --git a/sdk/postgresql/ci.yml b/sdk/postgresql/ci.yml new file mode 100644 index 000000000000..83ec28bdfbbe --- /dev/null +++ b/sdk/postgresql/ci.yml @@ -0,0 +1,34 @@ +# DO NOT EDIT THIS FILE +# This file is generated automatically and any changes will be lost. + +trigger: + branches: + include: + - main + - hotfix/* + - release/* + - restapi* + paths: + include: + - sdk/postgresql/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + - restapi* + paths: + include: + - sdk/postgresql/ + +extends: + template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml + parameters: + ServiceDirectory: postgresql + TestProxy: true + Artifacts: + - name: azure-mgmt-postgresql + safeName: azuremgmtpostgresql