diff --git a/tests/test_004_cursor.py b/tests/test_004_cursor.py
index 57549629..f7e44dac 100644
--- a/tests/test_004_cursor.py
+++ b/tests/test_004_cursor.py
@@ -869,43 +869,6 @@ def test_parametrized_insert(cursor, db_connection, data):
pytest.fail(f"Parameterized data insertion/fetch failed: {e}")
-def test_rowcount(cursor, db_connection):
- """Test rowcount after insert operations"""
- try:
- cursor.execute(
- "CREATE TABLE #pytest_test_rowcount (id INT IDENTITY(1,1) PRIMARY KEY, name NVARCHAR(100))"
- )
- db_connection.commit()
-
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe1');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after first insert"
-
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe2');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after second insert"
-
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe3');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after third insert"
-
- cursor.execute("""
- INSERT INTO #pytest_test_rowcount (name)
- VALUES
- ('JohnDoe4'),
- ('JohnDoe5'),
- ('JohnDoe6');
- """)
- assert cursor.rowcount == 3, "Rowcount should be 3 after inserting multiple rows"
-
- cursor.execute("SELECT * FROM #pytest_test_rowcount;")
- assert cursor.rowcount == -1, "Rowcount should be -1 after a SELECT statement"
-
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Rowcount test failed: {e}")
- finally:
- cursor.execute("DROP TABLE #pytest_test_rowcount")
- db_connection.commit()
-
-
def test_rowcount_executemany(cursor, db_connection):
"""Test rowcount after executemany operations"""
try:
@@ -3048,7 +3011,7 @@ def test_cursor_context_manager_multiple_operations(db_connection):
count = cursor.fetchone()[0]
# This should fail or return 0 since table wasn't committed
assert count == 0, "Data should not be committed automatically"
- except:
+ except Exception:
# Table doesn't exist because transaction was rolled back
pass # This is expected behavior
@@ -3117,11 +3080,8 @@ def test_execute_fetchone_chaining(cursor, db_connection):
assert row is None, "Should return None for non-existent row"
finally:
- try:
- cursor.execute("DROP TABLE #test_chaining")
- db_connection.commit()
- except:
- pass
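+        # DROP TABLE IF EXISTS requires SQL Server 2016 or later.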
+ cursor.execute("DROP TABLE IF EXISTS #test_chaining")
+ db_connection.commit()
def test_execute_fetchall_chaining(cursor, db_connection):
@@ -3151,11 +3111,8 @@ def test_execute_fetchall_chaining(cursor, db_connection):
assert rows[1] == [3, "third"], "Filtered second row incorrect"
finally:
- try:
- cursor.execute("DROP TABLE #test_chaining")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_chaining")
+ db_connection.commit()
def test_execute_fetchmany_chaining(cursor, db_connection):
@@ -3185,11 +3142,8 @@ def test_execute_fetchmany_chaining(cursor, db_connection):
assert rows[1] == [2, "value_2"], "Second row incorrect"
finally:
- try:
- cursor.execute("DROP TABLE #test_chaining")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_chaining")
+ db_connection.commit()
def test_execute_rowcount_chaining(cursor, db_connection):
@@ -3227,11 +3181,8 @@ def test_execute_rowcount_chaining(cursor, db_connection):
assert count == -1, "SELECT rowcount should be -1"
finally:
- try:
- cursor.execute("DROP TABLE #test_chaining")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_chaining")
+ db_connection.commit()
def test_execute_description_chaining(cursor):
@@ -3285,11 +3236,8 @@ def test_multiple_chaining_operations(cursor, db_connection):
assert all_rows[1] == ["second"], "Second row should be 'second'"
finally:
- try:
- cursor.execute("DROP TABLE #test_multi_chain")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_multi_chain")
+ db_connection.commit()
def test_chaining_with_parameters(cursor, db_connection):
@@ -3320,11 +3268,8 @@ def test_chaining_with_parameters(cursor, db_connection):
assert rows[1] == ["Charlie", 35], "Second result should be Charlie"
finally:
- try:
- cursor.execute("DROP TABLE #test_params")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_params")
+ db_connection.commit()
def test_chaining_with_iteration(cursor, db_connection):
@@ -3361,11 +3306,8 @@ def test_chaining_with_iteration(cursor, db_connection):
), f"Filtered iteration should return: {expected_names}, got: {results}"
finally:
- try:
- cursor.execute("DROP TABLE #test_iteration")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_iteration")
+ db_connection.commit()
def test_cursor_next_functionality(cursor, db_connection):
@@ -3433,11 +3375,8 @@ def test_cursor_next_functionality(cursor, db_connection):
assert no_more_rows is None, "No more rows should return None"
finally:
- try:
- cursor.execute("DROP TABLE #test_next")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_next")
+ db_connection.commit()
def test_cursor_next_with_different_data_types(cursor, db_connection):
@@ -3491,11 +3430,8 @@ def test_cursor_next_with_different_data_types(cursor, db_connection):
assert next_row is None, "No more rows should return None"
finally:
- try:
- cursor.execute("DROP TABLE #test_next_types")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_next_types")
+ db_connection.commit()
def test_cursor_next_error_conditions(cursor, db_connection):
@@ -3580,11 +3516,8 @@ def test_future_iterator_protocol_compatibility(cursor, db_connection):
assert results2 == expected2, f"Chained results should be {expected2}, got {results2}"
finally:
- try:
- cursor.execute("DROP TABLE #test_future_iter")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_future_iter")
+ db_connection.commit()
def test_chaining_error_handling(cursor):
@@ -3634,11 +3567,8 @@ def test_chaining_performance_statement_reuse(cursor, db_connection):
assert rows[2] == [3, "third"], "Third row incorrect"
finally:
- try:
- cursor.execute("DROP TABLE #test_reuse")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_reuse")
+ db_connection.commit()
def test_execute_chaining_compatibility_examples(cursor, db_connection):
@@ -3698,11 +3628,8 @@ def test_execute_chaining_compatibility_examples(cursor, db_connection):
assert final_count == 1, "Should have 1 user remaining"
finally:
- try:
- cursor.execute("DROP TABLE #users")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #users")
+ db_connection.commit()
def test_rownumber_basic_functionality(cursor, db_connection):
@@ -3762,11 +3689,8 @@ def test_rownumber_basic_functionality(cursor, db_connection):
), f"Rownumber should remain 4 after exhausting result set, got {cursor.rownumber}"
finally:
- try:
- cursor.execute("DROP TABLE #test_rownumber")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_rownumber")
+ db_connection.commit()
def test_cursor_rownumber_mixed_fetches(cursor, db_connection):
@@ -3836,11 +3760,8 @@ def test_cursor_rownumber_empty_results(cursor, db_connection):
except Exception as e:
pytest.fail(f"Empty results rownumber test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE IF EXISTS #pytest_rownumber_empty_results")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_rownumber_empty_results")
+ db_connection.commit()
def test_rownumber_warning_logged(cursor, db_connection):
@@ -3904,11 +3825,8 @@ def test_rownumber_warning_logged(cursor, db_connection):
), f"Expected rownumber 0 after fetch, got {cursor.rownumber}"
finally:
- try:
- cursor.execute("DROP TABLE #test_rownumber_log")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_rownumber_log")
+ db_connection.commit()
def test_rownumber_closed_cursor(cursor, db_connection):
@@ -3952,7 +3870,7 @@ def test_rownumber_closed_cursor(cursor, db_connection):
# Use the main cursor to clean up
cursor.execute("DROP TABLE IF EXISTS #test_rownumber_closed")
db_connection.commit()
- except:
+ except Exception:
pass
@@ -4090,11 +4008,8 @@ def test_nextset_with_different_result_sizes_safe(cursor, db_connection):
except Exception as e:
pytest.fail(f"Safe nextset() different sizes test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #test_nextset_sizes")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_nextset_sizes")
+ db_connection.commit()
def test_nextset_basic_functionality_only(cursor, db_connection):
@@ -4150,11 +4065,8 @@ def test_nextset_basic_functionality_only(cursor, db_connection):
except Exception as e:
pytest.fail(f"Basic nextset() test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #test_basic_nextset")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_basic_nextset")
+ db_connection.commit()
def test_nextset_memory_safety_check(cursor, db_connection):
@@ -4202,11 +4114,8 @@ def test_nextset_memory_safety_check(cursor, db_connection):
except Exception as e:
pytest.fail(f"Memory safety nextset() test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #test_nextset_memory")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #test_nextset_memory")
+ db_connection.commit()
def test_nextset_error_conditions_safe(cursor, db_connection):
@@ -4412,11 +4321,8 @@ def test_fetchval_different_data_types(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval data types test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_types")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_types")
+ db_connection.commit()
def test_fetchval_null_values(cursor, db_connection):
@@ -4440,11 +4346,8 @@ def test_fetchval_null_values(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval NULL values test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_null")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_null")
+ db_connection.commit()
def test_fetchval_no_results(cursor, db_connection):
@@ -4468,11 +4371,8 @@ def test_fetchval_no_results(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval no results test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_empty")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_empty")
+ db_connection.commit()
def test_fetchval_multiple_columns(cursor, db_connection):
@@ -4502,11 +4402,8 @@ def test_fetchval_multiple_columns(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval multiple columns test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_multi")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_multi")
+ db_connection.commit()
def test_fetchval_multiple_rows(cursor, db_connection):
@@ -4531,11 +4428,8 @@ def test_fetchval_multiple_rows(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval multiple rows test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_rows")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_rows")
+ db_connection.commit()
def test_fetchval_method_chaining(cursor, db_connection):
@@ -4600,11 +4494,8 @@ def test_fetchval_rownumber_tracking(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval rownumber tracking test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_rownumber")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_rownumber")
+ db_connection.commit()
def test_fetchval_aggregate_functions(cursor, db_connection):
@@ -4639,11 +4530,8 @@ def test_fetchval_aggregate_functions(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval aggregate functions test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_agg")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_agg")
+ db_connection.commit()
def test_fetchval_empty_result_set_edge_cases(cursor, db_connection):
@@ -4727,11 +4615,8 @@ def test_fetchval_performance_common_patterns(cursor, db_connection):
except Exception as e:
pytest.fail(f"fetchval performance patterns test failed: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_fetchval_perf")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP TABLE IF EXISTS #pytest_fetchval_perf")
+ db_connection.commit()
def test_cursor_commit_basic(cursor, db_connection):
@@ -4777,7 +4662,7 @@ def test_cursor_commit_basic(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_cursor_commit")
cursor.commit()
- except:
+ except Exception:
pass
@@ -4826,7 +4711,7 @@ def test_cursor_rollback_basic(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_cursor_rollback")
cursor.commit()
- except:
+ except Exception:
pass
@@ -4886,7 +4771,7 @@ def test_cursor_commit_affects_all_cursors(db_connection):
cursor1.commit()
cursor1.close()
cursor2.close()
- except:
+ except Exception:
pass
@@ -4944,7 +4829,7 @@ def test_cursor_rollback_affects_all_cursors(db_connection):
cursor1.commit()
cursor1.close()
cursor2.close()
- except:
+ except Exception:
pass
@@ -5031,7 +4916,7 @@ def test_cursor_commit_equivalent_to_connection_commit(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_commit_equiv")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5088,7 +4973,7 @@ def test_cursor_transaction_boundary_behavior(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_transaction")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5123,7 +5008,7 @@ def test_cursor_commit_with_method_chaining(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_chaining")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5175,7 +5060,7 @@ def test_cursor_commit_error_scenarios(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_commit_errors")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5228,7 +5113,7 @@ def test_cursor_commit_performance_patterns(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_commit_perf")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5301,7 +5186,7 @@ def test_cursor_rollback_error_scenarios(cursor, db_connection, conn_str):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_errors")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5352,7 +5237,7 @@ def test_cursor_rollback_with_method_chaining(cursor, db_connection, conn_str):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_chaining")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5422,7 +5307,7 @@ def test_cursor_rollback_savepoints_simulation(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_savepoints")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5503,7 +5388,7 @@ def test_cursor_rollback_performance_patterns(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_perf")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5576,7 +5461,7 @@ def test_cursor_rollback_equivalent_to_connection_rollback(cursor, db_connection
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_equiv")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5653,7 +5538,7 @@ def test_cursor_rollback_nested_transactions_simulation(cursor, db_connection):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_nested")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5741,7 +5626,7 @@ def test_cursor_rollback_data_consistency(cursor, db_connection):
cursor.execute("DROP TABLE #pytest_rollback_orders")
cursor.execute("DROP TABLE #pytest_rollback_customers")
cursor.commit()
- except:
+ except Exception:
pass
@@ -5819,7 +5704,7 @@ def test_cursor_rollback_large_transaction(cursor, db_connection, conn_str):
db_connection.autocommit = original_autocommit
cursor.execute("DROP TABLE #pytest_rollback_large")
cursor.commit()
- except:
+ except Exception:
pass
@@ -6812,11 +6697,8 @@ def test_sql_no_total_large_data_scenario(cursor, db_connection):
print(f"Large data test completed with expected limitation: {e}")
finally:
- try:
- cursor.execute("DROP TABLE #pytest_large_data_no_total")
- db_connection.commit()
- except:
- pass # Table might not exist if test failed early
+        cursor.execute("DROP TABLE IF EXISTS #pytest_large_data_no_total")  # table might not exist if test failed early
+        db_connection.commit()
def test_batch_fetch_empty_values_no_assertion_failure(cursor, db_connection):
@@ -8496,6014 +8378,4865 @@ def test_executemany_uuid_roundtrip_fixed_value(cursor, db_connection):
db_connection.commit()
-def test_decimal_separator_with_multiple_values(cursor, db_connection):
- """Test decimal separator with multiple different decimal values"""
- original_separator = mssql_python.getDecimalSeparator()
-
+@pytest.mark.skipif(not os.getenv("DB_CONNECTION_STRING"), reason="Requires DB_CONNECTION_STRING")
+def test_decimal_separator_fetch_regression(cursor, db_connection):
+ """
+    Test that fetchall() handles DECIMAL values correctly even when
+    setDecimalSeparator is set to something other than '.'.
+ """
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_multi_test (
- id INT PRIMARY KEY,
- positive_value DECIMAL(10, 2),
- negative_value DECIMAL(10, 2),
- zero_value DECIMAL(10, 2),
- small_value DECIMAL(10, 4)
- )
- """)
+ # Create a temp table
+ cursor.execute("CREATE TABLE #TestDecimal (Val DECIMAL(10, 2))")
+ cursor.execute("INSERT INTO #TestDecimal VALUES (1234.56)")
+ cursor.execute("INSERT INTO #TestDecimal VALUES (78.90)")
db_connection.commit()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_multi_test VALUES (1, 123.45, -67.89, 0.00, 0.0001)
- """)
- db_connection.commit()
+ # Set custom separator
+ mssql_python.setDecimalSeparator(",")
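+        # Changing the separator should only affect string formatting;
+        # fetched values must still come back as decimal.Decimal.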
- # Test with default separator first
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default positive value formatting incorrect"
- assert "-67.89" in default_str, "Default negative value formatting incorrect"
+ # Test fetchall
+ cursor.execute("SELECT Val FROM #TestDecimal ORDER BY Val")
+ rows = cursor.fetchall()
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- comma_str = str(row)
+ # Verify fetchall results
+ assert len(rows) == 2, f"Expected 2 rows, got {len(rows)}"
+ assert isinstance(rows[0][0], decimal.Decimal), f"Expected Decimal, got {type(rows[0][0])}"
+ assert rows[0][0] == decimal.Decimal("78.90"), f"Expected 78.90, got {rows[0][0]}"
+ assert rows[1][0] == decimal.Decimal("1234.56"), f"Expected 1234.56, got {rows[1][0]}"
- # Verify comma is used in all decimal values
- assert "123,45" in comma_str, "Positive value not formatted with comma"
- assert "-67,89" in comma_str, "Negative value not formatted with comma"
- assert "0,00" in comma_str, "Zero value not formatted with comma"
- assert "0,0001" in comma_str, "Small value not formatted with comma"
+ # Verify fetchmany
+ cursor.execute("SELECT Val FROM #TestDecimal ORDER BY Val")
+ batch = cursor.fetchmany(2)
+ assert len(batch) == 2
+ assert batch[1][0] == decimal.Decimal("1234.56")
+
+ # Verify fetchone behavior is consistent
+ cursor.execute("SELECT CAST(99.99 AS DECIMAL(10,2))")
+ val = cursor.fetchone()[0]
+ assert isinstance(val, decimal.Decimal)
+ assert val == decimal.Decimal("99.99")
finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+        # Reset the separator to the default '.' so later tests are unaffected
+        mssql_python.setDecimalSeparator(".")
+        cursor.execute("DROP TABLE IF EXISTS #TestDecimal")
+        db_connection.commit()
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_multi_test")
- db_connection.commit()
+def test_datetimeoffset_read_write(cursor, db_connection):
+ """Test reading and writing timezone-aware DATETIMEOFFSET values."""
+ try:
+ test_cases = [
+ # Valid timezone-aware datetimes
+ datetime(2023, 10, 26, 10, 30, 0, tzinfo=timezone(timedelta(hours=5, minutes=30))),
+ datetime(2023, 10, 27, 15, 45, 10, 123456, tzinfo=timezone(timedelta(hours=-8))),
+ datetime(2023, 10, 28, 20, 0, 5, 987654, tzinfo=timezone.utc),
+ ]
-def test_decimal_separator_calculations(cursor, db_connection):
- """Test that decimal separator doesn't affect calculations"""
- original_separator = mssql_python.getDecimalSeparator()
+ cursor.execute(
+ "CREATE TABLE #pytest_datetimeoffset_read_write (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
+ )
+ db_connection.commit()
- try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_calc_test (
- id INT PRIMARY KEY,
- value1 DECIMAL(10, 2),
- value2 DECIMAL(10, 2)
+ insert_stmt = (
+ "INSERT INTO #pytest_datetimeoffset_read_write (id, dto_column) VALUES (?, ?);"
)
- """)
+ for i, dt in enumerate(test_cases):
+ cursor.execute(insert_stmt, i, dt)
db_connection.commit()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_calc_test VALUES (1, 10.25, 5.75)
- """)
+ cursor.execute("SELECT id, dto_column FROM #pytest_datetimeoffset_read_write ORDER BY id;")
+ for i, dt in enumerate(test_cases):
+ row = cursor.fetchone()
+ assert row is not None
+ fetched_id, fetched_dt = row
+ assert fetched_dt.tzinfo is not None
+ assert fetched_dt == dt
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_read_write;")
db_connection.commit()
- # Test with default separator
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation incorrect with default separator"
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
+def test_datetimeoffset_max_min_offsets(cursor, db_connection):
+ """
+ Test inserting and retrieving DATETIMEOFFSET with maximum and minimum allowed offsets (+14:00 and -14:00).
+ Uses fetchone() for retrieval.
+ """
+ try:
+ cursor.execute(
+ "CREATE TABLE #pytest_datetimeoffset_read_write (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
+ )
+ db_connection.commit()
- # Calculations should still work correctly
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation affected by separator change"
+ test_cases = [
+ (
+ 1,
+ datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone(timedelta(hours=14))),
+ ), # max offset
+ (
+ 2,
+ datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone(timedelta(hours=-14))),
+ ), # min offset
+ ]
- # But string representation should use comma
- assert "16,00" in str(row), "Sum result not formatted with comma in string representation"
+ insert_stmt = (
+ "INSERT INTO #pytest_datetimeoffset_read_write (id, dto_column) VALUES (?, ?);"
+ )
+ for row_id, dt in test_cases:
+ cursor.execute(insert_stmt, row_id, dt)
+ db_connection.commit()
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+ cursor.execute("SELECT id, dto_column FROM #pytest_datetimeoffset_read_write ORDER BY id;")
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_calc_test")
- db_connection.commit()
+ for expected_id, expected_dt in test_cases:
+ row = cursor.fetchone()
+ assert row is not None, f"No row fetched for id {expected_id}."
+ fetched_id, fetched_dt = row
+ assert (
+ fetched_id == expected_id
+ ), f"ID mismatch: expected {expected_id}, got {fetched_id}"
+ assert (
+ fetched_dt.tzinfo is not None
+ ), f"Fetched datetime object is naive for id {fetched_id}"
-def test_decimal_separator_function(cursor, db_connection):
- """Test decimal separator functionality with database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
+ assert (
+ fetched_dt == expected_dt
+ ), f"Value mismatch for id {expected_id}: expected {expected_dt}, got {fetched_dt}"
- try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_separator_test (
- id INT PRIMARY KEY,
- decimal_value DECIMAL(10, 2)
- )
- """)
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_read_write;")
db_connection.commit()
- # Insert test values with default separator (.)
- test_value = decimal.Decimal("123.45")
+
+def test_datetimeoffset_invalid_offsets(cursor, db_connection):
+    """Verify the driver rejects offsets beyond ±14 hours."""
+ try:
cursor.execute(
- """
- INSERT INTO #pytest_decimal_separator_test (id, decimal_value)
- VALUES (1, ?)
- """,
- [test_value],
+ "CREATE TABLE #pytest_datetimeoffset_invalid_offsets (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
)
db_connection.commit()
- # First test with default decimal separator (.)
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default separator not found in string representation"
-
- # Now change to comma separator and test string representation
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
-
- # This should format the decimal with a comma in the string representation
- comma_str = str(row)
- assert (
- "123,45" in comma_str
- ), f"Expected comma in string representation but got: {comma_str}"
+ with pytest.raises(Exception):
+ cursor.execute(
+ "INSERT INTO #pytest_datetimeoffset_invalid_offsets (id, dto_column) VALUES (?, ?);",
+ 1,
+ datetime(2025, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=15))),
+ )
+ with pytest.raises(Exception):
+ cursor.execute(
+ "INSERT INTO #pytest_datetimeoffset_invalid_offsets (id, dto_column) VALUES (?, ?);",
+ 2,
+ datetime(2025, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=-15))),
+ )
finally:
- # Restore original decimal separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_separator_test")
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_invalid_offsets;")
db_connection.commit()
-def test_decimal_separator_basic_functionality():
- """Test basic decimal separator functionality without database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
-
+def test_datetimeoffset_dst_transitions(cursor, db_connection):
+ """
+ Test inserting and retrieving DATETIMEOFFSET values around DST transitions.
+ Ensures that driver handles DST correctly and does not crash.
+    Ensures the driver handles DST transitions correctly and does not crash.
try:
- # Test default value
- assert mssql_python.getDecimalSeparator() == ".", "Default decimal separator should be '.'"
-
- # Test setting to comma
- mssql_python.setDecimalSeparator(",")
- assert (
- mssql_python.getDecimalSeparator() == ","
- ), "Decimal separator should be ',' after setting"
+ cursor.execute(
+ "CREATE TABLE #pytest_datetimeoffset_dst_transitions (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
+ )
+ db_connection.commit()
- # Test setting to other valid separators
- mssql_python.setDecimalSeparator(":")
- assert (
- mssql_python.getDecimalSeparator() == ":"
- ), "Decimal separator should be ':' after setting"
+        # 2025 US Eastern DST transitions (spring forward Mar 9, fall back Nov 2)
+ dst_test_cases = [
+ (
+ 1,
+ datetime(2025, 3, 9, 1, 59, 59, tzinfo=timezone(timedelta(hours=-5))),
+ ), # Just before spring forward
+ (
+ 2,
+ datetime(2025, 3, 9, 3, 0, 0, tzinfo=timezone(timedelta(hours=-4))),
+ ), # Just after spring forward
+ (
+ 3,
+ datetime(2025, 11, 2, 1, 59, 59, tzinfo=timezone(timedelta(hours=-4))),
+ ), # Just before fall back
+ (
+ 4,
+ datetime(2025, 11, 2, 1, 0, 0, tzinfo=timezone(timedelta(hours=-5))),
+ ), # Just after fall back
+ ]
- # Test invalid inputs
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("") # Empty string
+ insert_stmt = (
+ "INSERT INTO #pytest_datetimeoffset_dst_transitions (id, dto_column) VALUES (?, ?);"
+ )
+ for row_id, dt in dst_test_cases:
+ cursor.execute(insert_stmt, row_id, dt)
+ db_connection.commit()
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("too_long") # More than one character
+ cursor.execute(
+ "SELECT id, dto_column FROM #pytest_datetimeoffset_dst_transitions ORDER BY id;"
+ )
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator(123) # Not a string
+ for expected_id, expected_dt in dst_test_cases:
+ row = cursor.fetchone()
+ assert row is not None, f"No row fetched for id {expected_id}."
+ fetched_id, fetched_dt = row
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+ assert (
+ fetched_id == expected_id
+ ), f"ID mismatch: expected {expected_id}, got {fetched_id}"
+ assert (
+ fetched_dt.tzinfo is not None
+ ), f"Fetched datetime object is naive for id {fetched_id}"
+ assert (
+ fetched_dt == expected_dt
+ ), f"Value mismatch for id {expected_id}: expected {expected_dt}, got {fetched_dt}"
-def test_lowercase_attribute(cursor, db_connection):
- """Test that the lowercase attribute properly converts column names to lowercase"""
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_dst_transitions;")
+ db_connection.commit()
- # Store original value to restore after test
- original_lowercase = mssql_python.lowercase
- drop_cursor = None
+def test_datetimeoffset_leap_second(cursor, db_connection):
+    """Ensure the driver handles leap-second-like microsecond edge cases without crashing."""
try:
- # Create a test table with mixed-case column names
- cursor.execute("""
- CREATE TABLE #pytest_lowercase_test (
- ID INT PRIMARY KEY,
- UserName VARCHAR(50),
- EMAIL_ADDRESS VARCHAR(100),
- PhoneNumber VARCHAR(20)
+ cursor.execute(
+ "CREATE TABLE #pytest_datetimeoffset_leap_second (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
)
- """)
db_connection.commit()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_lowercase_test (ID, UserName, EMAIL_ADDRESS, PhoneNumber)
- VALUES (1, 'JohnDoe', 'john@example.com', '555-1234')
- """)
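+        # Python datetime cannot represent second=60, so use the last
+        # representable microsecond of the year as a leap-second stand-in.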
+ leap_second_sim = datetime(2023, 12, 31, 23, 59, 59, 999999, tzinfo=timezone.utc)
+ cursor.execute(
+ "INSERT INTO #pytest_datetimeoffset_leap_second (id, dto_column) VALUES (?, ?);",
+ 1,
+ leap_second_sim,
+ )
db_connection.commit()
- # First test with lowercase=False (default)
- mssql_python.lowercase = False
- cursor1 = db_connection.cursor()
- cursor1.execute("SELECT * FROM #pytest_lowercase_test")
-
- # Description column names should preserve original case
- column_names1 = [desc[0] for desc in cursor1.description]
- assert "ID" in column_names1, "Column 'ID' should be present with original case"
- assert "UserName" in column_names1, "Column 'UserName' should be present with original case"
-
- # Make sure to consume all results and close the cursor
- cursor1.fetchall()
- cursor1.close()
-
- # Now test with lowercase=True
- mssql_python.lowercase = True
- cursor2 = db_connection.cursor()
- cursor2.execute("SELECT * FROM #pytest_lowercase_test")
-
- # Description column names should be lowercase
- column_names2 = [desc[0] for desc in cursor2.description]
- assert "id" in column_names2, "Column names should be lowercase when lowercase=True"
- assert "username" in column_names2, "Column names should be lowercase when lowercase=True"
+ row = cursor.execute(
+ "SELECT dto_column FROM #pytest_datetimeoffset_leap_second;"
+ ).fetchone()
+ assert row[0].tzinfo is not None
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_leap_second;")
+ db_connection.commit()
- # Make sure to consume all results and close the cursor
- cursor2.fetchall()
- cursor2.close()
- # Create a fresh cursor for cleanup
- drop_cursor = db_connection.cursor()
+def test_datetimeoffset_malformed_input(cursor, db_connection):
+    """Verify the driver raises an error for malformed DATETIMEOFFSET strings."""
+ try:
+ cursor.execute(
+ "CREATE TABLE #pytest_datetimeoffset_malformed_input (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
+ )
+ db_connection.commit()
+ with pytest.raises(Exception):
+ cursor.execute(
+ "INSERT INTO #pytest_datetimeoffset_malformed_input (id, dto_column) VALUES (?, ?);",
+ 1,
+ "2023-13-45 25:61:00 +99:99",
+ ) # invalid string
finally:
- # Restore original value
- mssql_python.lowercase = original_lowercase
-
- try:
- # Use a separate cursor for cleanup
- if drop_cursor:
- drop_cursor.execute("DROP TABLE IF EXISTS #pytest_lowercase_test")
- db_connection.commit()
- drop_cursor.close()
- except Exception as e:
- print(f"Warning: Failed to drop test table: {e}")
-
+ cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_malformed_input;")
+ db_connection.commit()
-def test_decimal_separator_function(cursor, db_connection):
- """Test decimal separator functionality with database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
+def test_datetimeoffset_executemany(cursor, db_connection):
+ """
+ Test the driver's ability to correctly read and write DATETIMEOFFSET data
+ using executemany, including timezone information.
+ """
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_separator_test (
- id INT PRIMARY KEY,
- decimal_value DECIMAL(10, 2)
- )
- """)
- db_connection.commit()
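+        # Python datetimes carry microsecond precision, so the 7-digit fractional
+        # seconds in the DATETIMEOFFSET strings below truncate to 6 digits in the
+        # expected values (e.g. .1234567 -> 123456 microseconds).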
+ datetimeoffset_test_cases = [
+ (
+ "2023-10-26 10:30:00.0000000 +05:30",
+ datetime(
+ 2023,
+ 10,
+ 26,
+ 10,
+ 30,
+ 0,
+ 0,
+ tzinfo=timezone(timedelta(hours=5, minutes=30)),
+ ),
+ ),
+ (
+ "2023-10-27 15:45:10.1234567 -08:00",
+ datetime(
+ 2023,
+ 10,
+ 27,
+ 15,
+ 45,
+ 10,
+ 123456,
+ tzinfo=timezone(timedelta(hours=-8)),
+ ),
+ ),
+ (
+ "2023-10-28 20:00:05.9876543 +00:00",
+ datetime(2023, 10, 28, 20, 0, 5, 987654, tzinfo=timezone(timedelta(hours=0))),
+ ),
+ ]
- # Insert test values with default separator (.)
- test_value = decimal.Decimal("123.45")
+ # Create temp table
cursor.execute(
- """
- INSERT INTO #pytest_decimal_separator_test (id, decimal_value)
- VALUES (1, ?)
- """,
- [test_value],
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
)
+ cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
db_connection.commit()
- # First test with default decimal separator (.)
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default separator not found in string representation"
+ # Prepare data for executemany
+ param_list = [(i, python_dt) for i, (_, python_dt) in enumerate(datetimeoffset_test_cases)]
+ cursor.executemany("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", param_list)
+ db_connection.commit()
- # Now change to comma separator and test string representation
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
+ # Read back and validate
+ cursor.execute("SELECT id, dto_column FROM #pytest_dto ORDER BY id;")
+ rows = cursor.fetchall()
- # This should format the decimal with a comma in the string representation
- comma_str = str(row)
- assert (
- "123,45" in comma_str
- ), f"Expected comma in string representation but got: {comma_str}"
+ for i, (sql_str, python_dt) in enumerate(datetimeoffset_test_cases):
+ fetched_id, fetched_dto = rows[i]
+ assert fetched_dto.tzinfo is not None, "Fetched datetime object is naive."
+ assert (
+ fetched_dto == python_dt
+ ), f"Value mismatch for id {fetched_id}: expected {python_dt}, got {fetched_dto}"
finally:
- # Restore original decimal separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_separator_test")
+ cursor.execute(
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
+ )
db_connection.commit()
-def test_decimal_separator_basic_functionality():
- """Test basic decimal separator functionality without database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
-
+def test_datetimeoffset_execute_vs_executemany_consistency(cursor, db_connection):
+ """
+ Check that execute() and executemany() produce the same stored DATETIMEOFFSET
+ for identical timezone-aware datetime objects.
+ """
try:
- # Test default value
- assert mssql_python.getDecimalSeparator() == ".", "Default decimal separator should be '.'"
-
- # Test setting to comma
- mssql_python.setDecimalSeparator(",")
- assert (
- mssql_python.getDecimalSeparator() == ","
- ), "Decimal separator should be ',' after setting"
+ test_dt = datetime(
+ 2023,
+ 10,
+ 30,
+ 12,
+ 0,
+ 0,
+ microsecond=123456,
+ tzinfo=timezone(timedelta(hours=5, minutes=30)),
+ )
+ cursor.execute(
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
+ )
+ cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
+ db_connection.commit()
- # Test setting to other valid separators
- mssql_python.setDecimalSeparator(":")
- assert (
- mssql_python.getDecimalSeparator() == ":"
- ), "Decimal separator should be ':' after setting"
+ # Insert using execute()
+ cursor.execute("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", 1, test_dt)
+ db_connection.commit()
- # Test invalid inputs
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("") # Empty string
+ # Insert using executemany()
+ cursor.executemany(
+ "INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", [(2, test_dt)]
+ )
+ db_connection.commit()
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("too_long") # More than one character
+ cursor.execute("SELECT dto_column FROM #pytest_dto ORDER BY id;")
+ rows = cursor.fetchall()
+ assert len(rows) == 2
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator(123) # Not a string
+ # Compare textual representation to ensure binding semantics match
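+        # (CONVERT style 127 yields ISO 8601 text, which includes the stored offset)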
+ cursor.execute("SELECT CONVERT(VARCHAR(35), dto_column, 127) FROM #pytest_dto ORDER BY id;")
+ textual_rows = [r[0] for r in cursor.fetchall()]
+ assert textual_rows[0] == textual_rows[1], "execute() and executemany() results differ"
finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
+ cursor.execute(
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
+ )
+ db_connection.commit()
-def test_decimal_separator_with_multiple_values(cursor, db_connection):
- """Test decimal separator with multiple different decimal values"""
- original_separator = mssql_python.getDecimalSeparator()
+def test_datetimeoffset_extreme_offsets(cursor, db_connection):
+ """
+ Test boundary offsets (+14:00 and -12:00) to ensure correct round-trip handling.
+ """
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_multi_test (
- id INT PRIMARY KEY,
- positive_value DECIMAL(10, 2),
- negative_value DECIMAL(10, 2),
- zero_value DECIMAL(10, 2),
- small_value DECIMAL(10, 4)
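+        # Real-world UTC offsets span -12:00 to +14:00; exercise both extremes.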
+ extreme_offsets = [
+ datetime(2023, 10, 30, 0, 0, 0, 0, tzinfo=timezone(timedelta(hours=14))),
+ datetime(2023, 10, 30, 0, 0, 0, 0, tzinfo=timezone(timedelta(hours=-12))),
+ ]
+
+ cursor.execute(
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
)
- """)
+ cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
db_connection.commit()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_multi_test VALUES (1, 123.45, -67.89, 0.00, 0.0001)
- """)
+ param_list = [(i, dt) for i, dt in enumerate(extreme_offsets)]
+ cursor.executemany("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", param_list)
db_connection.commit()
- # Test with default separator first
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default positive value formatting incorrect"
- assert "-67.89" in default_str, "Default negative value formatting incorrect"
-
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- comma_str = str(row)
-
- # Verify comma is used in all decimal values
- assert "123,45" in comma_str, "Positive value not formatted with comma"
- assert "-67,89" in comma_str, "Negative value not formatted with comma"
- assert "0,00" in comma_str, "Zero value not formatted with comma"
- assert "0,0001" in comma_str, "Small value not formatted with comma"
+ cursor.execute("SELECT id, dto_column FROM #pytest_dto ORDER BY id;")
+ rows = cursor.fetchall()
+ for i, dt in enumerate(extreme_offsets):
+ _, fetched = rows[i]
+ assert fetched.tzinfo is not None
+ assert fetched == dt, f"Value mismatch for id {i}: expected {dt}, got {fetched}"
finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_multi_test")
+ cursor.execute(
+ "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
+ )
db_connection.commit()
-def test_decimal_separator_calculations(cursor, db_connection):
- """Test that decimal separator doesn't affect calculations"""
- original_separator = mssql_python.getDecimalSeparator()
-
+def test_datetimeoffset_native_vs_string_simple(cursor, db_connection):
+ """
+ Replicates the user's testing scenario: fetch DATETIMEOFFSET as native datetime
+ and as string using CONVERT(nvarchar(35), ..., 121).
+ """
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_calc_test (
- id INT PRIMARY KEY,
- value1 DECIMAL(10, 2),
- value2 DECIMAL(10, 2)
+ cursor.execute(
+ "CREATE TABLE #pytest_dto_user_test (id INT PRIMARY KEY, Systime DATETIMEOFFSET);"
)
- """)
- db_connection.commit()
-
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_calc_test VALUES (1, 10.25, 5.75)
- """)
db_connection.commit()
- # Test with default separator
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation incorrect with default separator"
+        # Insert rows similar to the user's example
+ test_rows = [
+ (
+ 1,
+ datetime(2025, 5, 14, 12, 35, 52, 501000, tzinfo=timezone(timedelta(hours=1))),
+ ),
+ (
+ 2,
+ datetime(
+ 2025,
+ 5,
+ 14,
+ 15,
+ 20,
+ 30,
+ 123000,
+ tzinfo=timezone(timedelta(hours=-5)),
+ ),
+ ),
+ ]
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
+ for i, dt in test_rows:
+ cursor.execute("INSERT INTO #pytest_dto_user_test (id, Systime) VALUES (?, ?);", i, dt)
+ db_connection.commit()
- # Calculations should still work correctly
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation affected by separator change"
+ # Native fetch (like the user's first execute)
+ cursor.execute("SELECT Systime FROM #pytest_dto_user_test WHERE id=1;")
+ dt_native = cursor.fetchone()[0]
+ assert dt_native.tzinfo is not None
+ assert dt_native == test_rows[0][1]
- # But string representation should use comma
- assert "16,00" in str(row), "Sum result not formatted with comma in string representation"
+ # String fetch (like the user's convert to nvarchar)
+ cursor.execute(
+ "SELECT CONVERT(nvarchar(35), Systime, 121) FROM #pytest_dto_user_test WHERE id=1;"
+ )
+ dt_str = cursor.fetchone()[0]
+ assert dt_str.endswith("+01:00") # original offset preserved
finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_calc_test")
+ cursor.execute("DROP TABLE IF EXISTS #pytest_dto_user_test;")
db_connection.commit()
-@pytest.mark.skipif(not os.getenv("DB_CONNECTION_STRING"), reason="Requires DB_CONNECTION_STRING")
-def test_decimal_separator_fetch_regression(cursor, db_connection):
- """
- Test that fetchall() dealing with DECIMALS works correctly even when
- setDecimalSeparator is set to something other than '.'
- """
- try:
- # Create a temp table
- cursor.execute("CREATE TABLE #TestDecimal (Val DECIMAL(10, 2))")
- cursor.execute("INSERT INTO #TestDecimal VALUES (1234.56)")
- cursor.execute("INSERT INTO #TestDecimal VALUES (78.90)")
- db_connection.commit()
-
- # Set custom separator
- mssql_python.setDecimalSeparator(",")
+def test_cursor_setinputsizes_basic(db_connection):
+ """Test the basic functionality of setinputsizes"""
- # Test fetchall
- cursor.execute("SELECT Val FROM #TestDecimal ORDER BY Val")
- rows = cursor.fetchall()
+ cursor = db_connection.cursor()
- # Verify fetchall results
- assert len(rows) == 2, f"Expected 2 rows, got {len(rows)}"
- assert isinstance(rows[0][0], decimal.Decimal), f"Expected Decimal, got {type(rows[0][0])}"
- assert rows[0][0] == decimal.Decimal("78.90"), f"Expected 78.90, got {rows[0][0]}"
- assert rows[1][0] == decimal.Decimal("1234.56"), f"Expected 1234.56, got {rows[1][0]}"
+ # Create a test table
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes (
+ string_col NVARCHAR(100),
+ int_col INT
+ )
+ """)
- # Verify fetchmany
- cursor.execute("SELECT Val FROM #TestDecimal ORDER BY Val")
- batch = cursor.fetchmany(2)
- assert len(batch) == 2
- assert batch[1][0] == decimal.Decimal("1234.56")
+ # Set input sizes for parameters
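+    # Each entry is a (sql_type, column_size, decimal_digits) tuple, one per '?' marker.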
+ cursor.setinputsizes([(mssql_python.SQL_WVARCHAR, 100, 0), (mssql_python.SQL_INTEGER, 0, 0)])
- # Verify fetchone behavior is consistent
- cursor.execute("SELECT CAST(99.99 AS DECIMAL(10,2))")
- val = cursor.fetchone()[0]
- assert isinstance(val, decimal.Decimal)
- assert val == decimal.Decimal("99.99")
+ # Execute with parameters
+ cursor.execute("INSERT INTO #test_inputsizes VALUES (?, ?)", "Test String", 42)
- finally:
- # Reset separator to default just in case
- mssql_python.setDecimalSeparator(".")
- try:
- cursor.execute("DROP TABLE IF EXISTS #TestDecimal")
- db_connection.commit()
- except Exception:
- pass
+ # Verify data was inserted correctly
+ cursor.execute("SELECT * FROM #test_inputsizes")
+ row = cursor.fetchone()
+ assert row[0] == "Test String"
+ assert row[1] == 42
-def test_datetimeoffset_read_write(cursor, db_connection):
- """Test reading and writing timezone-aware DATETIMEOFFSET values."""
- try:
- test_cases = [
- # Valid timezone-aware datetimes
- datetime(2023, 10, 26, 10, 30, 0, tzinfo=timezone(timedelta(hours=5, minutes=30))),
- datetime(2023, 10, 27, 15, 45, 10, 123456, tzinfo=timezone(timedelta(hours=-8))),
- datetime(2023, 10, 28, 20, 0, 5, 987654, tzinfo=timezone.utc),
- ]
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes")
- cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_read_write (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
- )
- db_connection.commit()
- insert_stmt = (
- "INSERT INTO #pytest_datetimeoffset_read_write (id, dto_column) VALUES (?, ?);"
- )
- for i, dt in enumerate(test_cases):
- cursor.execute(insert_stmt, i, dt)
- db_connection.commit()
+def test_cursor_setinputsizes_with_executemany_float(db_connection):
+ """Test setinputsizes with executemany using float instead of Decimal"""
- cursor.execute("SELECT id, dto_column FROM #pytest_datetimeoffset_read_write ORDER BY id;")
- for i, dt in enumerate(test_cases):
- row = cursor.fetchone()
- assert row is not None
- fetched_id, fetched_dt = row
- assert fetched_dt.tzinfo is not None
- assert fetched_dt == dt
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_read_write;")
- db_connection.commit()
+ cursor = db_connection.cursor()
+ # Create a test table
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_float")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_float (
+ id INT,
+ name NVARCHAR(50),
+ price REAL /* Use REAL instead of DECIMAL */
+ )
+ """)
-def test_datetimeoffset_max_min_offsets(cursor, db_connection):
- """
- Test inserting and retrieving DATETIMEOFFSET with maximum and minimum allowed offsets (+14:00 and -14:00).
- Uses fetchone() for retrieval.
- """
- try:
- cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_read_write (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
- )
- db_connection.commit()
+ # Prepare data with float values
+ data = [(1, "Item 1", 10.99), (2, "Item 2", 20.50), (3, "Item 3", 30.75)]
- test_cases = [
- (
- 1,
- datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone(timedelta(hours=14))),
- ), # max offset
- (
- 2,
- datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone(timedelta(hours=-14))),
- ), # min offset
+ # Set input sizes for parameters
+ cursor.setinputsizes(
+ [
+ (mssql_python.SQL_INTEGER, 0, 0),
+ (mssql_python.SQL_WVARCHAR, 50, 0),
+ (mssql_python.SQL_REAL, 0, 0),
]
+ )
- insert_stmt = (
- "INSERT INTO #pytest_datetimeoffset_read_write (id, dto_column) VALUES (?, ?);"
- )
- for row_id, dt in test_cases:
- cursor.execute(insert_stmt, row_id, dt)
- db_connection.commit()
-
- cursor.execute("SELECT id, dto_column FROM #pytest_datetimeoffset_read_write ORDER BY id;")
+ # Execute with parameters
+ cursor.executemany("INSERT INTO #test_inputsizes_float VALUES (?, ?, ?)", data)
- for expected_id, expected_dt in test_cases:
- row = cursor.fetchone()
- assert row is not None, f"No row fetched for id {expected_id}."
- fetched_id, fetched_dt = row
+ # Verify all data was inserted correctly
+ cursor.execute("SELECT * FROM #test_inputsizes_float ORDER BY id")
+ rows = cursor.fetchall()
- assert (
- fetched_id == expected_id
- ), f"ID mismatch: expected {expected_id}, got {fetched_id}"
- assert (
- fetched_dt.tzinfo is not None
- ), f"Fetched datetime object is naive for id {fetched_id}"
+ assert len(rows) == 3
+ assert rows[0][0] == 1
+ assert rows[0][1] == "Item 1"
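+    # REAL is single precision, so compare with a tolerance rather than exact equality.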
+ assert abs(rows[0][2] - 10.99) < 0.001
- assert (
- fetched_dt == expected_dt
- ), f"Value mismatch for id {expected_id}: expected {expected_dt}, got {fetched_dt}"
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_float")
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_read_write;")
- db_connection.commit()
+def test_cursor_setinputsizes_reset(db_connection):
+ """Test that setinputsizes is reset after execution"""
-def test_datetimeoffset_invalid_offsets(cursor, db_connection):
- """Verify driver rejects offsets beyond ±14 hours."""
- try:
- cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_invalid_offsets (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
- )
- db_connection.commit()
+ cursor = db_connection.cursor()
- with pytest.raises(Exception):
- cursor.execute(
- "INSERT INTO #pytest_datetimeoffset_invalid_offsets (id, dto_column) VALUES (?, ?);",
- 1,
- datetime(2025, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=15))),
- )
+ # Create a test table
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_reset")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_reset (
+ col1 NVARCHAR(100),
+ col2 INT
+ )
+ """)
- with pytest.raises(Exception):
- cursor.execute(
- "INSERT INTO #pytest_datetimeoffset_invalid_offsets (id, dto_column) VALUES (?, ?);",
- 2,
- datetime(2025, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=-15))),
- )
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_invalid_offsets;")
- db_connection.commit()
+ # Set input sizes for parameters
+ cursor.setinputsizes([(mssql_python.SQL_WVARCHAR, 100, 0), (mssql_python.SQL_INTEGER, 0, 0)])
+ # Execute with parameters
+ cursor.execute("INSERT INTO #test_inputsizes_reset VALUES (?, ?)", "Test String", 42)
-def test_datetimeoffset_dst_transitions(cursor, db_connection):
- """
- Test inserting and retrieving DATETIMEOFFSET values around DST transitions.
- Ensures that driver handles DST correctly and does not crash.
- """
- try:
- cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_dst_transitions (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
- )
- db_connection.commit()
+ # Verify inputsizes was reset
+ assert cursor._inputsizes is None
- # Example DST transition dates (replace with actual region offset if needed)
- dst_test_cases = [
- (
- 1,
- datetime(2025, 3, 9, 1, 59, 59, tzinfo=timezone(timedelta(hours=-5))),
- ), # Just before spring forward
- (
- 2,
- datetime(2025, 3, 9, 3, 0, 0, tzinfo=timezone(timedelta(hours=-4))),
- ), # Just after spring forward
- (
- 3,
- datetime(2025, 11, 2, 1, 59, 59, tzinfo=timezone(timedelta(hours=-4))),
- ), # Just before fall back
- (
- 4,
- datetime(2025, 11, 2, 1, 0, 0, tzinfo=timezone(timedelta(hours=-5))),
- ), # Just after fall back
- ]
+ # Now execute again without setting input sizes
+ cursor.execute("INSERT INTO #test_inputsizes_reset VALUES (?, ?)", "Another String", 84)
- insert_stmt = (
- "INSERT INTO #pytest_datetimeoffset_dst_transitions (id, dto_column) VALUES (?, ?);"
- )
- for row_id, dt in dst_test_cases:
- cursor.execute(insert_stmt, row_id, dt)
- db_connection.commit()
+ # Verify both rows were inserted correctly
+ cursor.execute("SELECT * FROM #test_inputsizes_reset ORDER BY col2")
+ rows = cursor.fetchall()
- cursor.execute(
- "SELECT id, dto_column FROM #pytest_datetimeoffset_dst_transitions ORDER BY id;"
- )
+ assert len(rows) == 2
+ assert rows[0][0] == "Test String"
+ assert rows[0][1] == 42
+ assert rows[1][0] == "Another String"
+ assert rows[1][1] == 84
- for expected_id, expected_dt in dst_test_cases:
- row = cursor.fetchone()
- assert row is not None, f"No row fetched for id {expected_id}."
- fetched_id, fetched_dt = row
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_reset")
- assert (
- fetched_id == expected_id
- ), f"ID mismatch: expected {expected_id}, got {fetched_id}"
- assert (
- fetched_dt.tzinfo is not None
- ), f"Fetched datetime object is naive for id {fetched_id}"
- assert (
- fetched_dt == expected_dt
- ), f"Value mismatch for id {expected_id}: expected {expected_dt}, got {fetched_dt}"
+def test_cursor_setinputsizes_override_inference(db_connection):
+ """Test that setinputsizes overrides type inference"""
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_dst_transitions;")
- db_connection.commit()
+ cursor = db_connection.cursor()
+
+ # Create a test table with specific types
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_override")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_override (
+ small_int SMALLINT,
+ big_text NVARCHAR(MAX)
+ )
+ """)
+ # Set input sizes that override the default inference
+ # For SMALLINT, use a valid precision value (5 is typical for SMALLINT)
+ cursor.setinputsizes(
+ [
+ (mssql_python.SQL_SMALLINT, 5, 0), # Use valid precision for SMALLINT
+            (mssql_python.SQL_WVARCHAR, 8000, 0),  # Bind the short string with a large NVARCHAR size
+ ]
+ )
+
+ # Test with values that would normally be inferred differently
+ big_number = 30000 # Would normally be INTEGER or BIGINT
+ short_text = "abc" # Would normally be a regular NVARCHAR
-def test_datetimeoffset_leap_second(cursor, db_connection):
- """Ensure driver handles leap-second-like microsecond edge cases without crashing."""
try:
cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_leap_second (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
+ "INSERT INTO #test_inputsizes_override VALUES (?, ?)",
+ big_number,
+ short_text,
)
- db_connection.commit()
- leap_second_sim = datetime(2023, 12, 31, 23, 59, 59, 999999, tzinfo=timezone.utc)
+ # Verify the row was inserted (may have been truncated by SQL Server)
+ cursor.execute("SELECT * FROM #test_inputsizes_override")
+ row = cursor.fetchone()
+
+ # SQL Server would either truncate or round the value
+ assert row[1] == short_text
+
+ except Exception as e:
+ # If an exception occurs, it should be related to the data type conversion;
+ # driver errors about invalid precision are also acceptable
+ error_text = str(e).lower()
+ assert any(
+ text in error_text
+ for text in [
+ "overflow",
+ "out of range",
+ "convert",
+ "invalid precision",
+ "precision value",
+ ]
+ ), f"Unexpected error: {e}"
+
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_override")
+
+
+def test_setinputsizes_parameter_count_mismatch_fewer(db_connection):
+ """Test setinputsizes with fewer sizes than parameters"""
+ import warnings
+
+ cursor = db_connection.cursor()
+
+ # Create a test table
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_mismatch (
+ col1 INT,
+ col2 NVARCHAR(100),
+ col3 FLOAT
+ )
+ """)
+
+ # Set fewer input sizes than parameters
+ cursor.setinputsizes(
+ [
+ (mssql_python.SQL_INTEGER, 0, 0),
+ (mssql_python.SQL_WVARCHAR, 100, 0),
+ # Missing third parameter type
+ ]
+ )
+
+ # Execute with more parameters than specified input sizes
+ # This should use automatic type inference for the third parameter
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always") # ensure repeat warnings are still recorded
cursor.execute(
- "INSERT INTO #pytest_datetimeoffset_leap_second (id, dto_column) VALUES (?, ?);",
+ "INSERT INTO #test_inputsizes_mismatch VALUES (?, ?, ?)",
1,
- leap_second_sim,
+ "Test String",
+ 3.14,
)
- db_connection.commit()
+ assert len(w) > 0, "Warning should be issued for parameter count mismatch"
+ assert "number of input sizes" in str(w[0].message).lower()
- row = cursor.execute(
- "SELECT dto_column FROM #pytest_datetimeoffset_leap_second;"
- ).fetchone()
- assert row[0].tzinfo is not None
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_leap_second;")
- db_connection.commit()
+ # Verify data was inserted correctly
+ cursor.execute("SELECT * FROM #test_inputsizes_mismatch")
+ row = cursor.fetchone()
+ assert row[0] == 1
+ assert row[1] == "Test String"
+ assert abs(row[2] - 3.14) < 0.0001
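+ # FLOAT maps to a binary double on SQL Server, so 3.14 may not round-trip
+ # exactly; compare with a small tolerance instead of equality.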
-def test_datetimeoffset_malformed_input(cursor, db_connection):
- """Verify driver raises error for invalid datetimeoffset strings."""
- try:
- cursor.execute(
- "CREATE TABLE #pytest_datetimeoffset_malformed_input (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);"
- )
- db_connection.commit()
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
- with pytest.raises(Exception):
- cursor.execute(
- "INSERT INTO #pytest_datetimeoffset_malformed_input (id, dto_column) VALUES (?, ?);",
- 1,
- "2023-13-45 25:61:00 +99:99",
- ) # invalid string
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_datetimeoffset_malformed_input;")
- db_connection.commit()
+def test_setinputsizes_parameter_count_mismatch_more(db_connection):
+ """Test setinputsizes with more sizes than parameters"""
+ import warnings
-def test_datetimeoffset_executemany(cursor, db_connection):
- """
- Test the driver's ability to correctly read and write DATETIMEOFFSET data
- using executemany, including timezone information.
- """
- try:
- datetimeoffset_test_cases = [
- (
- "2023-10-26 10:30:00.0000000 +05:30",
- datetime(
- 2023,
- 10,
- 26,
- 10,
- 30,
- 0,
- 0,
- tzinfo=timezone(timedelta(hours=5, minutes=30)),
- ),
- ),
- (
- "2023-10-27 15:45:10.1234567 -08:00",
- datetime(
- 2023,
- 10,
- 27,
- 15,
- 45,
- 10,
- 123456,
- tzinfo=timezone(timedelta(hours=-8)),
- ),
- ),
- (
- "2023-10-28 20:00:05.9876543 +00:00",
- datetime(2023, 10, 28, 20, 0, 5, 987654, tzinfo=timezone(timedelta(hours=0))),
- ),
- ]
-
- # Create temp table
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
- db_connection.commit()
-
- # Prepare data for executemany
- param_list = [(i, python_dt) for i, (_, python_dt) in enumerate(datetimeoffset_test_cases)]
- cursor.executemany("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", param_list)
- db_connection.commit()
-
- # Read back and validate
- cursor.execute("SELECT id, dto_column FROM #pytest_dto ORDER BY id;")
- rows = cursor.fetchall()
-
- for i, (sql_str, python_dt) in enumerate(datetimeoffset_test_cases):
- fetched_id, fetched_dto = rows[i]
- assert fetched_dto.tzinfo is not None, "Fetched datetime object is naive."
-
- assert (
- fetched_dto == python_dt
- ), f"Value mismatch for id {fetched_id}: expected {python_dt}, got {fetched_dto}"
- finally:
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- db_connection.commit()
-
-
-def test_datetimeoffset_execute_vs_executemany_consistency(cursor, db_connection):
- """
- Check that execute() and executemany() produce the same stored DATETIMEOFFSET
- for identical timezone-aware datetime objects.
- """
- try:
- test_dt = datetime(
- 2023,
- 10,
- 30,
- 12,
- 0,
- 0,
- microsecond=123456,
- tzinfo=timezone(timedelta(hours=5, minutes=30)),
- )
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
- db_connection.commit()
-
- # Insert using execute()
- cursor.execute("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", 1, test_dt)
- db_connection.commit()
-
- # Insert using executemany()
- cursor.executemany(
- "INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", [(2, test_dt)]
- )
- db_connection.commit()
-
- cursor.execute("SELECT dto_column FROM #pytest_dto ORDER BY id;")
- rows = cursor.fetchall()
- assert len(rows) == 2
-
- # Compare textual representation to ensure binding semantics match
- cursor.execute("SELECT CONVERT(VARCHAR(35), dto_column, 127) FROM #pytest_dto ORDER BY id;")
- textual_rows = [r[0] for r in cursor.fetchall()]
- assert textual_rows[0] == textual_rows[1], "execute() and executemany() results differ"
-
- finally:
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- db_connection.commit()
-
-
-def test_datetimeoffset_extreme_offsets(cursor, db_connection):
- """
- Test boundary offsets (+14:00 and -12:00) to ensure correct round-trip handling.
- """
- try:
- extreme_offsets = [
- datetime(2023, 10, 30, 0, 0, 0, 0, tzinfo=timezone(timedelta(hours=14))),
- datetime(2023, 10, 30, 0, 0, 0, 0, tzinfo=timezone(timedelta(hours=-12))),
- ]
-
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- cursor.execute("CREATE TABLE #pytest_dto (id INT PRIMARY KEY, dto_column DATETIMEOFFSET);")
- db_connection.commit()
-
- param_list = [(i, dt) for i, dt in enumerate(extreme_offsets)]
- cursor.executemany("INSERT INTO #pytest_dto (id, dto_column) VALUES (?, ?);", param_list)
- db_connection.commit()
-
- cursor.execute("SELECT id, dto_column FROM #pytest_dto ORDER BY id;")
- rows = cursor.fetchall()
-
- for i, dt in enumerate(extreme_offsets):
- _, fetched = rows[i]
- assert fetched.tzinfo is not None
- assert fetched == dt, f"Value mismatch for id {i}: expected {dt}, got {fetched}"
- finally:
- cursor.execute(
- "IF OBJECT_ID('tempdb..#pytest_dto', 'U') IS NOT NULL DROP TABLE #pytest_dto;"
- )
- db_connection.commit()
-
-
-def test_datetimeoffset_native_vs_string_simple(cursor, db_connection):
- """
- Replicates the user's testing scenario: fetch DATETIMEOFFSET as native datetime
- and as string using CONVERT(nvarchar(35), ..., 121).
- """
- try:
- cursor.execute(
- "CREATE TABLE #pytest_dto_user_test (id INT PRIMARY KEY, Systime DATETIMEOFFSET);"
- )
- db_connection.commit()
-
- # Insert rows similar to user's example
- test_rows = [
- (
- 1,
- datetime(2025, 5, 14, 12, 35, 52, 501000, tzinfo=timezone(timedelta(hours=1))),
- ),
- (
- 2,
- datetime(
- 2025,
- 5,
- 14,
- 15,
- 20,
- 30,
- 123000,
- tzinfo=timezone(timedelta(hours=-5)),
- ),
- ),
- ]
-
- for i, dt in test_rows:
- cursor.execute("INSERT INTO #pytest_dto_user_test (id, Systime) VALUES (?, ?);", i, dt)
- db_connection.commit()
-
- # Native fetch (like the user's first execute)
- cursor.execute("SELECT Systime FROM #pytest_dto_user_test WHERE id=1;")
- dt_native = cursor.fetchone()[0]
- assert dt_native.tzinfo is not None
- assert dt_native == test_rows[0][1]
-
- # String fetch (like the user's convert to nvarchar)
- cursor.execute(
- "SELECT CONVERT(nvarchar(35), Systime, 121) FROM #pytest_dto_user_test WHERE id=1;"
- )
- dt_str = cursor.fetchone()[0]
- assert dt_str.endswith("+01:00") # original offset preserved
-
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_dto_user_test;")
- db_connection.commit()
-
-
-def test_lowercase_attribute(cursor, db_connection):
- """Test that the lowercase attribute properly converts column names to lowercase"""
-
- # Store original value to restore after test
- original_lowercase = mssql_python.lowercase
- drop_cursor = None
-
- try:
- # Create a test table with mixed-case column names
- cursor.execute("""
- CREATE TABLE #pytest_lowercase_test (
- ID INT PRIMARY KEY,
- UserName VARCHAR(50),
- EMAIL_ADDRESS VARCHAR(100),
- PhoneNumber VARCHAR(20)
- )
- """)
- db_connection.commit()
-
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_lowercase_test (ID, UserName, EMAIL_ADDRESS, PhoneNumber)
- VALUES (1, 'JohnDoe', 'john@example.com', '555-1234')
- """)
- db_connection.commit()
-
- # First test with lowercase=False (default)
- mssql_python.lowercase = False
- cursor1 = db_connection.cursor()
- cursor1.execute("SELECT * FROM #pytest_lowercase_test")
-
- # Description column names should preserve original case
- column_names1 = [desc[0] for desc in cursor1.description]
- assert "ID" in column_names1, "Column 'ID' should be present with original case"
- assert "UserName" in column_names1, "Column 'UserName' should be present with original case"
-
- # Make sure to consume all results and close the cursor
- cursor1.fetchall()
- cursor1.close()
-
- # Now test with lowercase=True
- mssql_python.lowercase = True
- cursor2 = db_connection.cursor()
- cursor2.execute("SELECT * FROM #pytest_lowercase_test")
-
- # Description column names should be lowercase
- column_names2 = [desc[0] for desc in cursor2.description]
- assert "id" in column_names2, "Column names should be lowercase when lowercase=True"
- assert "username" in column_names2, "Column names should be lowercase when lowercase=True"
-
- # Make sure to consume all results and close the cursor
- cursor2.fetchall()
- cursor2.close()
-
- # Create a fresh cursor for cleanup
- drop_cursor = db_connection.cursor()
-
- finally:
- # Restore original value
- mssql_python.lowercase = original_lowercase
-
- try:
- # Use a separate cursor for cleanup
- if drop_cursor:
- drop_cursor.execute("DROP TABLE IF EXISTS #pytest_lowercase_test")
- db_connection.commit()
- drop_cursor.close()
- except Exception as e:
- print(f"Warning: Failed to drop test table: {e}")
-
-
-def test_decimal_separator_function(cursor, db_connection):
- """Test decimal separator functionality with database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
-
- try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_separator_test (
- id INT PRIMARY KEY,
- decimal_value DECIMAL(10, 2)
- )
- """)
- db_connection.commit()
-
- # Insert test values with default separator (.)
- test_value = decimal.Decimal("123.45")
- cursor.execute(
- """
- INSERT INTO #pytest_decimal_separator_test (id, decimal_value)
- VALUES (1, ?)
- """,
- [test_value],
- )
- db_connection.commit()
-
- # First test with default decimal separator (.)
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default separator not found in string representation"
-
- # Now change to comma separator and test string representation
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
-
- # This should format the decimal with a comma in the string representation
- comma_str = str(row)
- assert (
- "123,45" in comma_str
- ), f"Expected comma in string representation but got: {comma_str}"
-
- finally:
- # Restore original decimal separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_separator_test")
- db_connection.commit()
-
-
-def test_decimal_separator_basic_functionality():
- """Test basic decimal separator functionality without database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
-
- try:
- # Test default value
- assert mssql_python.getDecimalSeparator() == ".", "Default decimal separator should be '.'"
-
- # Test setting to comma
- mssql_python.setDecimalSeparator(",")
- assert (
- mssql_python.getDecimalSeparator() == ","
- ), "Decimal separator should be ',' after setting"
-
- # Test setting to other valid separators
- mssql_python.setDecimalSeparator(":")
- assert (
- mssql_python.getDecimalSeparator() == ":"
- ), "Decimal separator should be ':' after setting"
-
- # Test invalid inputs
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("") # Empty string
-
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("too_long") # More than one character
-
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator(123) # Not a string
-
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
-
-def test_decimal_separator_with_multiple_values(cursor, db_connection):
- """Test decimal separator with multiple different decimal values"""
- original_separator = mssql_python.getDecimalSeparator()
-
- try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_multi_test (
- id INT PRIMARY KEY,
- positive_value DECIMAL(10, 2),
- negative_value DECIMAL(10, 2),
- zero_value DECIMAL(10, 2),
- small_value DECIMAL(10, 4)
- )
- """)
- db_connection.commit()
-
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_multi_test VALUES (1, 123.45, -67.89, 0.00, 0.0001)
- """)
- db_connection.commit()
-
- # Test with default separator first
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default positive value formatting incorrect"
- assert "-67.89" in default_str, "Default negative value formatting incorrect"
-
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- comma_str = str(row)
-
- # Verify comma is used in all decimal values
- assert "123,45" in comma_str, "Positive value not formatted with comma"
- assert "-67,89" in comma_str, "Negative value not formatted with comma"
- assert "0,00" in comma_str, "Zero value not formatted with comma"
- assert "0,0001" in comma_str, "Small value not formatted with comma"
-
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_multi_test")
- db_connection.commit()
-
-
-def test_decimal_separator_calculations(cursor, db_connection):
- """Test that decimal separator doesn't affect calculations"""
- original_separator = mssql_python.getDecimalSeparator()
-
- try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_calc_test (
- id INT PRIMARY KEY,
- value1 DECIMAL(10, 2),
- value2 DECIMAL(10, 2)
- )
- """)
- db_connection.commit()
-
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_calc_test VALUES (1, 10.25, 5.75)
- """)
- db_connection.commit()
-
- # Test with default separator
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation incorrect with default separator"
-
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
-
- # Calculations should still work correctly
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation affected by separator change"
-
- # But string representation should use comma
- assert "16,00" in str(row), "Sum result not formatted with comma in string representation"
-
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_calc_test")
- db_connection.commit()
-
-
-def test_cursor_setinputsizes_basic(db_connection):
- """Test the basic functionality of setinputsizes"""
-
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes")
- cursor.execute("""
- CREATE TABLE #test_inputsizes (
- string_col NVARCHAR(100),
- int_col INT
- )
- """)
-
- # Set input sizes for parameters
- cursor.setinputsizes([(mssql_python.SQL_WVARCHAR, 100, 0), (mssql_python.SQL_INTEGER, 0, 0)])
-
- # Execute with parameters
- cursor.execute("INSERT INTO #test_inputsizes VALUES (?, ?)", "Test String", 42)
-
- # Verify data was inserted correctly
- cursor.execute("SELECT * FROM #test_inputsizes")
- row = cursor.fetchone()
-
- assert row[0] == "Test String"
- assert row[1] == 42
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes")
-
-
-def test_cursor_setinputsizes_with_executemany_float(db_connection):
- """Test setinputsizes with executemany using float instead of Decimal"""
-
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_float")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_float (
- id INT,
- name NVARCHAR(50),
- price REAL /* Use REAL instead of DECIMAL */
- )
- """)
-
- # Prepare data with float values
- data = [(1, "Item 1", 10.99), (2, "Item 2", 20.50), (3, "Item 3", 30.75)]
-
- # Set input sizes for parameters
- cursor.setinputsizes(
- [
- (mssql_python.SQL_INTEGER, 0, 0),
- (mssql_python.SQL_WVARCHAR, 50, 0),
- (mssql_python.SQL_REAL, 0, 0),
- ]
- )
-
- # Execute with parameters
- cursor.executemany("INSERT INTO #test_inputsizes_float VALUES (?, ?, ?)", data)
-
- # Verify all data was inserted correctly
- cursor.execute("SELECT * FROM #test_inputsizes_float ORDER BY id")
- rows = cursor.fetchall()
-
- assert len(rows) == 3
- assert rows[0][0] == 1
- assert rows[0][1] == "Item 1"
- assert abs(rows[0][2] - 10.99) < 0.001
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_float")
-
-
-def test_cursor_setinputsizes_reset(db_connection):
- """Test that setinputsizes is reset after execution"""
-
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_reset")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_reset (
- col1 NVARCHAR(100),
- col2 INT
- )
- """)
-
- # Set input sizes for parameters
- cursor.setinputsizes([(mssql_python.SQL_WVARCHAR, 100, 0), (mssql_python.SQL_INTEGER, 0, 0)])
-
- # Execute with parameters
- cursor.execute("INSERT INTO #test_inputsizes_reset VALUES (?, ?)", "Test String", 42)
-
- # Verify inputsizes was reset
- assert cursor._inputsizes is None
-
- # Now execute again without setting input sizes
- cursor.execute("INSERT INTO #test_inputsizes_reset VALUES (?, ?)", "Another String", 84)
-
- # Verify both rows were inserted correctly
- cursor.execute("SELECT * FROM #test_inputsizes_reset ORDER BY col2")
- rows = cursor.fetchall()
-
- assert len(rows) == 2
- assert rows[0][0] == "Test String"
- assert rows[0][1] == 42
- assert rows[1][0] == "Another String"
- assert rows[1][1] == 84
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_reset")
-
-
-def test_cursor_setinputsizes_override_inference(db_connection):
- """Test that setinputsizes overrides type inference"""
-
- cursor = db_connection.cursor()
-
- # Create a test table with specific types
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_override")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_override (
- small_int SMALLINT,
- big_text NVARCHAR(MAX)
- )
- """)
-
- # Set input sizes that override the default inference
- # For SMALLINT, use a valid precision value (5 is typical for SMALLINT)
- cursor.setinputsizes(
- [
- (mssql_python.SQL_SMALLINT, 5, 0), # Use valid precision for SMALLINT
- (mssql_python.SQL_WVARCHAR, 8000, 0), # Force short string to NVARCHAR(MAX)
- ]
- )
-
- # Test with values that would normally be inferred differently
- big_number = 30000 # Would normally be INTEGER or BIGINT
- short_text = "abc" # Would normally be a regular NVARCHAR
-
- try:
- cursor.execute(
- "INSERT INTO #test_inputsizes_override VALUES (?, ?)",
- big_number,
- short_text,
- )
-
- # Verify the row was inserted (may have been truncated by SQL Server)
- cursor.execute("SELECT * FROM #test_inputsizes_override")
- row = cursor.fetchone()
-
- # SQL Server would either truncate or round the value
- assert row[1] == short_text
-
- except Exception as e:
- # If an exception occurs, it should be related to the data type conversion
- # Add "invalid precision" to the expected error messages
- error_text = str(e).lower()
- assert any(
- text in error_text
- for text in [
- "overflow",
- "out of range",
- "convert",
- "invalid precision",
- "precision value",
- ]
- ), f"Unexpected error: {e}"
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_override")
-
-
-def test_setinputsizes_parameter_count_mismatch_fewer(db_connection):
- """Test setinputsizes with fewer sizes than parameters"""
- import warnings
-
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_mismatch (
- col1 INT,
- col2 NVARCHAR(100),
- col3 FLOAT
- )
- """)
-
- # Set fewer input sizes than parameters
- cursor.setinputsizes(
- [
- (mssql_python.SQL_INTEGER, 0, 0),
- (mssql_python.SQL_WVARCHAR, 100, 0),
- # Missing third parameter type
- ]
- )
-
- # Execute with more parameters than specified input sizes
- # This should use automatic type inference for the third parameter
- with warnings.catch_warnings(record=True) as w:
- cursor.execute(
- "INSERT INTO #test_inputsizes_mismatch VALUES (?, ?, ?)",
- 1,
- "Test String",
- 3.14,
- )
- assert len(w) > 0, "Warning should be issued for parameter count mismatch"
- assert "number of input sizes" in str(w[0].message).lower()
-
- # Verify data was inserted correctly
- cursor.execute("SELECT * FROM #test_inputsizes_mismatch")
- row = cursor.fetchone()
-
- assert row[0] == 1
- assert row[1] == "Test String"
- assert abs(row[2] - 3.14) < 0.0001
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
-
-
-def test_setinputsizes_parameter_count_mismatch_more(db_connection):
- """Test setinputsizes with more sizes than parameters"""
- import warnings
-
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_mismatch (
- col1 INT,
- col2 NVARCHAR(100)
- )
- """)
-
- # Set more input sizes than parameters
- cursor.setinputsizes(
- [
- (mssql_python.SQL_INTEGER, 0, 0),
- (mssql_python.SQL_WVARCHAR, 100, 0),
- (mssql_python.SQL_FLOAT, 0, 0), # Extra parameter type
- ]
- )
-
- # Execute with fewer parameters than specified input sizes
- with warnings.catch_warnings(record=True) as w:
- cursor.execute("INSERT INTO #test_inputsizes_mismatch VALUES (?, ?)", 1, "Test String")
- assert len(w) > 0, "Warning should be issued for parameter count mismatch"
- assert "number of input sizes" in str(w[0].message).lower()
-
- # Verify data was inserted correctly
- cursor.execute("SELECT * FROM #test_inputsizes_mismatch")
- row = cursor.fetchone()
-
- assert row[0] == 1
- assert row[1] == "Test String"
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
-
-
-def test_setinputsizes_with_null_values(db_connection):
- """Test setinputsizes with NULL values for various data types"""
-
- cursor = db_connection.cursor()
-
- # Create a test table with multiple data types
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_null")
- cursor.execute("""
- CREATE TABLE #test_inputsizes_null (
- int_col INT,
- string_col NVARCHAR(100),
- float_col FLOAT,
- date_col DATE,
- binary_col VARBINARY(100)
- )
- """)
-
- # Set input sizes for all columns
- cursor.setinputsizes(
- [
- (mssql_python.SQL_INTEGER, 0, 0),
- (mssql_python.SQL_WVARCHAR, 100, 0),
- (mssql_python.SQL_FLOAT, 0, 0),
- (mssql_python.SQL_DATE, 0, 0),
- (mssql_python.SQL_VARBINARY, 100, 0),
- ]
- )
-
- # Insert row with all NULL values
- cursor.execute(
- "INSERT INTO #test_inputsizes_null VALUES (?, ?, ?, ?, ?)",
- None,
- None,
- None,
- None,
- None,
- )
-
- # Insert row with mix of NULL and non-NULL values
- cursor.execute(
- "INSERT INTO #test_inputsizes_null VALUES (?, ?, ?, ?, ?)",
- 42,
- None,
- 3.14,
- None,
- b"binary data",
- )
-
- # Verify data was inserted correctly
- cursor.execute(
- "SELECT * FROM #test_inputsizes_null ORDER BY CASE WHEN int_col IS NULL THEN 0 ELSE 1 END"
- )
- rows = cursor.fetchall()
-
- # First row should be all NULLs
- assert len(rows) == 2
- assert rows[0][0] is None
- assert rows[0][1] is None
- assert rows[0][2] is None
- assert rows[0][3] is None
- assert rows[0][4] is None
-
- # Second row should have mix of NULL and non-NULL
- assert rows[1][0] == 42
- assert rows[1][1] is None
- assert abs(rows[1][2] - 3.14) < 0.0001
- assert rows[1][3] is None
- assert rows[1][4] == b"binary data"
-
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_null")
-
-
-def test_setinputsizes_sql_injection_protection(db_connection):
- """Test that setinputsizes doesn't allow SQL injection"""
- cursor = db_connection.cursor()
-
- # Create a test table
- cursor.execute("CREATE TABLE #test_sql_injection (id INT, name VARCHAR(100))")
-
- # Insert legitimate data
- cursor.execute("INSERT INTO #test_sql_injection VALUES (1, 'safe')")
-
- # Set input sizes with potentially malicious SQL types and sizes
- try:
- # This should fail with a validation error
- cursor.setinputsizes([(999999, 1000000, 1000000)]) # Invalid SQL type
- except ValueError:
- pass # Expected
-
- # Test with valid types but attempt SQL injection in parameter
- cursor.setinputsizes([(mssql_python.SQL_VARCHAR, 100, 0)])
- injection_attempt = "x'; DROP TABLE #test_sql_injection; --"
-
- # This should safely parameterize without executing the injection
- cursor.execute("SELECT * FROM #test_sql_injection WHERE name = ?", injection_attempt)
-
- # Verify table still exists and injection didn't work
- cursor.execute("SELECT COUNT(*) FROM #test_sql_injection")
- count = cursor.fetchone()[0]
- assert count == 1, "SQL injection protection failed"
-
- # Clean up
- cursor.execute("DROP TABLE #test_sql_injection")
-
-
-def test_gettypeinfo_all_types(cursor):
- """Test getTypeInfo with no arguments returns all data types"""
- # Get all type information
- type_info = cursor.getTypeInfo().fetchall()
-
- # Verify we got results
- assert type_info is not None, "getTypeInfo() should return results"
- assert len(type_info) > 0, "getTypeInfo() should return at least one data type"
-
- # Verify common data types are present
- type_names = [str(row.type_name).upper() for row in type_info]
- assert any("VARCHAR" in name for name in type_names), "VARCHAR type should be in results"
- assert any("INT" in name for name in type_names), "INTEGER type should be in results"
-
- # Verify first row has expected columns
- first_row = type_info[0]
- assert hasattr(first_row, "type_name"), "Result should have type_name column"
- assert hasattr(first_row, "data_type"), "Result should have data_type column"
- assert hasattr(first_row, "column_size"), "Result should have column_size column"
- assert hasattr(first_row, "nullable"), "Result should have nullable column"
-
-
-def test_gettypeinfo_specific_type(cursor):
- """Test getTypeInfo with specific type argument"""
- from mssql_python.constants import ConstantsDDBC
-
- # Test with VARCHAR type (SQL_VARCHAR)
- varchar_info = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
-
- # Verify we got results specific to VARCHAR
- assert varchar_info is not None, "getTypeInfo(SQL_VARCHAR) should return results"
- assert len(varchar_info) > 0, "getTypeInfo(SQL_VARCHAR) should return at least one row"
-
- # All rows should be related to VARCHAR type
- for row in varchar_info:
- assert (
- "varchar" in row.type_name or "char" in row.type_name
- ), f"Expected VARCHAR type, got {row.type_name}"
- assert (
- row.data_type == ConstantsDDBC.SQL_VARCHAR.value
- ), f"Expected data_type={ConstantsDDBC.SQL_VARCHAR.value}, got {row.data_type}"
-
-
-def test_gettypeinfo_result_structure(cursor):
- """Test the structure of getTypeInfo result rows"""
- # Get info for a common type like INTEGER
- from mssql_python.constants import ConstantsDDBC
-
- int_info = cursor.getTypeInfo(ConstantsDDBC.SQL_INTEGER.value).fetchall()
-
- # Make sure we have at least one result
- assert len(int_info) > 0, "getTypeInfo for INTEGER should return results"
-
- # Check for all required columns in the result
- first_row = int_info[0]
- required_columns = [
- "type_name",
- "data_type",
- "column_size",
- "literal_prefix",
- "literal_suffix",
- "create_params",
- "nullable",
- "case_sensitive",
- "searchable",
- "unsigned_attribute",
- "fixed_prec_scale",
- "auto_unique_value",
- "local_type_name",
- "minimum_scale",
- "maximum_scale",
- "sql_data_type",
- "sql_datetime_sub",
- "num_prec_radix",
- "interval_precision",
- ]
-
- for column in required_columns:
- assert hasattr(first_row, column), f"Result missing required column: {column}"
-
-
-def test_gettypeinfo_numeric_type(cursor):
- """Test getTypeInfo for numeric data types"""
- from mssql_python.constants import ConstantsDDBC
-
- # Get information about DECIMAL type
- decimal_info = cursor.getTypeInfo(ConstantsDDBC.SQL_DECIMAL.value).fetchall()
-
- # Verify decimal-specific attributes
- assert len(decimal_info) > 0, "getTypeInfo for DECIMAL should return results"
-
- decimal_row = decimal_info[0]
- # DECIMAL should have precision and scale parameters
- assert decimal_row.create_params is not None, "DECIMAL should have create_params"
- assert (
- "PRECISION" in decimal_row.create_params.upper()
- or "SCALE" in decimal_row.create_params.upper()
- ), "DECIMAL create_params should mention precision/scale"
-
- # Numeric types typically use base 10 for the num_prec_radix
- assert (
- decimal_row.num_prec_radix == 10
- ), f"Expected num_prec_radix=10 for DECIMAL, got {decimal_row.num_prec_radix}"
-
-
-def test_gettypeinfo_datetime_types(cursor):
- """Test getTypeInfo for datetime types"""
- from mssql_python.constants import ConstantsDDBC
-
- # Get information about TIMESTAMP type instead of DATETIME
- # SQL_TYPE_TIMESTAMP (93) is more commonly used for datetime in ODBC
- datetime_info = cursor.getTypeInfo(ConstantsDDBC.SQL_TYPE_TIMESTAMP.value).fetchall()
-
- # Verify we got datetime-related results
- assert len(datetime_info) > 0, "getTypeInfo for TIMESTAMP should return results"
-
- # Check for datetime-specific attributes
- first_row = datetime_info[0]
- assert hasattr(first_row, "type_name"), "Result should have type_name column"
-
- # Datetime type names often contain 'date', 'time', or 'datetime'
- type_name_lower = first_row.type_name.lower()
- assert any(
- term in type_name_lower for term in ["date", "time", "timestamp", "datetime"]
- ), f"Expected datetime-related type name, got {first_row.type_name}"
-
-
-def test_gettypeinfo_multiple_calls(cursor):
- """Test calling getTypeInfo multiple times in succession"""
- from mssql_python.constants import ConstantsDDBC
-
- # First call - get all types
- all_types = cursor.getTypeInfo().fetchall()
- assert len(all_types) > 0, "First call to getTypeInfo should return results"
-
- # Second call - get VARCHAR type
- varchar_info = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
- assert len(varchar_info) > 0, "Second call to getTypeInfo should return results"
-
- # Third call - get INTEGER type
- int_info = cursor.getTypeInfo(ConstantsDDBC.SQL_INTEGER.value).fetchall()
- assert len(int_info) > 0, "Third call to getTypeInfo should return results"
-
- # Verify the results are different between calls
- assert len(all_types) > len(
- varchar_info
- ), "All types should return more rows than specific type"
-
-
-def test_gettypeinfo_binary_types(cursor):
- """Test getTypeInfo for binary data types"""
- from mssql_python.constants import ConstantsDDBC
-
- # Get information about BINARY or VARBINARY type
- binary_info = cursor.getTypeInfo(ConstantsDDBC.SQL_BINARY.value).fetchall()
-
- # Verify we got binary-related results
- assert len(binary_info) > 0, "getTypeInfo for BINARY should return results"
-
- # Check for binary-specific attributes
- for row in binary_info:
- type_name_lower = row.type_name.lower()
- # Include 'timestamp' as SQL Server reports it as a binary type
- assert any(
- term in type_name_lower for term in ["binary", "blob", "image", "timestamp"]
- ), f"Expected binary-related type name, got {row.type_name}"
-
- # Binary types typically don't support case sensitivity
- assert (
- row.case_sensitive == 0
- ), f"Binary types should not be case sensitive, got {row.case_sensitive}"
-
-
-def test_gettypeinfo_cached_results(cursor):
- """Test that multiple identical calls to getTypeInfo are efficient"""
- from mssql_python.constants import ConstantsDDBC
- import time
-
- # First call - might be slower
- start_time = time.time()
- first_result = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
- first_duration = time.time() - start_time
-
- # Give the system a moment
- time.sleep(0.1)
-
- # Second call with same type - should be similar or faster
- start_time = time.time()
- second_result = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
- second_duration = time.time() - start_time
-
- # Results should be consistent
- assert len(first_result) == len(
- second_result
- ), "Multiple calls should return same number of results"
-
- # Both calls should return the correct type info
- for row in second_result:
- assert (
- row.data_type == ConstantsDDBC.SQL_VARCHAR.value
- ), f"Expected SQL_VARCHAR type, got {row.data_type}"
-
-
-def test_procedures_setup(cursor, db_connection):
- """Create a test schema and procedures for testing"""
- try:
- # Create a test schema for isolation
- cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_proc_schema') EXEC('CREATE SCHEMA pytest_proc_schema')"
- )
-
- # Create test stored procedures
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_proc1
- AS
- BEGIN
- SELECT 1 AS result
- END
- """)
-
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_proc2
- @param1 INT,
- @param2 VARCHAR(50) OUTPUT
- AS
- BEGIN
- SELECT @param2 = 'Output ' + CAST(@param1 AS VARCHAR(10))
- RETURN @param1
- END
- """)
-
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
-
-
-def test_procedures_all(cursor, db_connection):
- """Test getting information about all procedures"""
- # First set up our test procedures
- test_procedures_setup(cursor, db_connection)
-
- try:
- # Get all procedures
- procs = cursor.procedures().fetchall()
-
- # Verify we got results
- assert procs is not None, "procedures() should return results"
- assert len(procs) > 0, "procedures() should return at least one procedure"
-
- # Verify structure of results
- first_row = procs[0]
- assert hasattr(first_row, "procedure_cat"), "Result should have procedure_cat column"
- assert hasattr(first_row, "procedure_schem"), "Result should have procedure_schem column"
- assert hasattr(first_row, "procedure_name"), "Result should have procedure_name column"
- assert hasattr(first_row, "num_input_params"), "Result should have num_input_params column"
- assert hasattr(
- first_row, "num_output_params"
- ), "Result should have num_output_params column"
- assert hasattr(first_row, "num_result_sets"), "Result should have num_result_sets column"
- assert hasattr(first_row, "remarks"), "Result should have remarks column"
- assert hasattr(first_row, "procedure_type"), "Result should have procedure_type column"
-
- finally:
- # Clean up happens in test_procedures_cleanup
- pass
-
-
-def test_procedures_specific(cursor, db_connection):
- """Test getting information about a specific procedure"""
- try:
- # Get specific procedure
- procs = cursor.procedures(procedure="test_proc1", schema="pytest_proc_schema").fetchall()
-
- # Verify we got the correct procedure
- assert len(procs) == 1, "Should find exactly one procedure"
- proc = procs[0]
- assert proc.procedure_name == "test_proc1;1", "Wrong procedure name returned"
- assert proc.procedure_schem == "pytest_proc_schema", "Wrong schema returned"
-
- finally:
- # Clean up happens in test_procedures_cleanup
- pass
-
-
-def test_procedures_with_schema(cursor, db_connection):
- """Test getting procedures with schema filter"""
- try:
- # Get procedures for our test schema
- procs = cursor.procedures(schema="pytest_proc_schema").fetchall()
-
- # Verify schema filter worked
- assert len(procs) >= 2, "Should find at least two procedures in schema"
- for proc in procs:
- assert (
- proc.procedure_schem == "pytest_proc_schema"
- ), f"Expected schema pytest_proc_schema, got {proc.procedure_schem}"
-
- # Verify our specific procedures are in the results
- proc_names = [p.procedure_name for p in procs]
- assert "test_proc1;1" in proc_names, "test_proc1;1 should be in results"
- assert "test_proc2;1" in proc_names, "test_proc2;1 should be in results"
-
- finally:
- # Clean up happens in test_procedures_cleanup
- pass
-
-
-def test_procedures_nonexistent(cursor):
- """Test procedures() with non-existent procedure name"""
- # Use a procedure name that's highly unlikely to exist
- procs = cursor.procedures(procedure="nonexistent_procedure_xyz123").fetchall()
-
- # Should return empty list, not error
- assert isinstance(procs, list), "Should return a list for non-existent procedure"
- assert len(procs) == 0, "Should return empty list for non-existent procedure"
-
-
-def test_procedures_catalog_filter(cursor, db_connection):
- """Test procedures() with catalog filter"""
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- current_db = cursor.fetchone().current_db
-
- try:
- # Get procedures with current catalog
- procs = cursor.procedures(catalog=current_db, schema="pytest_proc_schema").fetchall()
-
- # Verify catalog filter worked
- assert len(procs) >= 2, "Should find procedures in current catalog"
- for proc in procs:
- assert (
- proc.procedure_cat == current_db
- ), f"Expected catalog {current_db}, got {proc.procedure_cat}"
-
- # Get procedures with non-existent catalog
- fake_procs = cursor.procedures(catalog="nonexistent_db_xyz123").fetchall()
- assert len(fake_procs) == 0, "Should return empty list for non-existent catalog"
-
- finally:
- # Clean up happens in test_procedures_cleanup
- pass
-
-
-def test_procedures_with_parameters(cursor, db_connection):
- """Test that procedures() correctly reports parameter information"""
- try:
- # Create a simpler procedure with basic parameters
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_params_proc
- @in1 INT,
- @in2 VARCHAR(50)
- AS
- BEGIN
- SELECT @in1 AS value1, @in2 AS value2
- END
- """)
- db_connection.commit()
-
- # Get procedure info
- procs = cursor.procedures(
- procedure="test_params_proc", schema="pytest_proc_schema"
- ).fetchall()
-
- # Verify we found the procedure
- assert len(procs) == 1, "Should find exactly one procedure"
- proc = procs[0]
-
- # Just check if columns exist, don't check specific values
- assert hasattr(proc, "num_input_params"), "Result should have num_input_params column"
- assert hasattr(proc, "num_output_params"), "Result should have num_output_params column"
-
- # Test simple execution without output parameters
- cursor.execute("EXEC pytest_proc_schema.test_params_proc 10, 'Test'")
-
- # Verify the procedure returned expected values
- row = cursor.fetchone()
- assert row is not None, "Procedure should return results"
- assert row[0] == 10, "First parameter value incorrect"
- assert row[1] == "Test", "Second parameter value incorrect"
-
- finally:
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_params_proc")
- db_connection.commit()
-
-
-def test_procedures_result_set_info(cursor, db_connection):
- """Test that procedures() reports information about result sets"""
- try:
- # Create procedures with different result set patterns
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_no_results
- AS
- BEGIN
- DECLARE @x INT = 1
- END
- """)
-
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_one_result
- AS
- BEGIN
- SELECT 1 AS col1, 'test' AS col2
- END
- """)
-
- cursor.execute("""
- CREATE OR ALTER PROCEDURE pytest_proc_schema.test_multiple_results
- AS
- BEGIN
- SELECT 1 AS result1
- SELECT 'test' AS result2
- SELECT GETDATE() AS result3
- END
- """)
- db_connection.commit()
-
- # Get procedure info for all test procedures
- procs = cursor.procedures(schema="pytest_proc_schema", procedure="test_%").fetchall()
+ cursor = db_connection.cursor()
- # Verify we found at least some procedures
- assert len(procs) > 0, "Should find at least some test procedures"
+ # Create a test table
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_mismatch (
+ col1 INT,
+ col2 NVARCHAR(100)
+ )
+ """)
- # Get the procedure names we found
- result_proc_names = [
- p.procedure_name
- for p in procs
- if p.procedure_name.startswith("test_") and "results" in p.procedure_name
+ # Set more input sizes than parameters
+ cursor.setinputsizes(
+ [
+ (mssql_python.SQL_INTEGER, 0, 0),
+ (mssql_python.SQL_WVARCHAR, 100, 0),
+ (mssql_python.SQL_FLOAT, 0, 0), # Extra parameter type
]
- print(f"Found result procedures: {result_proc_names}")
-
- # The num_result_sets column exists but might not have correct values
- for proc in procs:
- assert hasattr(proc, "num_result_sets"), "Result should have num_result_sets column"
-
- # Test execution of the procedures to verify they work
- cursor.execute("EXEC pytest_proc_schema.test_no_results")
- # Procedures with no results should have no description and calling fetchall() should raise an error
- assert (
- cursor.description is None
- ), "test_no_results should have no description (no result set)"
- # Don't call fetchall() on procedures with no results - this is invalid in ODBC
-
- cursor.execute("EXEC pytest_proc_schema.test_one_result")
- rows = cursor.fetchall()
- assert len(rows) == 1, "test_one_result should return one row"
- assert len(rows[0]) == 2, "test_one_result row should have two columns"
-
- cursor.execute("EXEC pytest_proc_schema.test_multiple_results")
- rows1 = cursor.fetchall()
- assert len(rows1) == 1, "First result set should have one row"
- assert cursor.nextset(), "Should have a second result set"
- rows2 = cursor.fetchall()
- assert len(rows2) == 1, "Second result set should have one row"
- assert cursor.nextset(), "Should have a third result set"
- rows3 = cursor.fetchall()
- assert len(rows3) == 1, "Third result set should have one row"
-
- finally:
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_no_results")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_one_result")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_multiple_results")
- db_connection.commit()
-
-
-def test_procedures_cleanup(cursor, db_connection):
- """Clean up all test procedures and schema after testing"""
- try:
- # Drop all test procedures
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_proc1")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_proc2")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_params_proc")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_no_results")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_one_result")
- cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_multiple_results")
-
- # Drop the test schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_proc_schema")
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test cleanup failed: {e}")
-
-
-def test_foreignkeys_setup(cursor, db_connection):
- """Create tables with foreign key relationships for testing"""
- try:
- # Create a test schema for isolation
- cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_fk_schema') EXEC('CREATE SCHEMA pytest_fk_schema')"
- )
-
- # Drop tables if they exist (in reverse order to avoid constraint conflicts)
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
-
- # Create parent table
- cursor.execute("""
- CREATE TABLE pytest_fk_schema.customers (
- customer_id INT PRIMARY KEY,
- customer_name VARCHAR(100) NOT NULL
- )
- """)
-
- # Create child table with foreign key
- cursor.execute("""
- CREATE TABLE pytest_fk_schema.orders (
- order_id INT PRIMARY KEY,
- order_date DATETIME NOT NULL,
- customer_id INT NOT NULL,
- total_amount DECIMAL(10, 2) NOT NULL,
- CONSTRAINT FK_Orders_Customers FOREIGN KEY (customer_id)
- REFERENCES pytest_fk_schema.customers (customer_id)
- )
- """)
-
- # Insert test data
- cursor.execute("""
- INSERT INTO pytest_fk_schema.customers (customer_id, customer_name)
- VALUES (1, 'Test Customer 1'), (2, 'Test Customer 2')
- """)
-
- cursor.execute("""
- INSERT INTO pytest_fk_schema.orders (order_id, order_date, customer_id, total_amount)
- VALUES (101, GETDATE(), 1, 150.00), (102, GETDATE(), 2, 250.50)
- """)
-
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
-
-
-def test_foreignkeys_all(cursor, db_connection):
- """Test getting all foreign keys"""
- try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
-
- # Get all foreign keys
- fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
-
- # Verify we got results
- assert fks is not None, "foreignKeys() should return results"
- assert len(fks) > 0, "foreignKeys() should return at least one foreign key"
-
- # Verify our test FK is in the results
- # Search case-insensitively since the database might return different case
- found_test_fk = False
- for fk in fks:
- if fk.fktable_name.lower() == "orders" and fk.pktable_name.lower() == "customers":
- found_test_fk = True
- break
-
- assert found_test_fk, "Could not find the test foreign key in results"
-
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
-
-
-def test_foreignkeys_specific_table(cursor, db_connection):
- """Test getting foreign keys for a specific table"""
- try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
-
- # Get foreign keys for the orders table
- fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
-
- # Verify we got results
- assert len(fks) == 1, "Should find exactly one foreign key for orders table"
+ )
- # Verify the foreign key details
- fk = fks[0]
- assert fk.fktable_name.lower() == "orders", "Wrong foreign key table name"
- assert fk.pktable_name.lower() == "customers", "Wrong primary key table name"
- assert fk.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
- assert fk.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
+ # Execute with fewer parameters than specified input sizes
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always") # ensure repeat warnings are still recorded
+ cursor.execute("INSERT INTO #test_inputsizes_mismatch VALUES (?, ?)", 1, "Test String")
+ assert len(w) > 0, "Warning should be issued for parameter count mismatch"
+ assert "number of input sizes" in str(w[0].message).lower()
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
+ # Verify data was inserted correctly
+ cursor.execute("SELECT * FROM #test_inputsizes_mismatch")
+ row = cursor.fetchone()
+ assert row[0] == 1
+ assert row[1] == "Test String"
-def test_foreignkeys_specific_foreign_table(cursor, db_connection):
- """Test getting foreign keys that reference a specific table"""
- try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_mismatch")
- # Get foreign keys that reference the customers table
- fks = cursor.foreignKeys(
- foreignTable="customers", foreignSchema="pytest_fk_schema"
- ).fetchall()
- # Verify we got results
- assert len(fks) > 0, "Should find at least one foreign key referencing customers table"
+def test_setinputsizes_with_null_values(db_connection):
+ """Test setinputsizes with NULL values for various data types"""
- # Verify our test FK is in the results
- found_test_fk = False
- for fk in fks:
- if fk.fktable_name.lower() == "orders" and fk.pktable_name.lower() == "customers":
- found_test_fk = True
- break
+ cursor = db_connection.cursor()
- assert found_test_fk, "Could not find the test foreign key in results"
+ # Create a test table with multiple data types
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_null")
+ cursor.execute("""
+ CREATE TABLE #test_inputsizes_null (
+ int_col INT,
+ string_col NVARCHAR(100),
+ float_col FLOAT,
+ date_col DATE,
+ binary_col VARBINARY(100)
+ )
+ """)
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
+ # Set input sizes for all columns
+ cursor.setinputsizes(
+ [
+ (mssql_python.SQL_INTEGER, 0, 0),
+ (mssql_python.SQL_WVARCHAR, 100, 0),
+ (mssql_python.SQL_FLOAT, 0, 0),
+ (mssql_python.SQL_DATE, 0, 0),
+ (mssql_python.SQL_VARBINARY, 100, 0),
+ ]
+ )
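+ # Explicit sizes matter most for NULLs: a bare None carries no value from
+ # which the driver could infer a SQL type.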
+ # Insert row with all NULL values
+ cursor.execute(
+ "INSERT INTO #test_inputsizes_null VALUES (?, ?, ?, ?, ?)",
+ None,
+ None,
+ None,
+ None,
+ None,
+ )
-def test_foreignkeys_both_tables(cursor, db_connection):
- """Test getting foreign keys with both table and foreignTable specified"""
- try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
+ # Insert row with mix of NULL and non-NULL values
+ cursor.execute(
+ "INSERT INTO #test_inputsizes_null VALUES (?, ?, ?, ?, ?)",
+ 42,
+ None,
+ 3.14,
+ None,
+ b"binary data",
+ )
- # Get foreign keys between the two tables
- fks = cursor.foreignKeys(
- table="orders",
- schema="pytest_fk_schema",
- foreignTable="customers",
- foreignSchema="pytest_fk_schema",
- ).fetchall()
+ # Verify data was inserted correctly
+ cursor.execute(
+ "SELECT * FROM #test_inputsizes_null ORDER BY CASE WHEN int_col IS NULL THEN 0 ELSE 1 END"
+ )
+ rows = cursor.fetchall()
- # Verify we got results
- assert len(fks) == 1, "Should find exactly one foreign key between specified tables"
+ # First row should be all NULLs
+ assert len(rows) == 2
+ assert rows[0][0] is None
+ assert rows[0][1] is None
+ assert rows[0][2] is None
+ assert rows[0][3] is None
+ assert rows[0][4] is None
- # Verify the foreign key details
- fk = fks[0]
- assert fk.fktable_name.lower() == "orders", "Wrong foreign key table name"
- assert fk.pktable_name.lower() == "customers", "Wrong primary key table name"
- assert fk.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
- assert fk.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
+ # Second row should have mix of NULL and non-NULL
+ assert rows[1][0] == 42
+ assert rows[1][1] is None
+ assert abs(rows[1][2] - 3.14) < 0.0001
+ assert rows[1][3] is None
+ assert rows[1][4] == b"binary data"
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_inputsizes_null")
-def test_foreignkeys_nonexistent(cursor):
- """Test foreignKeys() with non-existent table name"""
- # Use a table name that's highly unlikely to exist
- fks = cursor.foreignKeys(table="nonexistent_table_xyz123").fetchall()
+def test_setinputsizes_sql_injection_protection(db_connection):
+ """Test that setinputsizes doesn't allow SQL injection"""
+ cursor = db_connection.cursor()
- # Should return empty list, not error
- assert isinstance(fks, list), "Should return a list for non-existent table"
- assert len(fks) == 0, "Should return empty list for non-existent table"
+ # Create a test table
+ cursor.execute("CREATE TABLE #test_sql_injection (id INT, name VARCHAR(100))")
+ # Insert legitimate data
+ cursor.execute("INSERT INTO #test_sql_injection VALUES (1, 'safe')")
-def test_foreignkeys_catalog_schema(cursor, db_connection):
- """Test foreignKeys() with catalog and schema filters"""
+ # Set input sizes with potentially malicious SQL types and sizes
try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
+ # This should fail with a validation error
+ cursor.setinputsizes([(999999, 1000000, 1000000)]) # Invalid SQL type
+ except ValueError:
+ pass # Expected: the invalid SQL type constant is rejected
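+ # Even if setinputsizes accepted a bogus type, the parameterized SELECT
+ # below must still treat the payload as data, never as executable SQL.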
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- row = cursor.fetchone()
- current_db = row.current_db
+ # Test with valid types but attempt SQL injection in parameter
+ cursor.setinputsizes([(mssql_python.SQL_VARCHAR, 100, 0)])
+ injection_attempt = "x'; DROP TABLE #test_sql_injection; --"
- # Get foreign keys with current catalog and pytest schema
- fks = cursor.foreignKeys(
- table="orders", catalog=current_db, schema="pytest_fk_schema"
- ).fetchall()
+ # This should safely parameterize without executing the injection
+ cursor.execute("SELECT * FROM #test_sql_injection WHERE name = ?", injection_attempt)
- # Verify we got results
- assert len(fks) > 0, "Should find foreign keys with correct catalog/schema"
+ # Verify table still exists and injection didn't work
+ cursor.execute("SELECT COUNT(*) FROM #test_sql_injection")
+ count = cursor.fetchone()[0]
+ assert count == 1, "SQL injection protection failed"
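+ # Had the injected DROP executed, the COUNT query above would have failed
+ # with an invalid object name error.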
- # Verify catalog/schema in results
- for fk in fks:
- assert fk.fktable_cat == current_db, "Wrong foreign key table catalog"
- assert fk.fktable_schem == "pytest_fk_schema", "Wrong foreign key table schema"
+ # Clean up
+ cursor.execute("DROP TABLE #test_sql_injection")
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
+def test_gettypeinfo_all_types(cursor):
+ """Test getTypeInfo with no arguments returns all data types"""
+ # Get all type information
+ type_info = cursor.getTypeInfo().fetchall()
-def test_foreignkeys_result_structure(cursor, db_connection):
- """Test the structure of foreignKeys result rows"""
- try:
- # First set up our test tables
- test_foreignkeys_setup(cursor, db_connection)
+ # Verify we got results
+ assert type_info is not None, "getTypeInfo() should return results"
+ assert len(type_info) > 0, "getTypeInfo() should return at least one data type"
- # Get foreign keys for the orders table
- fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
+ # Verify common data types are present
+ type_names = [str(row.type_name).upper() for row in type_info]
+ assert any("VARCHAR" in name for name in type_names), "VARCHAR type should be in results"
+ assert any("INT" in name for name in type_names), "INTEGER type should be in results"
- # Verify we got results
- assert len(fks) > 0, "Should find at least one foreign key"
+ # Verify first row has expected columns
+ first_row = type_info[0]
+ assert hasattr(first_row, "type_name"), "Result should have type_name column"
+ assert hasattr(first_row, "data_type"), "Result should have data_type column"
+ assert hasattr(first_row, "column_size"), "Result should have column_size column"
+ assert hasattr(first_row, "nullable"), "Result should have nullable column"
- # Check for all required columns in the result
- first_row = fks[0]
- required_columns = [
- "pktable_cat",
- "pktable_schem",
- "pktable_name",
- "pkcolumn_name",
- "fktable_cat",
- "fktable_schem",
- "fktable_name",
- "fkcolumn_name",
- "key_seq",
- "update_rule",
- "delete_rule",
- "fk_name",
- "pk_name",
- "deferrability",
- ]
- for column in required_columns:
- assert hasattr(first_row, column), f"Result missing required column: {column}"
+def test_gettypeinfo_specific_type(cursor):
+ """Test getTypeInfo with specific type argument"""
+ from mssql_python.constants import ConstantsDDBC
- # Verify specific values
- assert first_row.fktable_name.lower() == "orders", "Wrong foreign key table name"
- assert first_row.pktable_name.lower() == "customers", "Wrong primary key table name"
- assert first_row.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
- assert first_row.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
- assert first_row.key_seq == 1, "Wrong key sequence number"
- assert first_row.fk_name is not None, "Foreign key name should not be None"
- assert first_row.pk_name is not None, "Primary key name should not be None"
+ # Test with VARCHAR type (SQL_VARCHAR)
+ varchar_info = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- db_connection.commit()
+ # Verify we got results specific to VARCHAR
+ assert varchar_info is not None, "getTypeInfo(SQL_VARCHAR) should return results"
+ assert len(varchar_info) > 0, "getTypeInfo(SQL_VARCHAR) should return at least one row"
+ # All rows should be related to VARCHAR type
+ for row in varchar_info:
+ assert (
+ "varchar" in row.type_name or "char" in row.type_name
+ ), f"Expected VARCHAR type, got {row.type_name}"
+ assert (
+ row.data_type == ConstantsDDBC.SQL_VARCHAR.value
+ ), f"Expected data_type={ConstantsDDBC.SQL_VARCHAR.value}, got {row.data_type}"
-def test_foreignkeys_multiple_column_fk(cursor, db_connection):
- """Test foreignKeys() with a multi-column foreign key"""
- try:
- # First create the schema if needed
- cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_fk_schema') EXEC('CREATE SCHEMA pytest_fk_schema')"
- )
- # Drop tables if they exist (in reverse order to avoid constraint conflicts)
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
+def test_gettypeinfo_result_structure(cursor):
+ """Test the structure of getTypeInfo result rows"""
+ # Get info for a common type like INTEGER
+ from mssql_python.constants import ConstantsDDBC
- # Create parent table with composite primary key
- cursor.execute("""
- CREATE TABLE pytest_fk_schema.product_variants (
- product_id INT NOT NULL,
- variant_id INT NOT NULL,
- variant_name VARCHAR(100) NOT NULL,
- PRIMARY KEY (product_id, variant_id)
- )
- """)
+ int_info = cursor.getTypeInfo(ConstantsDDBC.SQL_INTEGER.value).fetchall()
- # Create child table with composite foreign key
- cursor.execute("""
- CREATE TABLE pytest_fk_schema.order_details (
- order_id INT NOT NULL,
- product_id INT NOT NULL,
- variant_id INT NOT NULL,
- quantity INT NOT NULL,
- PRIMARY KEY (order_id, product_id, variant_id),
- CONSTRAINT FK_OrderDetails_ProductVariants FOREIGN KEY (product_id, variant_id)
- REFERENCES pytest_fk_schema.product_variants (product_id, variant_id)
- )
- """)
+ # Make sure we have at least one result
+ assert len(int_info) > 0, "getTypeInfo for INTEGER should return results"
- db_connection.commit()
+ # Check for all required columns in the result
+ first_row = int_info[0]
+ required_columns = [
+ "type_name",
+ "data_type",
+ "column_size",
+ "literal_prefix",
+ "literal_suffix",
+ "create_params",
+ "nullable",
+ "case_sensitive",
+ "searchable",
+ "unsigned_attribute",
+ "fixed_prec_scale",
+ "auto_unique_value",
+ "local_type_name",
+ "minimum_scale",
+ "maximum_scale",
+ "sql_data_type",
+ "sql_datetime_sub",
+ "num_prec_radix",
+ "interval_precision",
+ ]
- # Get foreign keys for the order_details table
- fks = cursor.foreignKeys(table="order_details", schema="pytest_fk_schema").fetchall()
+ for column in required_columns:
+ assert hasattr(first_row, column), f"Result missing required column: {column}"
- # Verify we got results
- assert len(fks) == 2, "Should find two rows for the composite foreign key (one per column)"
- # Group by key_seq to verify both columns
- fk_columns = {}
- for fk in fks:
- fk_columns[fk.key_seq] = {
- "pkcolumn": fk.pkcolumn_name.lower(),
- "fkcolumn": fk.fkcolumn_name.lower(),
- }
+def test_gettypeinfo_numeric_type(cursor):
+ """Test getTypeInfo for numeric data types"""
+ from mssql_python.constants import ConstantsDDBC
- # Verify both columns are present
- assert 1 in fk_columns, "First column of composite key missing"
- assert 2 in fk_columns, "Second column of composite key missing"
+ # Get information about DECIMAL type
+ decimal_info = cursor.getTypeInfo(ConstantsDDBC.SQL_DECIMAL.value).fetchall()
- # Verify column mappings
- assert fk_columns[1]["pkcolumn"] == "product_id", "Wrong primary key column 1"
- assert fk_columns[1]["fkcolumn"] == "product_id", "Wrong foreign key column 1"
- assert fk_columns[2]["pkcolumn"] == "variant_id", "Wrong primary key column 2"
- assert fk_columns[2]["fkcolumn"] == "variant_id", "Wrong foreign key column 2"
+ # Verify decimal-specific attributes
+ assert len(decimal_info) > 0, "getTypeInfo for DECIMAL should return results"
- finally:
- # Clean up
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
- db_connection.commit()
+ decimal_row = decimal_info[0]
+ # DECIMAL should have precision and scale parameters
+ assert decimal_row.create_params is not None, "DECIMAL should have create_params"
+ assert (
+ "PRECISION" in decimal_row.create_params.upper()
+ or "SCALE" in decimal_row.create_params.upper()
+ ), "DECIMAL create_params should mention precision/scale"
+ # Numeric types typically use base 10 for the num_prec_radix
+ assert (
+ decimal_row.num_prec_radix == 10
+ ), f"Expected num_prec_radix=10 for DECIMAL, got {decimal_row.num_prec_radix}"
-def test_cleanup_schema(cursor, db_connection):
- """Clean up the test schema after all tests"""
- try:
- # Make sure no tables remain
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
- cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
- db_connection.commit()
- # Drop the schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_fk_schema")
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Schema cleanup failed: {e}")
+def test_gettypeinfo_datetime_types(cursor):
+ """Test getTypeInfo for datetime types"""
+ from mssql_python.constants import ConstantsDDBC
+ # Get information about TIMESTAMP type instead of DATETIME
+ # SQL_TYPE_TIMESTAMP (93) is more commonly used for datetime in ODBC
+ datetime_info = cursor.getTypeInfo(ConstantsDDBC.SQL_TYPE_TIMESTAMP.value).fetchall()
-def test_primarykeys_setup(cursor, db_connection):
- """Create tables with primary keys for testing"""
- try:
- # Create a test schema for isolation
- cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_pk_schema') EXEC('CREATE SCHEMA pytest_pk_schema')"
- )
+ # Verify we got datetime-related results
+ assert len(datetime_info) > 0, "getTypeInfo for TIMESTAMP should return results"
- # Drop tables if they exist
- cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.single_pk_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.composite_pk_test")
+ # Check for datetime-specific attributes
+ first_row = datetime_info[0]
+ assert hasattr(first_row, "type_name"), "Result should have type_name column"
- # Create table with simple primary key
- cursor.execute("""
- CREATE TABLE pytest_pk_schema.single_pk_test (
- id INT PRIMARY KEY,
- name VARCHAR(100) NOT NULL,
- description VARCHAR(200) NULL
- )
- """)
+ # Datetime type names often contain 'date', 'time', or 'datetime'
+ type_name_lower = first_row.type_name.lower()
+ assert any(
+ term in type_name_lower for term in ["date", "time", "timestamp", "datetime"]
+ ), f"Expected datetime-related type name, got {first_row.type_name}"
- # Create table with composite primary key
- cursor.execute("""
- CREATE TABLE pytest_pk_schema.composite_pk_test (
- dept_id INT NOT NULL,
- emp_id INT NOT NULL,
- hire_date DATE NOT NULL,
- CONSTRAINT PK_composite_test PRIMARY KEY (dept_id, emp_id)
- )
- """)
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
+def test_gettypeinfo_multiple_calls(cursor):
+ """Test calling getTypeInfo multiple times in succession"""
+ from mssql_python.constants import ConstantsDDBC
+ # First call - get all types
+ all_types = cursor.getTypeInfo().fetchall()
+ assert len(all_types) > 0, "First call to getTypeInfo should return results"
-def test_primarykeys_simple(cursor, db_connection):
- """Test primaryKeys returns information about a simple primary key"""
- try:
- # First set up our test tables
- test_primarykeys_setup(cursor, db_connection)
+ # Second call - get VARCHAR type
+ varchar_info = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
+ assert len(varchar_info) > 0, "Second call to getTypeInfo should return results"
- # Get primary key information
- pks = cursor.primaryKeys("single_pk_test", schema="pytest_pk_schema").fetchall()
+ # Third call - get INTEGER type
+ int_info = cursor.getTypeInfo(ConstantsDDBC.SQL_INTEGER.value).fetchall()
+ assert len(int_info) > 0, "Third call to getTypeInfo should return results"
- # Verify we got results
- assert len(pks) == 1, "Should find exactly one primary key column"
- pk = pks[0]
+ # Verify the results are different between calls
+ assert len(all_types) > len(
+ varchar_info
+ ), "All types should return more rows than specific type"
- # Verify primary key details
- assert pk.table_name.lower() == "single_pk_test", "Wrong table name"
- assert pk.column_name.lower() == "id", "Wrong primary key column name"
- assert pk.key_seq == 1, "Wrong key sequence number"
- assert pk.pk_name is not None, "Primary key name should not be None"
- finally:
- # Clean up happens in test_primarykeys_cleanup
- pass
+def test_gettypeinfo_binary_types(cursor):
+ """Test getTypeInfo for binary data types"""
+ from mssql_python.constants import ConstantsDDBC
+ # Get information about BINARY or VARBINARY type
+ binary_info = cursor.getTypeInfo(ConstantsDDBC.SQL_BINARY.value).fetchall()
-def test_primarykeys_composite(cursor, db_connection):
- """Test primaryKeys with a composite primary key"""
- try:
- # Get primary key information
- pks = cursor.primaryKeys("composite_pk_test", schema="pytest_pk_schema").fetchall()
+ # Verify we got binary-related results
+ assert len(binary_info) > 0, "getTypeInfo for BINARY should return results"
- # Verify we got results for both columns
- assert len(pks) == 2, "Should find two primary key columns"
+ # Check for binary-specific attributes
+ for row in binary_info:
+ type_name_lower = row.type_name.lower()
+ # Include 'timestamp' as SQL Server reports it as a binary type
+ assert any(
+ term in type_name_lower for term in ["binary", "blob", "image", "timestamp"]
+ ), f"Expected binary-related type name, got {row.type_name}"
- # Sort by key_seq to ensure consistent order
- pks = sorted(pks, key=lambda row: row.key_seq)
+ # Binary types typically don't support case sensitivity
+ assert (
+ row.case_sensitive == 0
+ ), f"Binary types should not be case sensitive, got {row.case_sensitive}"
- # Verify first column
- assert pks[0].table_name.lower() == "composite_pk_test", "Wrong table name"
- assert pks[0].column_name.lower() == "dept_id", "Wrong first primary key column name"
- assert pks[0].key_seq == 1, "Wrong key sequence number for first column"
- # Verify second column
- assert pks[1].table_name.lower() == "composite_pk_test", "Wrong table name"
- assert pks[1].column_name.lower() == "emp_id", "Wrong second primary key column name"
- assert pks[1].key_seq == 2, "Wrong key sequence number for second column"
+def test_gettypeinfo_cached_results(cursor):
+ """Test that multiple identical calls to getTypeInfo are efficient"""
+ from mssql_python.constants import ConstantsDDBC
+ import time
- # Both should have the same PK name
- assert (
- pks[0].pk_name == pks[1].pk_name
- ), "Both columns should have the same primary key name"
+ # First call - might be slower
+ start_time = time.perf_counter()
+ first_result = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
+ first_duration = time.perf_counter() - start_time # informational only
- finally:
- # Clean up happens in test_primarykeys_cleanup
- pass
+ # Give the system a moment
+ time.sleep(0.1)
+ # Second call with the same type, timed the same way
+ start_time = time.perf_counter()
+ second_result = cursor.getTypeInfo(ConstantsDDBC.SQL_VARCHAR.value).fetchall()
+ second_duration = time.perf_counter() - start_time # informational only; not asserted to avoid flakiness
-def test_primarykeys_column_info(cursor, db_connection):
- """Test that primaryKeys returns correct column information"""
- try:
- # Get primary key information
- pks = cursor.primaryKeys("single_pk_test", schema="pytest_pk_schema").fetchall()
+ # Results should be consistent
+ assert len(first_result) == len(
+ second_result
+ ), "Multiple calls should return same number of results"
- # Verify column information
- assert len(pks) == 1, "Should find exactly one primary key column"
- pk = pks[0]
+ # Both calls should return the correct type info
+ for row in second_result:
+ assert (
+ row.data_type == ConstantsDDBC.SQL_VARCHAR.value
+ ), f"Expected SQL_VARCHAR type, got {row.data_type}"
- # Verify expected columns are present
- assert hasattr(pk, "table_cat"), "Result should have table_cat column"
- assert hasattr(pk, "table_schem"), "Result should have table_schem column"
- assert hasattr(pk, "table_name"), "Result should have table_name column"
- assert hasattr(pk, "column_name"), "Result should have column_name column"
- assert hasattr(pk, "key_seq"), "Result should have key_seq column"
- assert hasattr(pk, "pk_name"), "Result should have pk_name column"
- # Verify values are correct
- assert pk.table_schem.lower() == "pytest_pk_schema", "Wrong schema name"
- assert pk.table_name.lower() == "single_pk_test", "Wrong table name"
- assert pk.column_name.lower() == "id", "Wrong column name"
- assert isinstance(pk.key_seq, int), "key_seq should be an integer"
+def test_procedures_setup(cursor, db_connection):
+ """Create a test schema and procedures for testing"""
+ try:
+ # Create a test schema for isolation
+ cursor.execute(
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_proc_schema') EXEC('CREATE SCHEMA pytest_proc_schema')"
+ )
- finally:
- # Clean up happens in test_primarykeys_cleanup
- pass
+ # Create test stored procedures
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_proc1
+ AS
+ BEGIN
+ SELECT 1 AS result
+ END
+ """)
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_proc2
+ @param1 INT,
+ @param2 VARCHAR(50) OUTPUT
+ AS
+ BEGIN
+ SELECT @param2 = 'Output ' + CAST(@param1 AS VARCHAR(10))
+ RETURN @param1
+ END
+ """)
-def test_primarykeys_nonexistent(cursor):
- """Test primaryKeys() with non-existent table name"""
- # Use a table name that's highly unlikely to exist
- pks = cursor.primaryKeys("nonexistent_table_xyz123").fetchall()
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
- # Should return empty list, not error
- assert isinstance(pks, list), "Should return a list for non-existent table"
- assert len(pks) == 0, "Should return empty list for non-existent table"
+def test_procedures_all(cursor, db_connection):
+ """Test getting information about all procedures"""
+ # First set up our test procedures
+ test_procedures_setup(cursor, db_connection)
-def test_primarykeys_catalog_filter(cursor, db_connection):
- """Test primaryKeys() with catalog filter"""
try:
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- current_db = cursor.fetchone().current_db
-
- # Get primary keys with current catalog
- pks = cursor.primaryKeys(
- "single_pk_test", catalog=current_db, schema="pytest_pk_schema"
- ).fetchall()
+ # Get all procedures
+ procs = cursor.procedures().fetchall()
- # Verify catalog filter worked
- assert len(pks) == 1, "Should find exactly one primary key column"
- pk = pks[0]
- assert pk.table_cat == current_db, f"Expected catalog {current_db}, got {pk.table_cat}"
+ # Verify we got results
+ assert procs is not None, "procedures() should return results"
+ assert len(procs) > 0, "procedures() should return at least one procedure"
- # Get primary keys with non-existent catalog
- fake_pks = cursor.primaryKeys("single_pk_test", catalog="nonexistent_db_xyz123").fetchall()
- assert len(fake_pks) == 0, "Should return empty list for non-existent catalog"
+ # Verify structure of results
+ first_row = procs[0]
+ assert hasattr(first_row, "procedure_cat"), "Result should have procedure_cat column"
+ assert hasattr(first_row, "procedure_schem"), "Result should have procedure_schem column"
+ assert hasattr(first_row, "procedure_name"), "Result should have procedure_name column"
+ assert hasattr(first_row, "num_input_params"), "Result should have num_input_params column"
+ assert hasattr(
+ first_row, "num_output_params"
+ ), "Result should have num_output_params column"
+ assert hasattr(first_row, "num_result_sets"), "Result should have num_result_sets column"
+ assert hasattr(first_row, "remarks"), "Result should have remarks column"
+ assert hasattr(first_row, "procedure_type"), "Result should have procedure_type column"
finally:
- # Clean up happens in test_primarykeys_cleanup
+ # Clean up happens in test_procedures_cleanup
pass
-def test_primarykeys_cleanup(cursor, db_connection):
- """Clean up test tables after testing"""
+def test_procedures_specific(cursor, db_connection):
+ """Test getting information about a specific procedure"""
try:
- # Drop all test tables
- cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.single_pk_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.composite_pk_test")
-
- # Drop the test schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_pk_schema")
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test cleanup failed: {e}")
-
+ # Get specific procedure
+ procs = cursor.procedures(procedure="test_proc1", schema="pytest_proc_schema").fetchall()
-def test_rowcount_after_fetch_operations(cursor, db_connection):
- """Test that rowcount is updated correctly after various fetch operations."""
- try:
- # Create a test table
- cursor.execute("CREATE TABLE #rowcount_fetch_test (id INT PRIMARY KEY, name NVARCHAR(100))")
+ # Verify we got the correct procedure
+ assert len(procs) == 1, "Should find exactly one procedure"
+ proc = procs[0]
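+ # SQL Server appends the procedure group number (";1") to names returned
+ # by SQLProcedures, hence the suffix below.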
+ assert proc.procedure_name == "test_proc1;1", "Wrong procedure name returned"
+ assert proc.procedure_schem == "pytest_proc_schema", "Wrong schema returned"
- # Insert some test data
- cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (1, 'Row 1')")
- cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (2, 'Row 2')")
- cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (3, 'Row 3')")
- cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (4, 'Row 4')")
- cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (5, 'Row 5')")
- db_connection.commit()
+ finally:
+ # Clean up happens in test_procedures_cleanup
+ pass
- # Test fetchone
- cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
- # Initially, rowcount should be -1 after a SELECT statement
- assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
- # After fetchone, rowcount should be 1
- row = cursor.fetchone()
- assert row is not None, "Should fetch one row"
- assert cursor.rowcount == 1, "rowcount should be 1 after fetchone"
+def test_procedures_with_schema(cursor, db_connection):
+ """Test getting procedures with schema filter"""
+ try:
+ # Get procedures for our test schema
+ procs = cursor.procedures(schema="pytest_proc_schema").fetchall()
- # After another fetchone, rowcount should be 2
- row = cursor.fetchone()
- assert row is not None, "Should fetch second row"
- assert cursor.rowcount == 2, "rowcount should be 2 after second fetchone"
+ # Verify schema filter worked
+ assert len(procs) >= 2, "Should find at least two procedures in schema"
+ for proc in procs:
+ assert (
+ proc.procedure_schem == "pytest_proc_schema"
+ ), f"Expected schema pytest_proc_schema, got {proc.procedure_schem}"
- # Test fetchmany
- cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
- assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
+ # Verify our specific procedures are in the results
+ proc_names = [p.procedure_name for p in procs]
+ assert "test_proc1;1" in proc_names, "test_proc1;1 should be in results"
+ assert "test_proc2;1" in proc_names, "test_proc2;1 should be in results"
- # After fetchmany(2), rowcount should be 2
- rows = cursor.fetchmany(2)
- assert len(rows) == 2, "Should fetch two rows"
- assert cursor.rowcount == 2, "rowcount should be 2 after fetchmany(2)"
+ finally:
+ # Clean up happens in test_procedures_cleanup
+ pass
- # After another fetchmany(2), rowcount should be 4
- rows = cursor.fetchmany(2)
- assert len(rows) == 2, "Should fetch two more rows"
- assert cursor.rowcount == 4, "rowcount should be 4 after second fetchmany(2)"
- # Test fetchall
- cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
- assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
+def test_procedures_nonexistent(cursor):
+ """Test procedures() with non-existent procedure name"""
+ # Use a procedure name that's highly unlikely to exist
+ procs = cursor.procedures(procedure="nonexistent_procedure_xyz123").fetchall()
- # After fetchall, rowcount should be the total number of rows fetched (5)
- rows = cursor.fetchall()
- assert len(rows) == 5, "Should fetch all rows"
- assert cursor.rowcount == 5, "rowcount should be 5 after fetchall"
+ # Should return empty list, not error
+ assert isinstance(procs, list), "Should return a list for non-existent procedure"
+ assert len(procs) == 0, "Should return empty list for non-existent procedure"
- # Test mixed fetch operations
- cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
- # Fetch one row
- row = cursor.fetchone()
- assert row is not None, "Should fetch one row"
- assert cursor.rowcount == 1, "rowcount should be 1 after fetchone"
+def test_procedures_catalog_filter(cursor, db_connection):
+ """Test procedures() with catalog filter"""
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ current_db = cursor.fetchone().current_db
- # Fetch two more rows with fetchmany
- rows = cursor.fetchmany(2)
- assert len(rows) == 2, "Should fetch two more rows"
- assert cursor.rowcount == 3, "rowcount should be 3 after fetchone + fetchmany(2)"
+ try:
+ # Get procedures with current catalog
+ procs = cursor.procedures(catalog=current_db, schema="pytest_proc_schema").fetchall()
- # Fetch remaining rows with fetchall
- rows = cursor.fetchall()
- assert len(rows) == 2, "Should fetch remaining two rows"
- assert cursor.rowcount == 5, "rowcount should be 5 after fetchone + fetchmany(2) + fetchall"
+ # Verify catalog filter worked
+ assert len(procs) >= 2, "Should find procedures in current catalog"
+ for proc in procs:
+ assert (
+ proc.procedure_cat == current_db
+ ), f"Expected catalog {current_db}, got {proc.procedure_cat}"
- # Test fetchall on an empty result
- cursor.execute("SELECT * FROM #rowcount_fetch_test WHERE id > 100")
- rows = cursor.fetchall()
- assert len(rows) == 0, "Should fetch zero rows"
- assert cursor.rowcount == 0, "rowcount should be 0 after fetchall on empty result"
+ # Get procedures with non-existent catalog
+ fake_procs = cursor.procedures(catalog="nonexistent_db_xyz123").fetchall()
+ assert len(fake_procs) == 0, "Should return empty list for non-existent catalog"
finally:
- # Clean up
- try:
- cursor.execute("DROP TABLE #rowcount_fetch_test")
- db_connection.commit()
- except:
- pass
+ # Clean up happens in test_procedures_cleanup
+ pass
-def test_rowcount_guid_table(cursor, db_connection):
- """Test rowcount with GUID/uniqueidentifier columns to match the GitHub issue scenario."""
+def test_procedures_with_parameters(cursor, db_connection):
+ """Test that procedures() correctly reports parameter information"""
try:
- # Create a test table similar to the one in the GitHub issue
- cursor.execute(
- "CREATE TABLE #test_log (id uniqueidentifier PRIMARY KEY DEFAULT NEWID(), message VARCHAR(100))"
- )
-
- # Insert test data
- cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 1')")
- cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 2')")
- cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 3')")
+ # Create a simpler procedure with basic parameters
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_params_proc
+ @in1 INT,
+ @in2 VARCHAR(50)
+ AS
+ BEGIN
+ SELECT @in1 AS value1, @in2 AS value2
+ END
+ """)
db_connection.commit()
- # Execute SELECT query
- cursor.execute("SELECT * FROM #test_log")
- assert (
- cursor.rowcount == -1
- ), "Rowcount should be -1 after a SELECT statement (before fetch)"
-
- # Test fetchall
- rows = cursor.fetchall()
- assert len(rows) == 3, "Should fetch 3 rows"
- assert cursor.rowcount == 3, "Rowcount should be 3 after fetchall"
-
- # Execute SELECT again
- cursor.execute("SELECT * FROM #test_log")
-
- # Test fetchmany
- rows = cursor.fetchmany(2)
- assert len(rows) == 2, "Should fetch 2 rows"
- assert cursor.rowcount == 2, "Rowcount should be 2 after fetchmany(2)"
-
- # Fetch remaining row
- rows = cursor.fetchall()
- assert len(rows) == 1, "Should fetch 1 remaining row"
- assert cursor.rowcount == 3, "Rowcount should be 3 after fetchmany(2) + fetchall"
-
- # Execute SELECT again
- cursor.execute("SELECT * FROM #test_log")
+ # Get procedure info
+ procs = cursor.procedures(
+ procedure="test_params_proc", schema="pytest_proc_schema"
+ ).fetchall()
- # Test individual fetchone calls
- row1 = cursor.fetchone()
- assert row1 is not None, "First row should not be None"
- assert cursor.rowcount == 1, "Rowcount should be 1 after first fetchone"
+ # Verify we found the procedure
+ assert len(procs) == 1, "Should find exactly one procedure"
+ proc = procs[0]
- row2 = cursor.fetchone()
- assert row2 is not None, "Second row should not be None"
- assert cursor.rowcount == 2, "Rowcount should be 2 after second fetchone"
+ # Just check if columns exist, don't check specific values
+ assert hasattr(proc, "num_input_params"), "Result should have num_input_params column"
+ assert hasattr(proc, "num_output_params"), "Result should have num_output_params column"
- row3 = cursor.fetchone()
- assert row3 is not None, "Third row should not be None"
- assert cursor.rowcount == 3, "Rowcount should be 3 after third fetchone"
+ # Test simple execution without output parameters
+ cursor.execute("EXEC pytest_proc_schema.test_params_proc 10, 'Test'")
- row4 = cursor.fetchone()
- assert row4 is None, "Fourth row should be None (no more rows)"
- assert cursor.rowcount == 3, "Rowcount should remain 3 when fetchone returns None"
+ # Verify the procedure returned expected values
+ row = cursor.fetchone()
+ assert row is not None, "Procedure should return results"
+ assert row[0] == 10, "First parameter value incorrect"
+ assert row[1] == "Test", "Second parameter value incorrect"
finally:
- # Clean up
- try:
- cursor.execute("DROP TABLE #test_log")
- db_connection.commit()
- except:
- pass
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_params_proc")
+ db_connection.commit()
-def test_rowcount(cursor, db_connection):
- """Test rowcount after various operations"""
+def test_procedures_result_set_info(cursor, db_connection):
+ """Test that procedures() reports information about result sets"""
try:
- cursor.execute(
- "CREATE TABLE #pytest_test_rowcount (id INT IDENTITY(1,1) PRIMARY KEY, name NVARCHAR(100))"
- )
+ # Create procedures with different result set patterns
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_no_results
+ AS
+ BEGIN
+ DECLARE @x INT = 1
+ END
+ """)
+
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_one_result
+ AS
+ BEGIN
+ SELECT 1 AS col1, 'test' AS col2
+ END
+ """)
+
+ cursor.execute("""
+ CREATE OR ALTER PROCEDURE pytest_proc_schema.test_multiple_results
+ AS
+ BEGIN
+ SELECT 1 AS result1
+ SELECT 'test' AS result2
+ SELECT GETDATE() AS result3
+ END
+ """)
db_connection.commit()
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe1');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after first insert"
+ # Get procedure info for all test procedures
+ procs = cursor.procedures(schema="pytest_proc_schema", procedure="test_%").fetchall()
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe2');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after second insert"
+ # Verify we found at least some procedures
+ assert len(procs) > 0, "Should find at least some test procedures"
- cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe3');")
- assert cursor.rowcount == 1, "Rowcount should be 1 after third insert"
+ # Get the procedure names we found
+ result_proc_names = [
+ p.procedure_name
+ for p in procs
+ if p.procedure_name.startswith("test_") and "results" in p.procedure_name
+ ]
+ print(f"Found result procedures: {result_proc_names}")
- cursor.execute("""
- INSERT INTO #pytest_test_rowcount (name)
- VALUES
- ('JohnDoe4'),
- ('JohnDoe5'),
- ('JohnDoe6');
- """)
- assert cursor.rowcount == 3, "Rowcount should be 3 after inserting multiple rows"
+ # The num_result_sets column is reserved in ODBC, so only verify it exists
+ for proc in procs:
+ assert hasattr(proc, "num_result_sets"), "Result should have num_result_sets column"
- cursor.execute("SELECT * FROM #pytest_test_rowcount;")
+ # Test execution of the procedures to verify they work
+ cursor.execute("EXEC pytest_proc_schema.test_no_results")
+ # Procedures with no result set should leave cursor.description as None
assert (
- cursor.rowcount == -1
- ), "Rowcount should be -1 after a SELECT statement (before fetch)"
+ cursor.description is None
+ ), "test_no_results should have no description (no result set)"
+ # Don't call fetchall() on procedures with no results - this is invalid in ODBC
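+ # Hedged defensive pattern: fetch only when a result set actually exists.
+ if cursor.description is not None:
+ cursor.fetchall()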
- # After fetchall, rowcount should be updated to match the number of rows fetched
+ cursor.execute("EXEC pytest_proc_schema.test_one_result")
rows = cursor.fetchall()
- assert len(rows) == 6, "Should have fetched 6 rows"
- assert cursor.rowcount == 6, "Rowcount should be updated to 6 after fetchall"
+ assert len(rows) == 1, "test_one_result should return one row"
+ assert len(rows[0]) == 2, "test_one_result row should have two columns"
+
+ cursor.execute("EXEC pytest_proc_schema.test_multiple_results")
+ rows1 = cursor.fetchall()
+ assert len(rows1) == 1, "First result set should have one row"
+ assert cursor.nextset(), "Should have a second result set"
+ rows2 = cursor.fetchall()
+ assert len(rows2) == 1, "Second result set should have one row"
+ assert cursor.nextset(), "Should have a third result set"
+ rows3 = cursor.fetchall()
+ assert len(rows3) == 1, "Third result set should have one row"
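+ # Hedged: nextset() should report no further result sets once all three
+ # have been consumed (False or None are both acceptable here).
+ assert not cursor.nextset(), "There should be no fourth result set"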
+
+ finally:
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_no_results")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_one_result")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_multiple_results")
+ db_connection.commit()
+
+
+def test_procedures_cleanup(cursor, db_connection):
+ """Clean up all test procedures and schema after testing"""
+ try:
+ # Drop all test procedures
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_proc1")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_proc2")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_params_proc")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_no_results")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_one_result")
+ cursor.execute("DROP PROCEDURE IF EXISTS pytest_proc_schema.test_multiple_results")
+ # Drop the test schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_proc_schema")
db_connection.commit()
except Exception as e:
- pytest.fail(f"Rowcount test failed: {e}")
- finally:
- cursor.execute("DROP TABLE #pytest_test_rowcount")
+ pytest.fail(f"Test cleanup failed: {e}")
-def test_specialcolumns_setup(cursor, db_connection):
- """Create test tables for testing rowIdColumns and rowVerColumns"""
+def test_foreignkeys_setup(cursor, db_connection):
+ """Create tables with foreign key relationships for testing"""
try:
# Create a test schema for isolation
cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_special_schema') EXEC('CREATE SCHEMA pytest_special_schema')"
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_fk_schema') EXEC('CREATE SCHEMA pytest_fk_schema')"
)
- # Drop tables if they exist
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.rowid_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.timestamp_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.multiple_unique_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.identity_test")
+ # Drop tables if they exist (in reverse order to avoid constraint conflicts)
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
- # Create table with primary key (for rowIdColumns)
+ # Create parent table
cursor.execute("""
- CREATE TABLE pytest_special_schema.rowid_test (
- id INT PRIMARY KEY,
- name NVARCHAR(100) NOT NULL,
- unique_col NVARCHAR(100) UNIQUE,
- non_unique_col NVARCHAR(100)
+ CREATE TABLE pytest_fk_schema.customers (
+ customer_id INT PRIMARY KEY,
+ customer_name VARCHAR(100) NOT NULL
)
""")
- # Create table with rowversion column (for rowVerColumns)
+ # Create child table with foreign key
cursor.execute("""
- CREATE TABLE pytest_special_schema.timestamp_test (
- id INT PRIMARY KEY,
- name NVARCHAR(100) NOT NULL,
- last_updated ROWVERSION
+ CREATE TABLE pytest_fk_schema.orders (
+ order_id INT PRIMARY KEY,
+ order_date DATETIME NOT NULL,
+ customer_id INT NOT NULL,
+ total_amount DECIMAL(10, 2) NOT NULL,
+ CONSTRAINT FK_Orders_Customers FOREIGN KEY (customer_id)
+ REFERENCES pytest_fk_schema.customers (customer_id)
)
""")
- # Create table with multiple unique identifiers
+ # Insert test data
cursor.execute("""
- CREATE TABLE pytest_special_schema.multiple_unique_test (
- id INT NOT NULL,
- code VARCHAR(10) NOT NULL,
- email VARCHAR(100) UNIQUE,
- order_number VARCHAR(20) UNIQUE,
- CONSTRAINT PK_multiple_unique_test PRIMARY KEY (id, code)
- )
+ INSERT INTO pytest_fk_schema.customers (customer_id, customer_name)
+ VALUES (1, 'Test Customer 1'), (2, 'Test Customer 2')
+ """)
+
+ cursor.execute("""
+ INSERT INTO pytest_fk_schema.orders (order_id, order_date, customer_id, total_amount)
+ VALUES (101, GETDATE(), 1, 150.00), (102, GETDATE(), 2, 250.50)
""")
- # Create table with identity column
- cursor.execute("""
- CREATE TABLE pytest_special_schema.identity_test (
- id INT IDENTITY(1,1) PRIMARY KEY,
- name NVARCHAR(100) NOT NULL,
- last_modified DATETIME DEFAULT GETDATE()
- )
- """)
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
+
+
+def test_foreignkeys_all(cursor, db_connection):
+ """Test getting all foreign keys"""
+ try:
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
+
+ # Get foreign keys, scoped to the test table to keep the result set small
+ fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
+
+ # Verify we got results
+ assert fks is not None, "foreignKeys() should return results"
+ assert len(fks) > 0, "foreignKeys() should return at least one foreign key"
+
+ # Verify our test FK is in the results
+ # Search case-insensitively since the database might return different case
+ found_test_fk = False
+ for fk in fks:
+ if fk.fktable_name.lower() == "orders" and fk.pktable_name.lower() == "customers":
+ found_test_fk = True
+ break
+
+ assert found_test_fk, "Could not find the test foreign key in results"
+
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ db_connection.commit()
+
+
+def test_foreignkeys_specific_table(cursor, db_connection):
+ """Test getting foreign keys for a specific table"""
+ try:
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
+
+ # Get foreign keys for the orders table
+ fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
+
+ # Verify we got results
+ assert len(fks) == 1, "Should find exactly one foreign key for orders table"
+
+ # Verify the foreign key details
+ fk = fks[0]
+ assert fk.fktable_name.lower() == "orders", "Wrong foreign key table name"
+ assert fk.pktable_name.lower() == "customers", "Wrong primary key table name"
+ assert fk.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
+ assert fk.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
+
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ db_connection.commit()
+
+
+def test_foreignkeys_specific_foreign_table(cursor, db_connection):
+ """Test getting foreign keys that reference a specific table"""
+ try:
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
+
+ # Get foreign keys that reference the customers table
+ fks = cursor.foreignKeys(
+ foreignTable="customers", foreignSchema="pytest_fk_schema"
+ ).fetchall()
+
+ # Verify we got results
+ assert len(fks) > 0, "Should find at least one foreign key referencing customers table"
+
+ # Verify our test FK is in the results
+ found_test_fk = False
+ for fk in fks:
+ if fk.fktable_name.lower() == "orders" and fk.pktable_name.lower() == "customers":
+ found_test_fk = True
+ break
+
+ assert found_test_fk, "Could not find the test foreign key in results"
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
-def test_rowid_columns_basic(cursor, db_connection):
- """Test basic functionality of rowIdColumns"""
+def test_foreignkeys_both_tables(cursor, db_connection):
+ """Test getting foreign keys with both table and foreignTable specified"""
try:
- # Get row identifier columns for simple table
- rowid_cols = cursor.rowIdColumns(
- table="rowid_test", schema="pytest_special_schema"
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
+
+ # Get foreign keys between the two tables
+ fks = cursor.foreignKeys(
+ table="orders",
+ schema="pytest_fk_schema",
+ foreignTable="customers",
+ foreignSchema="pytest_fk_schema",
).fetchall()
- # LIMITATION: Only returns first column of primary key
- assert len(rowid_cols) == 1, "Should find exactly one ROWID column (first column of PK)"
+ # Verify we got results
+ assert len(fks) == 1, "Should find exactly one foreign key between specified tables"
- # Verify column name in the results
- col = rowid_cols[0]
- assert (
- col.column_name.lower() == "id"
- ), "Primary key column should be included in ROWID results"
+ # Verify the foreign key details
+ fk = fks[0]
+ assert fk.fktable_name.lower() == "orders", "Wrong foreign key table name"
+ assert fk.pktable_name.lower() == "customers", "Wrong primary key table name"
+ assert fk.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
+ assert fk.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
- # Verify result structure
- assert hasattr(col, "scope"), "Result should have scope column"
- assert hasattr(col, "column_name"), "Result should have column_name column"
- assert hasattr(col, "data_type"), "Result should have data_type column"
- assert hasattr(col, "type_name"), "Result should have type_name column"
- assert hasattr(col, "column_size"), "Result should have column_size column"
- assert hasattr(col, "buffer_length"), "Result should have buffer_length column"
- assert hasattr(col, "decimal_digits"), "Result should have decimal_digits column"
- assert hasattr(col, "pseudo_column"), "Result should have pseudo_column column"
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ db_connection.commit()
- # The scope should be one of the valid values or NULL
- assert col.scope in [0, 1, 2, None], f"Invalid scope value: {col.scope}"
- # The pseudo_column should be one of the valid values
- assert col.pseudo_column in [
- 0,
- 1,
- 2,
- None,
- ], f"Invalid pseudo_column value: {col.pseudo_column}"
+def test_foreignkeys_nonexistent(cursor):
+ """Test foreignKeys() with non-existent table name"""
+ # Use a table name that's highly unlikely to exist
+ fks = cursor.foreignKeys(table="nonexistent_table_xyz123").fetchall()
- except Exception as e:
- pytest.fail(f"rowIdColumns basic test failed: {e}")
- finally:
- # Clean up happens in test_specialcolumns_cleanup
- pass
+ # Should return empty list, not error
+ assert isinstance(fks, list), "Should return a list for non-existent table"
+ assert len(fks) == 0, "Should return empty list for non-existent table"
-def test_rowid_columns_identity(cursor, db_connection):
- """Test rowIdColumns with identity column"""
+def test_foreignkeys_catalog_schema(cursor, db_connection):
+ """Test foreignKeys() with catalog and schema filters"""
try:
- # Get row identifier columns for table with identity column
- rowid_cols = cursor.rowIdColumns(
- table="identity_test", schema="pytest_special_schema"
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
+
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ row = cursor.fetchone()
+ current_db = row.current_db
+
+ # Get foreign keys with current catalog and pytest schema
+ fks = cursor.foreignKeys(
+ table="orders", catalog=current_db, schema="pytest_fk_schema"
).fetchall()
- # LIMITATION: Only returns the identity column if it's the primary key
- assert len(rowid_cols) == 1, "Should find exactly one ROWID column (identity column as PK)"
+ # Verify we got results
+ assert len(fks) > 0, "Should find foreign keys with correct catalog/schema"
- # Verify it's the identity column
- col = rowid_cols[0]
- assert col.column_name.lower() == "id", "Identity column should be included as it's the PK"
+ # Verify catalog/schema in results
+ for fk in fks:
+ assert fk.fktable_cat == current_db, "Wrong foreign key table catalog"
+ assert fk.fktable_schem == "pytest_fk_schema", "Wrong foreign key table schema"
- except Exception as e:
- pytest.fail(f"rowIdColumns identity test failed: {e}")
finally:
- # Clean up happens in test_specialcolumns_cleanup
- pass
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ db_connection.commit()
-def test_rowid_columns_composite(cursor, db_connection):
- """Test rowIdColumns with composite primary key"""
+def test_foreignkeys_result_structure(cursor, db_connection):
+ """Test the structure of foreignKeys result rows"""
try:
- # Get row identifier columns for table with composite primary key
- rowid_cols = cursor.rowIdColumns(
- table="multiple_unique_test", schema="pytest_special_schema"
- ).fetchall()
-
- # LIMITATION: Only returns first column of composite primary key
- assert len(rowid_cols) >= 1, "Should find at least one ROWID column (first column of PK)"
+ # First set up our test tables
+ test_foreignkeys_setup(cursor, db_connection)
- # Verify column names in the results - should be the first PK column
- col_names = [col.column_name.lower() for col in rowid_cols]
- assert "id" in col_names, "First part of composite PK should be included"
+ # Get foreign keys for the orders table
+ fks = cursor.foreignKeys(table="orders", schema="pytest_fk_schema").fetchall()
- # LIMITATION: Other parts of the PK or unique constraints may not be included
- if len(rowid_cols) > 1:
- # If additional columns are returned, they should be valid
- for col in rowid_cols:
- assert col.column_name.lower() in [
- "id",
- "code",
- ], "Only PK columns should be returned"
+ # Verify we got results
+ assert len(fks) > 0, "Should find at least one foreign key"
- except Exception as e:
- pytest.fail(f"rowIdColumns composite test failed: {e}")
- finally:
- # Clean up happens in test_specialcolumns_cleanup
- pass
+ # Check for all required columns in the result
+ first_row = fks[0]
+ required_columns = [
+ "pktable_cat",
+ "pktable_schem",
+ "pktable_name",
+ "pkcolumn_name",
+ "fktable_cat",
+ "fktable_schem",
+ "fktable_name",
+ "fkcolumn_name",
+ "key_seq",
+ "update_rule",
+ "delete_rule",
+ "fk_name",
+ "pk_name",
+ "deferrability",
+ ]
+ for column in required_columns:
+ assert hasattr(first_row, column), f"Result missing required column: {column}"
-def test_rowid_columns_nonexistent(cursor):
- """Test rowIdColumns with non-existent table"""
- # Use a table name that's highly unlikely to exist
- rowid_cols = cursor.rowIdColumns("nonexistent_table_xyz123").fetchall()
+ # Verify specific values
+ assert first_row.fktable_name.lower() == "orders", "Wrong foreign key table name"
+ assert first_row.pktable_name.lower() == "customers", "Wrong primary key table name"
+ assert first_row.fkcolumn_name.lower() == "customer_id", "Wrong foreign key column name"
+ assert first_row.pkcolumn_name.lower() == "customer_id", "Wrong primary key column name"
+ assert first_row.key_seq == 1, "Wrong key sequence number"
+ assert first_row.fk_name is not None, "Foreign key name should not be None"
+ assert first_row.pk_name is not None, "Primary key name should not be None"
- # Should return empty list, not error
- assert isinstance(rowid_cols, list), "Should return a list for non-existent table"
- assert len(rowid_cols) == 0, "Should return empty list for non-existent table"
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ db_connection.commit()
-def test_rowid_columns_nullable(cursor, db_connection):
- """Test rowIdColumns with nullable parameter"""
+def test_foreignkeys_multiple_column_fk(cursor, db_connection):
+ """Test foreignKeys() with a multi-column foreign key"""
try:
- # First create a table with nullable unique column and non-nullable PK
- cursor.execute("""
- CREATE TABLE pytest_special_schema.nullable_test (
- id INT PRIMARY KEY, -- PK can't be nullable in SQL Server
- data NVARCHAR(100) NULL
+ # First create the schema if needed
+ cursor.execute(
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_fk_schema') EXEC('CREATE SCHEMA pytest_fk_schema')"
)
- """)
- db_connection.commit()
-
- # Test with nullable=True (default)
- rowid_cols_with_nullable = cursor.rowIdColumns(
- table="nullable_test", schema="pytest_special_schema"
- ).fetchall()
- # Verify PK column is included
- assert len(rowid_cols_with_nullable) == 1, "Should return exactly one column (PK)"
- assert (
- rowid_cols_with_nullable[0].column_name.lower() == "id"
- ), "PK column should be returned"
+ # Drop tables if they exist (in reverse order to avoid constraint conflicts)
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
- # Test with nullable=False
- rowid_cols_no_nullable = cursor.rowIdColumns(
- table="nullable_test", schema="pytest_special_schema", nullable=False
- ).fetchall()
+ # Create parent table with composite primary key
+ cursor.execute("""
+ CREATE TABLE pytest_fk_schema.product_variants (
+ product_id INT NOT NULL,
+ variant_id INT NOT NULL,
+ variant_name VARCHAR(100) NOT NULL,
+ PRIMARY KEY (product_id, variant_id)
+ )
+ """)
- # The behavior of SQLSpecialColumns with SQL_NO_NULLS is to only return
- # non-nullable columns that uniquely identify a row, but SQL Server returns
- # an empty set in this case - this is expected behavior
- assert (
- len(rowid_cols_no_nullable) == 0
- ), "Should return empty list when nullable=False (ODBC API behavior)"
+ # Create child table with composite foreign key
+ cursor.execute("""
+ CREATE TABLE pytest_fk_schema.order_details (
+ order_id INT NOT NULL,
+ product_id INT NOT NULL,
+ variant_id INT NOT NULL,
+ quantity INT NOT NULL,
+ PRIMARY KEY (order_id, product_id, variant_id),
+ CONSTRAINT FK_OrderDetails_ProductVariants FOREIGN KEY (product_id, variant_id)
+ REFERENCES pytest_fk_schema.product_variants (product_id, variant_id)
+ )
+ """)
- except Exception as e:
- pytest.fail(f"rowIdColumns nullable test failed: {e}")
- finally:
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_test")
db_connection.commit()
-
-def test_rowver_columns_basic(cursor, db_connection):
- """Test basic functionality of rowVerColumns"""
- try:
- # Get version columns from timestamp test table
- rowver_cols = cursor.rowVerColumns(
- table="timestamp_test", schema="pytest_special_schema"
- ).fetchall()
+ # Get foreign keys for the order_details table
+ fks = cursor.foreignKeys(table="order_details", schema="pytest_fk_schema").fetchall()
# Verify we got results
- assert len(rowver_cols) == 1, "Should find exactly one ROWVER column"
+ assert len(fks) == 2, "Should find two rows for the composite foreign key (one per column)"
- # Verify the column is the rowversion column
- rowver_col = rowver_cols[0]
- assert (
- rowver_col.column_name.lower() == "last_updated"
- ), "ROWVER column should be 'last_updated'"
- assert rowver_col.type_name.lower() in [
- "rowversion",
- "timestamp",
- ], "ROWVER column should have rowversion or timestamp type"
+ # Group by key_seq to verify both columns
+ fk_columns = {}
+ for fk in fks:
+ fk_columns[fk.key_seq] = {
+ "pkcolumn": fk.pkcolumn_name.lower(),
+ "fkcolumn": fk.fkcolumn_name.lower(),
+ }
- # Verify result structure - allowing for NULL values
- assert hasattr(rowver_col, "scope"), "Result should have scope column"
- assert hasattr(rowver_col, "column_name"), "Result should have column_name column"
- assert hasattr(rowver_col, "data_type"), "Result should have data_type column"
- assert hasattr(rowver_col, "type_name"), "Result should have type_name column"
- assert hasattr(rowver_col, "column_size"), "Result should have column_size column"
- assert hasattr(rowver_col, "buffer_length"), "Result should have buffer_length column"
- assert hasattr(rowver_col, "decimal_digits"), "Result should have decimal_digits column"
- assert hasattr(rowver_col, "pseudo_column"), "Result should have pseudo_column column"
+ # Verify both columns are present
+ assert 1 in fk_columns, "First column of composite key missing"
+ assert 2 in fk_columns, "Second column of composite key missing"
- # The scope should be one of the valid values or NULL
- assert rowver_col.scope in [
- 0,
- 1,
- 2,
- None,
- ], f"Invalid scope value: {rowver_col.scope}"
+ # Verify column mappings
+ assert fk_columns[1]["pkcolumn"] == "product_id", "Wrong primary key column 1"
+ assert fk_columns[1]["fkcolumn"] == "product_id", "Wrong foreign key column 1"
+ assert fk_columns[2]["pkcolumn"] == "variant_id", "Wrong primary key column 2"
+ assert fk_columns[2]["fkcolumn"] == "variant_id", "Wrong foreign key column 2"
- except Exception as e:
- pytest.fail(f"rowVerColumns basic test failed: {e}")
finally:
- # Clean up happens in test_specialcolumns_cleanup
- pass
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
+ db_connection.commit()
-def test_rowver_columns_nonexistent(cursor):
- """Test rowVerColumns with non-existent table"""
- # Use a table name that's highly unlikely to exist
- rowver_cols = cursor.rowVerColumns("nonexistent_table_xyz123").fetchall()
+def test_cleanup_schema(cursor, db_connection):
+ """Clean up the test schema after all tests"""
+ try:
+ # Make sure no tables remain
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.orders")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.customers")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.order_details")
+ cursor.execute("DROP TABLE IF EXISTS pytest_fk_schema.product_variants")
+ db_connection.commit()
- # Should return empty list, not error
- assert isinstance(rowver_cols, list), "Should return a list for non-existent table"
- assert len(rowver_cols) == 0, "Should return empty list for non-existent table"
+ # Drop the schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_fk_schema")
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Schema cleanup failed: {e}")
-def test_rowver_columns_nullable(cursor, db_connection):
- """Test rowVerColumns with nullable parameter (not expected to have effect)"""
+def test_primarykeys_setup(cursor, db_connection):
+ """Create tables with primary keys for testing"""
try:
- # First create a table with rowversion column
+ # Create a test schema for isolation
+ cursor.execute(
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_pk_schema') EXEC('CREATE SCHEMA pytest_pk_schema')"
+ )
+
+ # Drop tables if they exist
+ cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.single_pk_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.composite_pk_test")
+
+ # Create table with simple primary key
cursor.execute("""
- CREATE TABLE pytest_special_schema.nullable_rowver_test (
+ CREATE TABLE pytest_pk_schema.single_pk_test (
id INT PRIMARY KEY,
- ts ROWVERSION
+ name VARCHAR(100) NOT NULL,
+ description VARCHAR(200) NULL
)
""")
- db_connection.commit()
-
- # Test with nullable=True (default)
- rowver_cols_with_nullable = cursor.rowVerColumns(
- table="nullable_rowver_test", schema="pytest_special_schema"
- ).fetchall()
-
- # Verify rowversion column is included (rowversion can't be nullable)
- assert len(rowver_cols_with_nullable) == 1, "Should find exactly one ROWVER column"
- assert (
- rowver_cols_with_nullable[0].column_name.lower() == "ts"
- ), "ROWVERSION column should be included"
-
- # Test with nullable=False
- rowver_cols_no_nullable = cursor.rowVerColumns(
- table="nullable_rowver_test", schema="pytest_special_schema", nullable=False
- ).fetchall()
- # Verify rowversion column is still included
- assert len(rowver_cols_no_nullable) == 1, "Should find exactly one ROWVER column"
- assert (
- rowver_cols_no_nullable[0].column_name.lower() == "ts"
- ), "ROWVERSION column should be included even with nullable=False"
+ # Create table with composite primary key
+ cursor.execute("""
+ CREATE TABLE pytest_pk_schema.composite_pk_test (
+ dept_id INT NOT NULL,
+ emp_id INT NOT NULL,
+ hire_date DATE NOT NULL,
+ CONSTRAINT PK_composite_test PRIMARY KEY (dept_id, emp_id)
+ )
+ """)
- except Exception as e:
- pytest.fail(f"rowVerColumns nullable test failed: {e}")
- finally:
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_rowver_test")
db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
-def test_specialcolumns_catalog_filter(cursor, db_connection):
- """Test special columns with catalog filter"""
+def test_primarykeys_simple(cursor, db_connection):
+ """Test primaryKeys returns information about a simple primary key"""
try:
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- current_db = cursor.fetchone().current_db
-
- # Test rowIdColumns with current catalog
- rowid_cols = cursor.rowIdColumns(
- table="rowid_test", catalog=current_db, schema="pytest_special_schema"
- ).fetchall()
-
- # Verify catalog filter worked
- assert len(rowid_cols) > 0, "Should find ROWID columns with correct catalog"
-
- # Test rowIdColumns with non-existent catalog
- fake_rowid_cols = cursor.rowIdColumns(
- table="rowid_test",
- catalog="nonexistent_db_xyz123",
- schema="pytest_special_schema",
- ).fetchall()
- assert len(fake_rowid_cols) == 0, "Should return empty list for non-existent catalog"
+ # First set up our test tables
+ test_primarykeys_setup(cursor, db_connection)
- # Test rowVerColumns with current catalog
- rowver_cols = cursor.rowVerColumns(
- table="timestamp_test", catalog=current_db, schema="pytest_special_schema"
- ).fetchall()
+ # Get primary key information
+ pks = cursor.primaryKeys("single_pk_test", schema="pytest_pk_schema").fetchall()
- # Verify catalog filter worked
- assert len(rowver_cols) > 0, "Should find ROWVER columns with correct catalog"
+ # Verify we got results
+ assert len(pks) == 1, "Should find exactly one primary key column"
+ pk = pks[0]
- # Test rowVerColumns with non-existent catalog
- fake_rowver_cols = cursor.rowVerColumns(
- table="timestamp_test",
- catalog="nonexistent_db_xyz123",
- schema="pytest_special_schema",
- ).fetchall()
- assert len(fake_rowver_cols) == 0, "Should return empty list for non-existent catalog"
+ # Verify primary key details
+ assert pk.table_name.lower() == "single_pk_test", "Wrong table name"
+ assert pk.column_name.lower() == "id", "Wrong primary key column name"
+ assert pk.key_seq == 1, "Wrong key sequence number"
+ assert pk.pk_name is not None, "Primary key name should not be None"
- except Exception as e:
- pytest.fail(f"Special columns catalog filter test failed: {e}")
finally:
- # Clean up happens in test_specialcolumns_cleanup
+ # Clean up happens in test_primarykeys_cleanup
pass
-def test_specialcolumns_cleanup(cursor, db_connection):
- """Clean up test tables after testing"""
+def test_primarykeys_composite(cursor, db_connection):
+ """Test primaryKeys with a composite primary key"""
try:
- # Drop all test tables
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.rowid_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.timestamp_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.multiple_unique_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.identity_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_unique_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_timestamp_test")
+ # Get primary key information
+ pks = cursor.primaryKeys("composite_pk_test", schema="pytest_pk_schema").fetchall()
- # Drop the test schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_special_schema")
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test cleanup failed: {e}")
+ # Verify we got results for both columns
+ assert len(pks) == 2, "Should find two primary key columns"
+ # Sort by key_seq to ensure consistent order
+ pks = sorted(pks, key=lambda row: row.key_seq)
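+ # key_seq is 1-based and reflects each column's ordinal position within the
+ # primary key, per the ODBC SQLPrimaryKeys result set.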
-def test_statistics_setup(cursor, db_connection):
- """Create test tables and indexes for statistics testing"""
- try:
- # Create a test schema for isolation
- cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_stats_schema') EXEC('CREATE SCHEMA pytest_stats_schema')"
- )
+ # Verify first column
+ assert pks[0].table_name.lower() == "composite_pk_test", "Wrong table name"
+ assert pks[0].column_name.lower() == "dept_id", "Wrong first primary key column name"
+ assert pks[0].key_seq == 1, "Wrong key sequence number for first column"
- # Drop tables if they exist
- cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.stats_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.empty_stats_test")
+ # Verify second column
+ assert pks[1].table_name.lower() == "composite_pk_test", "Wrong table name"
+ assert pks[1].column_name.lower() == "emp_id", "Wrong second primary key column name"
+ assert pks[1].key_seq == 2, "Wrong key sequence number for second column"
- # Create test table with various indexes
- cursor.execute("""
- CREATE TABLE pytest_stats_schema.stats_test (
- id INT PRIMARY KEY,
- name VARCHAR(100) NOT NULL,
- email VARCHAR(100) UNIQUE,
- department VARCHAR(50) NOT NULL,
- salary DECIMAL(10, 2) NULL,
- hire_date DATE NOT NULL
- )
- """)
+ # Both should have the same PK name
+ assert (
+ pks[0].pk_name == pks[1].pk_name
+ ), "Both columns should have the same primary key name"
- # Create a non-unique index
- cursor.execute("""
- CREATE INDEX IX_stats_test_dept_date ON pytest_stats_schema.stats_test (department, hire_date)
- """)
+ finally:
+ # Clean up happens in test_primarykeys_cleanup
+ pass
- # Create a unique index on multiple columns
- cursor.execute("""
- CREATE UNIQUE INDEX UX_stats_test_name_dept ON pytest_stats_schema.stats_test (name, department)
- """)
- # Create an empty table for testing
- cursor.execute("""
- CREATE TABLE pytest_stats_schema.empty_stats_test (
- id INT PRIMARY KEY,
- data VARCHAR(100) NULL
- )
- """)
+def test_primarykeys_column_info(cursor, db_connection):
+ """Test that primaryKeys returns correct column information"""
+ try:
+ # Get primary key information
+ pks = cursor.primaryKeys("single_pk_test", schema="pytest_pk_schema").fetchall()
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
+ # Verify column information
+ assert len(pks) == 1, "Should find exactly one primary key column"
+ pk = pks[0]
+ # Verify expected columns are present
+ assert hasattr(pk, "table_cat"), "Result should have table_cat column"
+ assert hasattr(pk, "table_schem"), "Result should have table_schem column"
+ assert hasattr(pk, "table_name"), "Result should have table_name column"
+ assert hasattr(pk, "column_name"), "Result should have column_name column"
+ assert hasattr(pk, "key_seq"), "Result should have key_seq column"
+ assert hasattr(pk, "pk_name"), "Result should have pk_name column"
-def test_statistics_basic(cursor, db_connection):
- """Test basic functionality of statistics method"""
- try:
- # First set up our test tables
- test_statistics_setup(cursor, db_connection)
+ # Verify values are correct
+ assert pk.table_schem.lower() == "pytest_pk_schema", "Wrong schema name"
+ assert pk.table_name.lower() == "single_pk_test", "Wrong table name"
+ assert pk.column_name.lower() == "id", "Wrong column name"
+ assert isinstance(pk.key_seq, int), "key_seq should be an integer"
- # Get statistics for the test table (all indexes)
- stats = cursor.statistics(table="stats_test", schema="pytest_stats_schema").fetchall()
+ finally:
+ # Clean up happens in test_primarykeys_cleanup
+ pass
- # Verify we got results - should include PK, unique index on email, and non-unique index
- assert stats is not None, "statistics() should return results"
- assert len(stats) > 0, "statistics() should return at least one row"
- # Count different types of indexes
- table_stats = [s for s in stats if s.type == 0] # TABLE_STAT
- indexes = [s for s in stats if s.type != 0] # Actual indexes
+def test_primarykeys_nonexistent(cursor):
+ """Test primaryKeys() with non-existent table name"""
+ # Use a table name that's highly unlikely to exist
+ pks = cursor.primaryKeys("nonexistent_table_xyz123").fetchall()
- # We should have at least one table statistics row and multiple index rows
- assert len(table_stats) <= 1, "Should have at most one TABLE_STAT row"
- assert (
- len(indexes) >= 3
- ), "Should have at least 3 index entries (PK, unique email, non-unique dept+date)"
+ # Should return empty list, not error
+ assert isinstance(pks, list), "Should return a list for non-existent table"
+ assert len(pks) == 0, "Should return empty list for non-existent table"
- # Verify column names in results
- first_row = stats[0]
- assert hasattr(first_row, "table_name"), "Result should have table_name column"
- assert hasattr(first_row, "non_unique"), "Result should have non_unique column"
- assert hasattr(first_row, "index_name"), "Result should have index_name column"
- assert hasattr(first_row, "type"), "Result should have type column"
- assert hasattr(first_row, "column_name"), "Result should have column_name column"
- # Check that we can find the primary key
- pk_found = False
- for stat in stats:
- if hasattr(stat, "index_name") and stat.index_name and "pk" in stat.index_name.lower():
- pk_found = True
- break
+def test_primarykeys_catalog_filter(cursor, db_connection):
+ """Test primaryKeys() with catalog filter"""
+ try:
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ current_db = cursor.fetchone().current_db
- assert pk_found, "Primary key should be included in statistics results"
+ # Get primary keys with current catalog
+ pks = cursor.primaryKeys(
+ "single_pk_test", catalog=current_db, schema="pytest_pk_schema"
+ ).fetchall()
- # Check that we can find the unique index on email
- email_index_found = False
- for stat in stats:
- if (
- hasattr(stat, "column_name")
- and stat.column_name
- and stat.column_name.lower() == "email"
- and hasattr(stat, "non_unique")
- and stat.non_unique == 0
- ): # 0 = unique
- email_index_found = True
- break
+ # Verify catalog filter worked
+ assert len(pks) == 1, "Should find exactly one primary key column"
+ pk = pks[0]
+ assert pk.table_cat == current_db, f"Expected catalog {current_db}, got {pk.table_cat}"
- assert email_index_found, "Unique index on email should be included in statistics results"
+ # Get primary keys with non-existent catalog
+ fake_pks = cursor.primaryKeys("single_pk_test", catalog="nonexistent_db_xyz123").fetchall()
+ assert len(fake_pks) == 0, "Should return empty list for non-existent catalog"
finally:
- # Clean up happens in test_statistics_cleanup
+ # Clean up happens in test_primarykeys_cleanup
pass
-def test_statistics_unique_only(cursor, db_connection):
- """Test statistics with unique=True to get only unique indexes"""
+def test_primarykeys_cleanup(cursor, db_connection):
+ """Clean up test tables after testing"""
try:
- # Get statistics for only unique indexes
- stats = cursor.statistics(
- table="stats_test", schema="pytest_stats_schema", unique=True
- ).fetchall()
+ # Drop all test tables
+ cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.single_pk_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_pk_schema.composite_pk_test")
- # Verify we got results
- assert stats is not None, "statistics() with unique=True should return results"
- assert len(stats) > 0, "statistics() with unique=True should return at least one row"
+ # Drop the test schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_pk_schema")
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test cleanup failed: {e}")
- # All index entries should be for unique indexes (non_unique = 0)
- for stat in stats:
- if hasattr(stat, "type") and stat.type != 0: # Skip TABLE_STAT entries
- assert hasattr(stat, "non_unique"), "Index entry should have non_unique column"
- assert stat.non_unique == 0, "With unique=True, all indexes should be unique"
- # Count different types of indexes
- indexes = [s for s in stats if hasattr(s, "type") and s.type != 0]
+def test_rowcount_after_fetch_operations(cursor, db_connection):
+ """Test that rowcount is updated correctly after various fetch operations."""
+ try:
+ # Create a test table
+ cursor.execute("CREATE TABLE #rowcount_fetch_test (id INT PRIMARY KEY, name NVARCHAR(100))")
- # We should have multiple unique indexes (PK, unique email, unique name+dept)
- assert len(indexes) >= 3, "Should have at least 3 unique index entries"
+ # Insert some test data
+ cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (1, 'Row 1')")
+ cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (2, 'Row 2')")
+ cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (3, 'Row 3')")
+ cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (4, 'Row 4')")
+ cursor.execute("INSERT INTO #rowcount_fetch_test VALUES (5, 'Row 5')")
+ db_connection.commit()
- finally:
- # Clean up happens in test_statistics_cleanup
- pass
+ # Test fetchone
+ cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
+ # Initially, rowcount should be -1 after a SELECT statement
+ assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
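+ # Per DB-API 2.0, rowcount is -1 while the size of a SELECT result is not
+ # yet determined; the assertions below verify that this driver then updates
+ # rowcount incrementally as rows are fetched.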
+ # After fetchone, rowcount should be 1
+ row = cursor.fetchone()
+ assert row is not None, "Should fetch one row"
+ assert cursor.rowcount == 1, "rowcount should be 1 after fetchone"
-def test_statistics_empty_table(cursor, db_connection):
- """Test statistics on a table with no data (just schema)"""
- try:
- # Get statistics for the empty table
- stats = cursor.statistics(table="empty_stats_test", schema="pytest_stats_schema").fetchall()
+ # After another fetchone, rowcount should be 2
+ row = cursor.fetchone()
+ assert row is not None, "Should fetch second row"
+ assert cursor.rowcount == 2, "rowcount should be 2 after second fetchone"
- # Should still return metadata about the primary key
- assert stats is not None, "statistics() should return results even for empty table"
- assert len(stats) > 0, "statistics() should return at least one row for empty table"
+ # Test fetchmany
+ cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
+ assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
- # Check for primary key
- pk_found = False
- for stat in stats:
- if hasattr(stat, "index_name") and stat.index_name and "pk" in stat.index_name.lower():
- pk_found = True
- break
+ # After fetchmany(2), rowcount should be 2
+ rows = cursor.fetchmany(2)
+ assert len(rows) == 2, "Should fetch two rows"
+ assert cursor.rowcount == 2, "rowcount should be 2 after fetchmany(2)"
- assert pk_found, "Primary key should be included in statistics results for empty table"
+ # After another fetchmany(2), rowcount should be 4
+ rows = cursor.fetchmany(2)
+ assert len(rows) == 2, "Should fetch two more rows"
+ assert cursor.rowcount == 4, "rowcount should be 4 after second fetchmany(2)"
- finally:
- # Clean up happens in test_statistics_cleanup
- pass
+ # Test fetchall
+ cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
+ assert cursor.rowcount == -1, "rowcount should be -1 right after SELECT statement"
+ # After fetchall, rowcount should be the total number of rows fetched (5)
+ rows = cursor.fetchall()
+ assert len(rows) == 5, "Should fetch all rows"
+ assert cursor.rowcount == 5, "rowcount should be 5 after fetchall"
-def test_statistics_nonexistent(cursor):
- """Test statistics with non-existent table name"""
- # Use a table name that's highly unlikely to exist
- stats = cursor.statistics("nonexistent_table_xyz123").fetchall()
+ # Test mixed fetch operations
+ cursor.execute("SELECT * FROM #rowcount_fetch_test ORDER BY id")
- # Should return empty list, not error
- assert isinstance(stats, list), "Should return a list for non-existent table"
- assert len(stats) == 0, "Should return empty list for non-existent table"
+ # Fetch one row
+ row = cursor.fetchone()
+ assert row is not None, "Should fetch one row"
+ assert cursor.rowcount == 1, "rowcount should be 1 after fetchone"
+ # Fetch two more rows with fetchmany
+ rows = cursor.fetchmany(2)
+ assert len(rows) == 2, "Should fetch two more rows"
+ assert cursor.rowcount == 3, "rowcount should be 3 after fetchone + fetchmany(2)"
-def test_statistics_result_structure(cursor, db_connection):
- """Test the complete structure of statistics result rows"""
- try:
- # Get statistics for the test table
- stats = cursor.statistics(table="stats_test", schema="pytest_stats_schema").fetchall()
+ # Fetch remaining rows with fetchall
+ rows = cursor.fetchall()
+ assert len(rows) == 2, "Should fetch remaining two rows"
+ assert cursor.rowcount == 5, "rowcount should be 5 after fetchone + fetchmany(2) + fetchall"
- # Verify we have results
- assert len(stats) > 0, "Should have statistics results"
+ # Test fetchall on an empty result
+ cursor.execute("SELECT * FROM #rowcount_fetch_test WHERE id > 100")
+ rows = cursor.fetchall()
+ assert len(rows) == 0, "Should fetch zero rows"
+ assert cursor.rowcount == 0, "rowcount should be 0 after fetchall on empty result"
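+ # Note the distinction: 0 means "all rows fetched and there were none",
+ # whereas -1 means "result size not yet known (nothing fetched)".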
- # Find a row that's an actual index (not TABLE_STAT)
- index_row = None
- for stat in stats:
- if hasattr(stat, "type") and stat.type != 0:
- index_row = stat
- break
+ finally:
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #rowcount_fetch_test")
+ db_connection.commit()
- assert index_row is not None, "Should have at least one index row"
- # Check for all required columns
- required_columns = [
- "table_cat",
- "table_schem",
- "table_name",
- "non_unique",
- "index_qualifier",
- "index_name",
- "type",
- "ordinal_position",
- "column_name",
- "asc_or_desc",
- "cardinality",
- "pages",
- "filter_condition",
- ]
+def test_rowcount_guid_table(cursor, db_connection):
+ """Test rowcount with GUID/uniqueidentifier columns to match the GitHub issue scenario."""
+ try:
+ # Create a test table similar to the one in the GitHub issue
+ cursor.execute(
+ "CREATE TABLE #test_log (id uniqueidentifier PRIMARY KEY DEFAULT NEWID(), message VARCHAR(100))"
+ )
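+ # DEFAULT NEWID() makes the server generate the GUID, so the INSERTs below
+ # only need to supply the message column.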
+
+ # Insert test data
+ cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 1')")
+ cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 2')")
+ cursor.execute("INSERT INTO #test_log (message) VALUES ('Log 3')")
+ db_connection.commit()
- for column in required_columns:
- assert hasattr(index_row, column), f"Result missing required column: {column}"
+ # Execute SELECT query
+ cursor.execute("SELECT * FROM #test_log")
+ assert (
+ cursor.rowcount == -1
+ ), "Rowcount should be -1 after a SELECT statement (before fetch)"
- # Check types of key columns
- assert isinstance(index_row.table_name, str), "table_name should be a string"
- assert isinstance(index_row.type, int), "type should be an integer"
+ # Test fetchall
+ rows = cursor.fetchall()
+ assert len(rows) == 3, "Should fetch 3 rows"
+ assert cursor.rowcount == 3, "Rowcount should be 3 after fetchall"
- # Don't check the actual values of cardinality and pages as they may be NULL
- # or driver-dependent, especially for empty tables
+ # Execute SELECT again
+ cursor.execute("SELECT * FROM #test_log")
- finally:
- # Clean up happens in test_statistics_cleanup
- pass
+ # Test fetchmany
+ rows = cursor.fetchmany(2)
+ assert len(rows) == 2, "Should fetch 2 rows"
+ assert cursor.rowcount == 2, "Rowcount should be 2 after fetchmany(2)"
+ # Fetch remaining row
+ rows = cursor.fetchall()
+ assert len(rows) == 1, "Should fetch 1 remaining row"
+ assert cursor.rowcount == 3, "Rowcount should be 3 after fetchmany(2) + fetchall"
-def test_statistics_catalog_filter(cursor, db_connection):
- """Test statistics with catalog filter"""
- try:
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- current_db = cursor.fetchone().current_db
+ # Execute SELECT again
+ cursor.execute("SELECT * FROM #test_log")
- # Get statistics with current catalog
- stats = cursor.statistics(
- table="stats_test", catalog=current_db, schema="pytest_stats_schema"
- ).fetchall()
+ # Test individual fetchone calls
+ row1 = cursor.fetchone()
+ assert row1 is not None, "First row should not be None"
+ assert cursor.rowcount == 1, "Rowcount should be 1 after first fetchone"
- # Verify catalog filter worked
- assert len(stats) > 0, "Should find statistics with correct catalog"
+ row2 = cursor.fetchone()
+ assert row2 is not None, "Second row should not be None"
+ assert cursor.rowcount == 2, "Rowcount should be 2 after second fetchone"
- # Verify catalog in results
- for stat in stats:
- if hasattr(stat, "table_cat"):
- assert stat.table_cat.lower() == current_db.lower(), "Wrong table catalog"
+ row3 = cursor.fetchone()
+ assert row3 is not None, "Third row should not be None"
+ assert cursor.rowcount == 3, "Rowcount should be 3 after third fetchone"
- # Get statistics with non-existent catalog
- fake_stats = cursor.statistics(
- table="stats_test",
- catalog="nonexistent_db_xyz123",
- schema="pytest_stats_schema",
- ).fetchall()
- assert len(fake_stats) == 0, "Should return empty list for non-existent catalog"
+ row4 = cursor.fetchone()
+ assert row4 is None, "Fourth row should be None (no more rows)"
+ assert cursor.rowcount == 3, "Rowcount should remain 3 when fetchone returns None"
finally:
- # Clean up happens in test_statistics_cleanup
- pass
+ # Clean up
+ cursor.execute("DROP TABLE IF EXISTS #test_log")
+ db_connection.commit()
-def test_statistics_with_quick_parameter(cursor, db_connection):
- """Test statistics with quick parameter variations"""
+def test_rowcount(cursor, db_connection):
+ """Test rowcount after various operations"""
try:
- # Test with quick=True (default)
- quick_stats = cursor.statistics(
- table="stats_test", schema="pytest_stats_schema", quick=True
- ).fetchall()
+ cursor.execute(
+ "CREATE TABLE #pytest_test_rowcount (id INT IDENTITY(1,1) PRIMARY KEY, name NVARCHAR(100))"
+ )
+ db_connection.commit()
- # Test with quick=False
- thorough_stats = cursor.statistics(
- table="stats_test", schema="pytest_stats_schema", quick=False
- ).fetchall()
+ cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe1');")
+ assert cursor.rowcount == 1, "Rowcount should be 1 after first insert"
- # Both should return results, but we can't guarantee behavior differences
- # since it depends on the ODBC driver and database system
- assert len(quick_stats) > 0, "quick=True should return results"
- assert len(thorough_stats) > 0, "quick=False should return results"
+ cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe2');")
+ assert cursor.rowcount == 1, "Rowcount should be 1 after second insert"
- # Just verify that changing the parameter didn't cause errors
+ cursor.execute("INSERT INTO #pytest_test_rowcount (name) VALUES ('JohnDoe3');")
+ assert cursor.rowcount == 1, "Rowcount should be 1 after third insert"
- finally:
- # Clean up happens in test_statistics_cleanup
- pass
+ cursor.execute("""
+ INSERT INTO #pytest_test_rowcount (name)
+ VALUES
+ ('JohnDoe4'),
+ ('JohnDoe5'),
+ ('JohnDoe6');
+ """)
+ assert cursor.rowcount == 3, "Rowcount should be 3 after inserting multiple rows"
+ cursor.execute("SELECT * FROM #pytest_test_rowcount;")
+ assert (
+ cursor.rowcount == -1
+ ), "Rowcount should be -1 after a SELECT statement (before fetch)"
-def test_statistics_cleanup(cursor, db_connection):
- """Clean up test tables after testing"""
- try:
- # Drop all test tables
- cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.stats_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.empty_stats_test")
+ # After fetchall, rowcount should be updated to match the number of rows fetched
+ rows = cursor.fetchall()
+ assert len(rows) == 6, "Should have fetched 6 rows"
+ assert cursor.rowcount == 6, "Rowcount should be updated to 6 after fetchall"
- # Drop the test schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_stats_schema")
db_connection.commit()
except Exception as e:
- pytest.fail(f"Test cleanup failed: {e}")
+ pytest.fail(f"Rowcount test failed: {e}")
+ finally:
+ cursor.execute("DROP TABLE #pytest_test_rowcount")
-def test_columns_setup(cursor, db_connection):
- """Create test tables for columns method testing"""
+def test_specialcolumns_setup(cursor, db_connection):
+ """Create test tables for testing rowIdColumns and rowVerColumns"""
try:
# Create a test schema for isolation
cursor.execute(
- "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_cols_schema') EXEC('CREATE SCHEMA pytest_cols_schema')"
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_special_schema') EXEC('CREATE SCHEMA pytest_special_schema')"
)
# Drop tables if they exist
- cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_special_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.rowid_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.timestamp_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.multiple_unique_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.identity_test")
- # Create test table with various column types
- cursor.execute("""
- CREATE TABLE pytest_cols_schema.columns_test (
+ # Create table with primary key (for rowIdColumns)
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.rowid_test (
id INT PRIMARY KEY,
name NVARCHAR(100) NOT NULL,
- description NVARCHAR(MAX) NULL,
- price DECIMAL(10, 2) NULL,
- created_date DATETIME DEFAULT GETDATE(),
- is_active BIT NOT NULL DEFAULT 1,
- binary_data VARBINARY(MAX) NULL,
- notes TEXT NULL,
- [computed_col] AS (name + ' - ' + CAST(id AS VARCHAR(10)))
+ unique_col NVARCHAR(100) UNIQUE,
+ non_unique_col NVARCHAR(100)
)
""")
- # Create table with special column names and edge cases - fix the problematic column name
- cursor.execute("""
- CREATE TABLE pytest_cols_schema.columns_special_test (
- [ID] INT PRIMARY KEY,
- [User Name] NVARCHAR(100) NULL,
- [Spaces Multiple] VARCHAR(50) NULL,
- [123_numeric_start] INT NULL,
- [MAX] VARCHAR(20) NULL, -- SQL keyword as column name
- [SELECT] INT NULL, -- SQL keyword as column name
- [Column.With.Dots] VARCHAR(20) NULL,
- [Column/With/Slashes] VARCHAR(20) NULL,
- [Column_With_Underscores] VARCHAR(20) NULL -- Changed from problematic nested brackets
+ # Create table with rowversion column (for rowVerColumns)
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.timestamp_test (
+ id INT PRIMARY KEY,
+ name NVARCHAR(100) NOT NULL,
+ last_updated ROWVERSION
+ )
+ """)
+
+ # Create table with multiple unique identifiers
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.multiple_unique_test (
+ id INT NOT NULL,
+ code VARCHAR(10) NOT NULL,
+ email VARCHAR(100) UNIQUE,
+ order_number VARCHAR(20) UNIQUE,
+ CONSTRAINT PK_multiple_unique_test PRIMARY KEY (id, code)
+ )
+ """)
+
+ # Create table with identity column
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.identity_test (
+ id INT IDENTITY(1,1) PRIMARY KEY,
+ name NVARCHAR(100) NOT NULL,
+ last_modified DATETIME DEFAULT GETDATE()
)
""")
- db_connection.commit()
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
+
+
+def test_rowid_columns_basic(cursor, db_connection):
+ """Test basic functionality of rowIdColumns"""
+ try:
+ # Get row identifier columns for simple table
+ rowid_cols = cursor.rowIdColumns(
+ table="rowid_test", schema="pytest_special_schema"
+ ).fetchall()
+
+ # LIMITATION: Only returns first column of primary key
+ assert len(rowid_cols) == 1, "Should find exactly one ROWID column (first column of PK)"
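+ # In ODBC terms this is SQLSpecialColumns with SQL_BEST_ROWID, which asks
+ # for the optimal set of columns that uniquely identifies a row; the
+ # assertion above documents that this driver currently surfaces only the
+ # first PK column.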
+
+ # Verify column name in the results
+ col = rowid_cols[0]
+ assert (
+ col.column_name.lower() == "id"
+ ), "Primary key column should be included in ROWID results"
+
+ # Verify result structure
+ assert hasattr(col, "scope"), "Result should have scope column"
+ assert hasattr(col, "column_name"), "Result should have column_name column"
+ assert hasattr(col, "data_type"), "Result should have data_type column"
+ assert hasattr(col, "type_name"), "Result should have type_name column"
+ assert hasattr(col, "column_size"), "Result should have column_size column"
+ assert hasattr(col, "buffer_length"), "Result should have buffer_length column"
+ assert hasattr(col, "decimal_digits"), "Result should have decimal_digits column"
+ assert hasattr(col, "pseudo_column"), "Result should have pseudo_column column"
+
+ # The scope should be one of the valid values or NULL
+ assert col.scope in [0, 1, 2, None], f"Invalid scope value: {col.scope}"
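+ # scope values map to ODBC constants: 0 = SQL_SCOPE_CURROW,
+ # 1 = SQL_SCOPE_TRANSACTION, 2 = SQL_SCOPE_SESSION.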
+
+ # The pseudo_column should be one of the valid values
+ assert col.pseudo_column in [
+ 0,
+ 1,
+ 2,
+ None,
+ ], f"Invalid pseudo_column value: {col.pseudo_column}"
+
except Exception as e:
- pytest.fail(f"Test setup failed: {e}")
+ pytest.fail(f"rowIdColumns basic test failed: {e}")
+ finally:
+ # Clean up happens in test_specialcolumns_cleanup
+ pass
-def test_columns_all(cursor, db_connection):
- """Test columns returns information about all columns in all tables"""
+def test_rowid_columns_identity(cursor, db_connection):
+ """Test rowIdColumns with identity column"""
try:
- # First set up our test tables
- test_columns_setup(cursor, db_connection)
+ # Get row identifier columns for table with identity column
+ rowid_cols = cursor.rowIdColumns(
+ table="identity_test", schema="pytest_special_schema"
+ ).fetchall()
- # Get all columns (no filters)
- cols_cursor = cursor.columns()
- cols = cols_cursor.fetchall()
+ # LIMITATION: Only returns the identity column if it's the primary key
+ assert len(rowid_cols) == 1, "Should find exactly one ROWID column (identity column as PK)"
- # Verify we got results
- assert cols is not None, "columns() should return results"
- assert len(cols) > 0, "columns() should return at least one column"
+ # Verify it's the identity column
+ col = rowid_cols[0]
+ assert col.column_name.lower() == "id", "Identity column should be included as it's the PK"
- # Verify our test tables' columns are in the results
- # Use case-insensitive comparison to avoid driver case sensitivity issues
- found_test_table = False
- for col in cols:
- if (
- hasattr(col, "table_name")
- and col.table_name
- and col.table_name.lower() == "columns_test"
- and hasattr(col, "table_schem")
- and col.table_schem
- and col.table_schem.lower() == "pytest_cols_schema"
- ):
- found_test_table = True
- break
+ except Exception as e:
+ pytest.fail(f"rowIdColumns identity test failed: {e}")
+ finally:
+ # Clean up happens in test_specialcolumns_cleanup
+ pass
- assert found_test_table, "Test table columns should be included in results"
- # Verify structure of results
- first_row = cols[0]
- assert hasattr(first_row, "table_cat"), "Result should have table_cat column"
- assert hasattr(first_row, "table_schem"), "Result should have table_schem column"
- assert hasattr(first_row, "table_name"), "Result should have table_name column"
- assert hasattr(first_row, "column_name"), "Result should have column_name column"
- assert hasattr(first_row, "data_type"), "Result should have data_type column"
- assert hasattr(first_row, "type_name"), "Result should have type_name column"
- assert hasattr(first_row, "column_size"), "Result should have column_size column"
- assert hasattr(first_row, "buffer_length"), "Result should have buffer_length column"
- assert hasattr(first_row, "decimal_digits"), "Result should have decimal_digits column"
- assert hasattr(first_row, "num_prec_radix"), "Result should have num_prec_radix column"
- assert hasattr(first_row, "nullable"), "Result should have nullable column"
- assert hasattr(first_row, "remarks"), "Result should have remarks column"
- assert hasattr(first_row, "column_def"), "Result should have column_def column"
- assert hasattr(first_row, "sql_data_type"), "Result should have sql_data_type column"
- assert hasattr(first_row, "sql_datetime_sub"), "Result should have sql_datetime_sub column"
- assert hasattr(
- first_row, "char_octet_length"
- ), "Result should have char_octet_length column"
- assert hasattr(first_row, "ordinal_position"), "Result should have ordinal_position column"
- assert hasattr(first_row, "is_nullable"), "Result should have is_nullable column"
+def test_rowid_columns_composite(cursor, db_connection):
+ """Test rowIdColumns with composite primary key"""
+ try:
+ # Get row identifier columns for table with composite primary key
+ rowid_cols = cursor.rowIdColumns(
+ table="multiple_unique_test", schema="pytest_special_schema"
+ ).fetchall()
+
+ # LIMITATION: Only returns first column of composite primary key
+ assert len(rowid_cols) >= 1, "Should find at least one ROWID column (first column of PK)"
+
+ # Verify column names in the results - should be the first PK column
+ col_names = [col.column_name.lower() for col in rowid_cols]
+ assert "id" in col_names, "First part of composite PK should be included"
+
+ # LIMITATION: Other parts of the PK or unique constraints may not be included
+ if len(rowid_cols) > 1:
+ # If additional columns are returned, they should be valid
+ for col in rowid_cols:
+ assert col.column_name.lower() in [
+ "id",
+ "code",
+ ], "Only PK columns should be returned"
+ except Exception as e:
+ pytest.fail(f"rowIdColumns composite test failed: {e}")
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_specialcolumns_cleanup
pass
-def test_columns_specific_table(cursor, db_connection):
- """Test columns returns information about a specific table"""
- try:
- # Get columns for the test table
- cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
+def test_rowid_columns_nonexistent(cursor):
+ """Test rowIdColumns with non-existent table"""
+ # Use a table name that's highly unlikely to exist
+ rowid_cols = cursor.rowIdColumns("nonexistent_table_xyz123").fetchall()
- # Verify we got results
- assert len(cols) == 9, "Should find exactly 9 columns in columns_test"
+ # Should return empty list, not error
+ assert isinstance(rowid_cols, list), "Should return a list for non-existent table"
+ assert len(rowid_cols) == 0, "Should return empty list for non-existent table"
- # Verify all column names are present (case insensitive)
- col_names = [col.column_name.lower() for col in cols]
- expected_names = [
- "id",
- "name",
- "description",
- "price",
- "created_date",
- "is_active",
- "binary_data",
- "notes",
- "computed_col",
- ]
- for name in expected_names:
- assert name in col_names, f"Column {name} should be in results"
+def test_rowid_columns_nullable(cursor, db_connection):
+ """Test rowIdColumns with nullable parameter"""
+ try:
+ # First create a table with nullable unique column and non-nullable PK
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.nullable_test (
+ id INT PRIMARY KEY, -- PK can't be nullable in SQL Server
+ data NVARCHAR(100) NULL
+ )
+ """)
+ db_connection.commit()
- # Verify details of a specific column (id)
- id_col = next(col for col in cols if col.column_name.lower() == "id")
- assert id_col.nullable == 0, "id column should be non-nullable"
- assert id_col.ordinal_position == 1, "id should be the first column"
- assert id_col.is_nullable == "NO", "is_nullable should be NO for id column"
+ # Test with nullable=True (default)
+ rowid_cols_with_nullable = cursor.rowIdColumns(
+ table="nullable_test", schema="pytest_special_schema"
+ ).fetchall()
- # Check data types (but don't assume specific ODBC type codes since they vary by driver)
- # Instead check that the type_name is correct
- id_type = id_col.type_name.lower()
- assert "int" in id_type, f"id column should be INTEGER type, got {id_type}"
+ # Verify PK column is included
+ assert len(rowid_cols_with_nullable) == 1, "Should return exactly one column (PK)"
+ assert (
+ rowid_cols_with_nullable[0].column_name.lower() == "id"
+ ), "PK column should be returned"
- # Check a nullable column
- desc_col = next(col for col in cols if col.column_name.lower() == "description")
- assert desc_col.nullable == 1, "description column should be nullable"
- assert desc_col.is_nullable == "YES", "is_nullable should be YES for description column"
+ # Test with nullable=False
+ rowid_cols_no_nullable = cursor.rowIdColumns(
+ table="nullable_test", schema="pytest_special_schema", nullable=False
+ ).fetchall()
+
+ # With SQL_NO_NULLS, SQLSpecialColumns should return only non-nullable
+ # columns that uniquely identify a row; SQL Server returns an empty set in
+ # this case, which is the expected behavior
+ assert (
+ len(rowid_cols_no_nullable) == 0
+ ), "Should return empty list when nullable=False (ODBC API behavior)"
+ except Exception as e:
+ pytest.fail(f"rowIdColumns nullable test failed: {e}")
finally:
- # Clean up happens in test_columns_cleanup
- pass
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_test")
+ db_connection.commit()
-def test_columns_special_chars(cursor, db_connection):
- """Test columns with special characters and edge cases"""
+def test_rowver_columns_basic(cursor, db_connection):
+ """Test basic functionality of rowVerColumns"""
try:
- # Get columns for the special table
- cols = cursor.columns(table="columns_special_test", schema="pytest_cols_schema").fetchall()
+ # Get version columns from timestamp test table
+ rowver_cols = cursor.rowVerColumns(
+ table="timestamp_test", schema="pytest_special_schema"
+ ).fetchall()
# Verify we got results
- assert len(cols) == 9, "Should find exactly 9 columns in columns_special_test"
+ assert len(rowver_cols) == 1, "Should find exactly one ROWVER column"
- # Check that special column names are handled correctly
- col_names = [col.column_name for col in cols]
+ # Verify the column is the rowversion column
+ rowver_col = rowver_cols[0]
+ assert (
+ rowver_col.column_name.lower() == "last_updated"
+ ), "ROWVER column should be 'last_updated'"
+ assert rowver_col.type_name.lower() in [
+ "rowversion",
+ "timestamp",
+ ], "ROWVER column should have rowversion or timestamp type"
- # Create case-insensitive lookup
- col_names_lower = [name.lower() if name else None for name in col_names]
+ # Verify result structure - allowing for NULL values
+ assert hasattr(rowver_col, "scope"), "Result should have scope column"
+ assert hasattr(rowver_col, "column_name"), "Result should have column_name column"
+ assert hasattr(rowver_col, "data_type"), "Result should have data_type column"
+ assert hasattr(rowver_col, "type_name"), "Result should have type_name column"
+ assert hasattr(rowver_col, "column_size"), "Result should have column_size column"
+ assert hasattr(rowver_col, "buffer_length"), "Result should have buffer_length column"
+ assert hasattr(rowver_col, "decimal_digits"), "Result should have decimal_digits column"
+ assert hasattr(rowver_col, "pseudo_column"), "Result should have pseudo_column column"
- # Check for columns with special characters - note that column names might be
- # returned with or without brackets/quotes depending on the driver
- assert any(
- "user name" in name.lower() for name in col_names
- ), "Column with spaces should be in results"
- assert any("id" == name.lower() for name in col_names), "ID column should be in results"
- assert any(
- "123_numeric_start" in name.lower() for name in col_names
- ), "Column starting with numbers should be in results"
- assert any("max" == name.lower() for name in col_names), "MAX column should be in results"
- assert any(
- "select" == name.lower() for name in col_names
- ), "SELECT column should be in results"
- assert any(
- "column.with.dots" in name.lower() for name in col_names
- ), "Column with dots should be in results"
- assert any(
- "column/with/slashes" in name.lower() for name in col_names
- ), "Column with slashes should be in results"
- assert any(
- "column_with_underscores" in name.lower() for name in col_names
- ), "Column with underscores should be in results"
+ # The scope should be one of the valid values or NULL
+ assert rowver_col.scope in [
+ 0,
+ 1,
+ 2,
+ None,
+ ], f"Invalid scope value: {rowver_col.scope}"
+ except Exception as e:
+ pytest.fail(f"rowVerColumns basic test failed: {e}")
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_specialcolumns_cleanup
pass
-def test_columns_specific_column(cursor, db_connection):
- """Test columns with specific column filter"""
- try:
- # Get specific column
- cols = cursor.columns(
- table="columns_test", schema="pytest_cols_schema", column="name"
- ).fetchall()
+def test_rowver_columns_nonexistent(cursor):
+ """Test rowVerColumns with non-existent table"""
+ # Use a table name that's highly unlikely to exist
+ rowver_cols = cursor.rowVerColumns("nonexistent_table_xyz123").fetchall()
- # Verify we got just one result
- assert len(cols) == 1, "Should find exactly 1 column named 'name'"
+ # Should return empty list, not error
+ assert isinstance(rowver_cols, list), "Should return a list for non-existent table"
+ assert len(rowver_cols) == 0, "Should return empty list for non-existent table"
- # Verify column details
- col = cols[0]
- assert col.column_name.lower() == "name", "Column name should be 'name'"
- assert col.table_name.lower() == "columns_test", "Table name should be 'columns_test'"
- assert (
- col.table_schem.lower() == "pytest_cols_schema"
- ), "Schema should be 'pytest_cols_schema'"
- assert col.nullable == 0, "name column should be non-nullable"
- # Get column using pattern (% wildcard)
- pattern_cols = cursor.columns(
- table="columns_test", schema="pytest_cols_schema", column="%date%"
- ).fetchall()
+def test_rowver_columns_nullable(cursor, db_connection):
+ """Test rowVerColumns with nullable parameter (not expected to have effect)"""
+ try:
+ # First create a table with rowversion column
+ cursor.execute("""
+ CREATE TABLE pytest_special_schema.nullable_rowver_test (
+ id INT PRIMARY KEY,
+ ts ROWVERSION
+ )
+ """)
+ db_connection.commit()
- # Should find created_date column
- assert len(pattern_cols) == 1, "Should find 1 column matching '%date%'"
+ # Test with nullable=True (default)
+ rowver_cols_with_nullable = cursor.rowVerColumns(
+ table="nullable_rowver_test", schema="pytest_special_schema"
+ ).fetchall()
+ # Verify rowversion column is included (rowversion can't be nullable)
+ assert len(rowver_cols_with_nullable) == 1, "Should find exactly one ROWVER column"
assert (
- pattern_cols[0].column_name.lower() == "created_date"
- ), "Should find created_date column"
+ rowver_cols_with_nullable[0].column_name.lower() == "ts"
+ ), "ROWVERSION column should be included"
- # Get multiple columns with pattern
- multi_cols = cursor.columns(
- table="columns_test",
- schema="pytest_cols_schema",
- column="%d%", # Should match id, description, created_date
+ # Test with nullable=False
+ rowver_cols_no_nullable = cursor.rowVerColumns(
+ table="nullable_rowver_test", schema="pytest_special_schema", nullable=False
).fetchall()
- # At least 3 columns should match this pattern
- assert len(multi_cols) >= 3, "Should find at least 3 columns matching '%d%'"
- match_names = [col.column_name.lower() for col in multi_cols]
- assert "id" in match_names, "id should match '%d%'"
- assert "description" in match_names, "description should match '%d%'"
- assert "created_date" in match_names, "created_date should match '%d%'"
+ # Verify rowversion column is still included
+ assert len(rowver_cols_no_nullable) == 1, "Should find exactly one ROWVER column"
+ assert (
+ rowver_cols_no_nullable[0].column_name.lower() == "ts"
+ ), "ROWVERSION column should be included even with nullable=False"
+ except Exception as e:
+ pytest.fail(f"rowVerColumns nullable test failed: {e}")
finally:
- # Clean up happens in test_columns_cleanup
- pass
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_rowver_test")
+ db_connection.commit()
-def test_columns_with_underscore_pattern(cursor):
- """Test columns with underscore wildcard pattern"""
+def test_specialcolumns_catalog_filter(cursor, db_connection):
+ """Test special columns with catalog filter"""
try:
- # Get columns with underscore pattern (one character wildcard)
- # Looking for 'id' (exactly 2 chars)
- cols = cursor.columns(
- table="columns_test", schema="pytest_cols_schema", column="__"
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ current_db = cursor.fetchone().current_db
+
+ # Test rowIdColumns with current catalog
+ rowid_cols = cursor.rowIdColumns(
+ table="rowid_test", catalog=current_db, schema="pytest_special_schema"
).fetchall()
- # Should find 'id' column
- id_found = False
- for col in cols:
- if col.column_name.lower() == "id" and col.table_name.lower() == "columns_test":
- id_found = True
- break
+ # Verify catalog filter worked
+ assert len(rowid_cols) > 0, "Should find ROWID columns with correct catalog"
- assert id_found, "Should find 'id' column with pattern '__'"
+ # Test rowIdColumns with non-existent catalog
+ fake_rowid_cols = cursor.rowIdColumns(
+ table="rowid_test",
+ catalog="nonexistent_db_xyz123",
+ schema="pytest_special_schema",
+ ).fetchall()
+ assert len(fake_rowid_cols) == 0, "Should return empty list for non-existent catalog"
- # Try a more complex pattern with both % and _
- # For example: '%_d%' matches any column with 'd' as the second or later character
- pattern_cols = cursor.columns(
- table="columns_test", schema="pytest_cols_schema", column="%_d%"
+ # Test rowVerColumns with current catalog
+ rowver_cols = cursor.rowVerColumns(
+ table="timestamp_test", catalog=current_db, schema="pytest_special_schema"
).fetchall()
- # Should match 'id' (if considering case-insensitive) and 'created_date'
- match_names = [
- col.column_name.lower()
- for col in pattern_cols
- if col.table_name.lower() == "columns_test"
- ]
+ # Verify catalog filter worked
+ assert len(rowver_cols) > 0, "Should find ROWVER columns with correct catalog"
- # At least 'created_date' should match this pattern
- assert "created_date" in match_names, "created_date should match '%_d%'"
+ # Test rowVerColumns with non-existent catalog
+ fake_rowver_cols = cursor.rowVerColumns(
+ table="timestamp_test",
+ catalog="nonexistent_db_xyz123",
+ schema="pytest_special_schema",
+ ).fetchall()
+ assert len(fake_rowver_cols) == 0, "Should return empty list for non-existent catalog"
+ except Exception as e:
+ pytest.fail(f"Special columns catalog filter test failed: {e}")
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_specialcolumns_cleanup
pass
-def test_columns_nonexistent(cursor):
- """Test columns with non-existent table or column"""
- # Test with non-existent table
- table_cols = cursor.columns(table="nonexistent_table_xyz123")
- assert len(table_cols) == 0, "Should return empty list for non-existent table"
-
- # Test with non-existent column in existing table
- col_cols = cursor.columns(
- table="columns_test",
- schema="pytest_cols_schema",
- column="nonexistent_column_xyz123",
- ).fetchall()
- assert len(col_cols) == 0, "Should return empty list for non-existent column"
-
- # Test with non-existent schema
- schema_cols = cursor.columns(
- table="columns_test", schema="nonexistent_schema_xyz123"
- ).fetchall()
- assert len(schema_cols) == 0, "Should return empty list for non-existent schema"
-
-
-def test_columns_data_types(cursor):
- """Test columns returns correct data type information"""
+def test_specialcolumns_cleanup(cursor, db_connection):
+ """Clean up test tables after testing"""
try:
- # Get all columns from test table
- cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
-
- # Create a dictionary mapping column names to their details
- col_dict = {col.column_name.lower(): col for col in cols}
-
- # Check data types by name (case insensitive checks)
- # Note: We're checking type_name as a string to avoid SQL type code inconsistencies
- # between drivers
-
- # INT column
- assert "int" in col_dict["id"].type_name.lower(), "id should be INT type"
-
- # NVARCHAR column
- assert any(
- name in col_dict["name"].type_name.lower()
- for name in ["nvarchar", "varchar", "char", "wchar"]
- ), "name should be NVARCHAR type"
-
- # DECIMAL column
- assert any(
- name in col_dict["price"].type_name.lower() for name in ["decimal", "numeric", "money"]
- ), "price should be DECIMAL type"
-
- # BIT column
- assert any(
- name in col_dict["is_active"].type_name.lower() for name in ["bit", "boolean"]
- ), "is_active should be BIT type"
+ # Drop all test tables
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.rowid_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.timestamp_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.multiple_unique_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.identity_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_unique_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_special_schema.nullable_timestamp_test")
- # TEXT column
- assert any(
- name in col_dict["notes"].type_name.lower() for name in ["text", "char", "varchar"]
- ), "notes should be TEXT type"
+ # Drop the test schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_special_schema")
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test cleanup failed: {e}")
- # Check nullable flag
- assert col_dict["id"].nullable == 0, "id should be non-nullable"
- assert col_dict["description"].nullable == 1, "description should be nullable"
- # Check column size
- assert col_dict["name"].column_size == 100, "name should have size 100"
+def test_statistics_setup(cursor, db_connection):
+ """Create test tables and indexes for statistics testing"""
+ try:
+ # Create a test schema for isolation
+ cursor.execute(
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_stats_schema') EXEC('CREATE SCHEMA pytest_stats_schema')"
+ )
- # Check decimal digits for numeric type
- assert col_dict["price"].decimal_digits == 2, "price should have 2 decimal digits"
+ # Drop tables if they exist
+ cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.stats_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.empty_stats_test")
- finally:
- # Clean up happens in test_columns_cleanup
- pass
+ # Create test table with various indexes
+ cursor.execute("""
+ CREATE TABLE pytest_stats_schema.stats_test (
+ id INT PRIMARY KEY,
+ name VARCHAR(100) NOT NULL,
+ email VARCHAR(100) UNIQUE,
+ department VARCHAR(50) NOT NULL,
+ salary DECIMAL(10, 2) NULL,
+ hire_date DATE NOT NULL
+ )
+ """)
+ # Create a non-unique index
+ cursor.execute("""
+ CREATE INDEX IX_stats_test_dept_date ON pytest_stats_schema.stats_test (department, hire_date)
+ """)
-def test_columns_nonexistent(cursor):
- """Test columns with non-existent table or column"""
- # Test with non-existent table
- table_cols = cursor.columns(table="nonexistent_table_xyz123").fetchall()
- assert len(table_cols) == 0, "Should return empty list for non-existent table"
+ # Create a unique index on multiple columns
+ cursor.execute("""
+ CREATE UNIQUE INDEX UX_stats_test_name_dept ON pytest_stats_schema.stats_test (name, department)
+ """)
- # Test with non-existent column in existing table
- col_cols = cursor.columns(
- table="columns_test",
- schema="pytest_cols_schema",
- column="nonexistent_column_xyz123",
- ).fetchall()
- assert len(col_cols) == 0, "Should return empty list for non-existent column"
+ # Create an empty table for testing
+ cursor.execute("""
+ CREATE TABLE pytest_stats_schema.empty_stats_test (
+ id INT PRIMARY KEY,
+ data VARCHAR(100) NULL
+ )
+ """)
- # Test with non-existent schema
- schema_cols = cursor.columns(
- table="columns_test", schema="nonexistent_schema_xyz123"
- ).fetchall()
- assert len(schema_cols) == 0, "Should return empty list for non-existent schema"
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
-def test_columns_catalog_filter(cursor):
- """Test columns with catalog filter"""
+def test_statistics_basic(cursor, db_connection):
+ """Test basic functionality of statistics method"""
try:
- # Get current database name
- cursor.execute("SELECT DB_NAME() AS current_db")
- current_db = cursor.fetchone().current_db
-
- # Get columns with current catalog
- cols = cursor.columns(
- table="columns_test", catalog=current_db, schema="pytest_cols_schema"
- ).fetchall()
+ # First set up our test tables
+ test_statistics_setup(cursor, db_connection)
- # Verify catalog filter worked
- assert len(cols) > 0, "Should find columns with correct catalog"
+ # Get statistics for the test table (all indexes)
+ stats = cursor.statistics(table="stats_test", schema="pytest_stats_schema").fetchall()
- # Check catalog in results
- for col in cols:
- # Some drivers might return None for catalog
- if col.table_cat is not None:
- assert col.table_cat.lower() == current_db.lower(), "Wrong table catalog"
+ # Verify we got results - should include PK, unique index on email, and non-unique index
+ assert stats is not None, "statistics() should return results"
+ assert len(stats) > 0, "statistics() should return at least one row"
- # Test with non-existent catalog
- fake_cols = cursor.columns(
- table="columns_test",
- catalog="nonexistent_db_xyz123",
- schema="pytest_cols_schema",
- ).fetchall()
- assert len(fake_cols) == 0, "Should return empty list for non-existent catalog"
+ # Count different types of indexes
+ table_stats = [s for s in stats if s.type == 0] # TABLE_STAT
+ indexes = [s for s in stats if s.type != 0] # Actual indexes
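+ # ODBC SQLStatistics type codes: 0 = SQL_TABLE_STAT, 1 = SQL_INDEX_CLUSTERED,
+ # 2 = SQL_INDEX_HASHED, 3 = SQL_INDEX_OTHER.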
- finally:
- # Clean up happens in test_columns_cleanup
- pass
+ # We should have at least one table statistics row and multiple index rows
+ assert len(table_stats) <= 1, "Should have at most one TABLE_STAT row"
+ assert (
+ len(indexes) >= 3
+ ), "Should have at least 3 index entries (PK, unique email, non-unique dept+date)"
+ # Verify column names in results
+ first_row = stats[0]
+ assert hasattr(first_row, "table_name"), "Result should have table_name column"
+ assert hasattr(first_row, "non_unique"), "Result should have non_unique column"
+ assert hasattr(first_row, "index_name"), "Result should have index_name column"
+ assert hasattr(first_row, "type"), "Result should have type column"
+ assert hasattr(first_row, "column_name"), "Result should have column_name column"
-def test_columns_schema_pattern(cursor):
- """Test columns with schema name pattern"""
- try:
- # Get columns with schema pattern
- cols = cursor.columns(table="columns_test", schema="pytest_%").fetchall()
+ # Check that we can find the primary key
+ pk_found = False
+ for stat in stats:
+ if hasattr(stat, "index_name") and stat.index_name and "pk" in stat.index_name.lower():
+ pk_found = True
+ break
- # Should find our test table columns
- test_cols = [col for col in cols if col.table_name.lower() == "columns_test"]
- assert len(test_cols) > 0, "Should find columns using schema pattern"
+ assert pk_found, "Primary key should be included in statistics results"
- # Try a more specific pattern
- specific_cols = cursor.columns(table="columns_test", schema="pytest_cols%").fetchall()
+ # Check that we can find the unique index on email
+ email_index_found = False
+ for stat in stats:
+ if (
+ hasattr(stat, "column_name")
+ and stat.column_name
+ and stat.column_name.lower() == "email"
+ and hasattr(stat, "non_unique")
+ and stat.non_unique == 0
+ ): # 0 = unique
+ email_index_found = True
+ break
- # Should still find our test table columns
- test_cols = [col for col in specific_cols if col.table_name.lower() == "columns_test"]
- assert len(test_cols) > 0, "Should find columns using specific schema pattern"
+ assert email_index_found, "Unique index on email should be included in statistics results"
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_statistics_cleanup
pass
-def test_columns_table_pattern(cursor):
- """Test columns with table name pattern"""
+def test_statistics_unique_only(cursor, db_connection):
+ """Test statistics with unique=True to get only unique indexes"""
try:
- # Get columns with table pattern
- cols = cursor.columns(table="columns_%", schema="pytest_cols_schema").fetchall()
+ # Get statistics for only unique indexes
+ stats = cursor.statistics(
+ table="stats_test", schema="pytest_stats_schema", unique=True
+ ).fetchall()
- # Should find columns from both test tables
- tables_found = set()
- for col in cols:
- if col.table_name:
- tables_found.add(col.table_name.lower())
+ # Verify we got results
+ assert stats is not None, "statistics() with unique=True should return results"
+ assert len(stats) > 0, "statistics() with unique=True should return at least one row"
- assert "columns_test" in tables_found, "Should find columns_test with pattern columns_%"
- assert (
- "columns_special_test" in tables_found
- ), "Should find columns_special_test with pattern columns_%"
+ # All index entries should be for unique indexes (non_unique = 0)
+ for stat in stats:
+ if hasattr(stat, "type") and stat.type != 0: # Skip TABLE_STAT entries
+ assert hasattr(stat, "non_unique"), "Index entry should have non_unique column"
+ assert stat.non_unique == 0, "With unique=True, all indexes should be unique"
+
+ # Count different types of indexes
+ indexes = [s for s in stats if hasattr(s, "type") and s.type != 0]
+
+ # We should have multiple unique indexes (PK, unique email, unique name+dept)
+ assert len(indexes) >= 3, "Should have at least 3 unique index entries"
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_statistics_cleanup
pass
-def test_columns_ordinal_position(cursor):
- """Test ordinal_position is correct in columns results"""
+def test_statistics_empty_table(cursor, db_connection):
+ """Test statistics on a table with no data (just schema)"""
try:
- # Get columns for the test table
- cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
+ # Get statistics for the empty table
+ stats = cursor.statistics(table="empty_stats_test", schema="pytest_stats_schema").fetchall()
- # Sort by ordinal position
- sorted_cols = sorted(cols, key=lambda col: col.ordinal_position)
+ # Should still return metadata about the primary key
+ assert stats is not None, "statistics() should return results even for empty table"
+ assert len(stats) > 0, "statistics() should return at least one row for empty table"
- # Verify positions are consecutive starting from 1
- for i, col in enumerate(sorted_cols, 1):
- assert (
- col.ordinal_position == i
- ), f"Column {col.column_name} should have ordinal_position {i}"
+ # Check for primary key
+ pk_found = False
+ for stat in stats:
+ if hasattr(stat, "index_name") and stat.index_name and "pk" in stat.index_name.lower():
+ pk_found = True
+ break
- # First column should be id (primary key)
- assert sorted_cols[0].column_name.lower() == "id", "First column should be id"
+ assert pk_found, "Primary key should be included in statistics results for empty table"
finally:
- # Clean up happens in test_columns_cleanup
+ # Clean up happens in test_statistics_cleanup
pass
-def test_columns_cleanup(cursor, db_connection):
- """Clean up test tables after testing"""
- try:
- # Drop all test tables
- cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_test")
- cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_special_test")
-
- # Drop the test schema
- cursor.execute("DROP SCHEMA IF EXISTS pytest_cols_schema")
- db_connection.commit()
- except Exception as e:
- pytest.fail(f"Test cleanup failed: {e}")
-
+def test_statistics_nonexistent(cursor):
+ """Test statistics with non-existent table name"""
+ # Use a table name that's highly unlikely to exist
+ stats = cursor.statistics("nonexistent_table_xyz123").fetchall()
-def test_lowercase_attribute(cursor, db_connection):
- """Test that the lowercase attribute properly converts column names to lowercase"""
+ # Should return empty list, not error
+ assert isinstance(stats, list), "Should return a list for non-existent table"
+ assert len(stats) == 0, "Should return empty list for non-existent table"
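+ # ODBC catalog functions report "no match" as an empty result set rather
+ # than an error, so an empty fetchall() is the expected contract here.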
- # Store original value to restore after test
- original_lowercase = mssql_python.lowercase
- drop_cursor = None
+def test_statistics_result_structure(cursor, db_connection):
+ """Test the complete structure of statistics result rows"""
try:
- # Create a test table with mixed-case column names
- cursor.execute("""
- CREATE TABLE #pytest_lowercase_test (
- ID INT PRIMARY KEY,
- UserName VARCHAR(50),
- EMAIL_ADDRESS VARCHAR(100),
- PhoneNumber VARCHAR(20)
- )
- """)
- db_connection.commit()
-
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_lowercase_test (ID, UserName, EMAIL_ADDRESS, PhoneNumber)
- VALUES (1, 'JohnDoe', 'john@example.com', '555-1234')
- """)
- db_connection.commit()
-
- # First test with lowercase=False (default)
- mssql_python.lowercase = False
- cursor1 = db_connection.cursor()
- cursor1.execute("SELECT * FROM #pytest_lowercase_test")
-
- # Description column names should preserve original case
- column_names1 = [desc[0] for desc in cursor1.description]
- assert "ID" in column_names1, "Column 'ID' should be present with original case"
- assert "UserName" in column_names1, "Column 'UserName' should be present with original case"
+ # Get statistics for the test table
+ stats = cursor.statistics(table="stats_test", schema="pytest_stats_schema").fetchall()
- # Make sure to consume all results and close the cursor
- cursor1.fetchall()
- cursor1.close()
+ # Verify we have results
+ assert len(stats) > 0, "Should have statistics results"
- # Now test with lowercase=True
- mssql_python.lowercase = True
- cursor2 = db_connection.cursor()
- cursor2.execute("SELECT * FROM #pytest_lowercase_test")
+ # Find a row that's an actual index (not TABLE_STAT)
+ index_row = None
+ for stat in stats:
+ if hasattr(stat, "type") and stat.type != 0:
+ index_row = stat
+ break
- # Description column names should be lowercase
- column_names2 = [desc[0] for desc in cursor2.description]
- assert "id" in column_names2, "Column names should be lowercase when lowercase=True"
- assert "username" in column_names2, "Column names should be lowercase when lowercase=True"
+ assert index_row is not None, "Should have at least one index row"
- # Make sure to consume all results and close the cursor
- cursor2.fetchall()
- cursor2.close()
+ # Check for all required columns
+ required_columns = [
+ "table_cat",
+ "table_schem",
+ "table_name",
+ "non_unique",
+ "index_qualifier",
+ "index_name",
+ "type",
+ "ordinal_position",
+ "column_name",
+ "asc_or_desc",
+ "cardinality",
+ "pages",
+ "filter_condition",
+ ]
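+ # These 13 names are the columns of the ODBC SQLStatistics result set,
+ # listed in specification order.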
- # Create a fresh cursor for cleanup
- drop_cursor = db_connection.cursor()
+ for column in required_columns:
+ assert hasattr(index_row, column), f"Result missing required column: {column}"
- finally:
- # Restore original value
- mssql_python.lowercase = original_lowercase
+ # Check types of key columns
+ assert isinstance(index_row.table_name, str), "table_name should be a string"
+ assert isinstance(index_row.type, int), "type should be an integer"
- try:
- # Use a separate cursor for cleanup
- if drop_cursor:
- drop_cursor.execute("DROP TABLE IF EXISTS #pytest_lowercase_test")
- db_connection.commit()
- drop_cursor.close()
- except Exception as e:
- print(f"Warning: Failed to drop test table: {e}")
+ # Don't check the actual values of cardinality and pages as they may be NULL
+ # or driver-dependent, especially for empty tables
+ finally:
+ # Clean up happens in test_statistics_cleanup
+ pass
-def test_decimal_separator_function(cursor, db_connection):
- """Test decimal separator functionality with database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
+def test_statistics_catalog_filter(cursor, db_connection):
+ """Test statistics with catalog filter"""
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_separator_test (
- id INT PRIMARY KEY,
- decimal_value DECIMAL(10, 2)
- )
- """)
- db_connection.commit()
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ current_db = cursor.fetchone().current_db
- # Insert test values with default separator (.)
- test_value = decimal.Decimal("123.45")
- cursor.execute(
- """
- INSERT INTO #pytest_decimal_separator_test (id, decimal_value)
- VALUES (1, ?)
- """,
- [test_value],
- )
- db_connection.commit()
+ # Get statistics with current catalog
+ stats = cursor.statistics(
+ table="stats_test", catalog=current_db, schema="pytest_stats_schema"
+ ).fetchall()
- # First test with default decimal separator (.)
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default separator not found in string representation"
+ # Verify catalog filter worked
+ assert len(stats) > 0, "Should find statistics with correct catalog"
- # Now change to comma separator and test string representation
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
- row = cursor.fetchone()
+ # Verify catalog in results (some drivers may return None for table_cat)
+ for stat in stats:
+ if hasattr(stat, "table_cat") and stat.table_cat is not None:
+ assert stat.table_cat.lower() == current_db.lower(), "Wrong table catalog"
- # This should format the decimal with a comma in the string representation
- comma_str = str(row)
- assert (
- "123,45" in comma_str
- ), f"Expected comma in string representation but got: {comma_str}"
+ # Get statistics with non-existent catalog
+ fake_stats = cursor.statistics(
+ table="stats_test",
+ catalog="nonexistent_db_xyz123",
+ schema="pytest_stats_schema",
+ ).fetchall()
+ assert len(fake_stats) == 0, "Should return empty list for non-existent catalog"
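+ # Note: the catalog name is an ordinary (non-pattern) argument to
+ # SQLStatistics, so it must match the database name exactly.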
finally:
- # Restore original decimal separator
- mssql_python.setDecimalSeparator(original_separator)
-
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_separator_test")
- db_connection.commit()
-
+ # Clean up happens in test_statistics_cleanup
+ pass
-def test_decimal_separator_basic_functionality():
- """Test basic decimal separator functionality without database operations"""
- # Store original value to restore after test
- original_separator = mssql_python.getDecimalSeparator()
+def test_statistics_with_quick_parameter(cursor, db_connection):
+ """Test statistics with quick parameter variations"""
try:
- # Test default value
- assert mssql_python.getDecimalSeparator() == ".", "Default decimal separator should be '.'"
+ # Test with quick=True (default)
+ quick_stats = cursor.statistics(
+ table="stats_test", schema="pytest_stats_schema", quick=True
+ ).fetchall()
- # Test setting to comma
- mssql_python.setDecimalSeparator(",")
- assert (
- mssql_python.getDecimalSeparator() == ","
- ), "Decimal separator should be ',' after setting"
+ # Test with quick=False
+ thorough_stats = cursor.statistics(
+ table="stats_test", schema="pytest_stats_schema", quick=False
+ ).fetchall()
- # Test setting to other valid separators
- mssql_python.setDecimalSeparator(":")
- assert (
- mssql_python.getDecimalSeparator() == ":"
- ), "Decimal separator should be ':' after setting"
+ # Both should return results, but we can't guarantee behavior differences
+ # since it depends on the ODBC driver and database system
+ assert len(quick_stats) > 0, "quick=True should return results"
+ assert len(thorough_stats) > 0, "quick=False should return results"
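+ # Presumably quick=True maps to SQL_QUICK (return CARDINALITY/PAGES only if
+ # readily available) and quick=False to SQL_ENSURE in the underlying
+ # SQLStatistics call, though that mapping is driver- and library-dependent.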
- # Test invalid inputs
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("") # Empty string
+ # Just verify that changing the parameter didn't cause errors
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator("too_long") # More than one character
+ finally:
+ # Clean up happens in test_statistics_cleanup
+ pass
- with pytest.raises(ValueError):
- mssql_python.setDecimalSeparator(123) # Not a string
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+def test_statistics_cleanup(cursor, db_connection):
+ """Clean up test tables after testing"""
+ try:
+ # Drop all test tables
+ cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.stats_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_stats_schema.empty_stats_test")
+ # Drop the test schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_stats_schema")
+ db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test cleanup failed: {e}")
-def test_decimal_separator_with_multiple_values(cursor, db_connection):
- """Test decimal separator with multiple different decimal values"""
- original_separator = mssql_python.getDecimalSeparator()
+def test_columns_setup(cursor, db_connection):
+ """Create test tables for columns method testing"""
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_multi_test (
+ # Create a test schema for isolation
+ cursor.execute(
+ "IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'pytest_cols_schema') EXEC('CREATE SCHEMA pytest_cols_schema')"
+ )
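+ # CREATE SCHEMA must be the only statement in its batch, so it is wrapped
+ # in EXEC to run as a separate dynamic batch.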
+
+ # Drop tables if they exist
+ cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_special_test")
+
+ # Create test table with various column types
+ cursor.execute("""
+ CREATE TABLE pytest_cols_schema.columns_test (
id INT PRIMARY KEY,
- positive_value DECIMAL(10, 2),
- negative_value DECIMAL(10, 2),
- zero_value DECIMAL(10, 2),
- small_value DECIMAL(10, 4)
+ name NVARCHAR(100) NOT NULL,
+ description NVARCHAR(MAX) NULL,
+ price DECIMAL(10, 2) NULL,
+ created_date DATETIME DEFAULT GETDATE(),
+ is_active BIT NOT NULL DEFAULT 1,
+ binary_data VARBINARY(MAX) NULL,
+ notes TEXT NULL,
+ [computed_col] AS (name + ' - ' + CAST(id AS VARCHAR(10)))
)
""")
- db_connection.commit()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_multi_test VALUES (1, 123.45, -67.89, 0.00, 0.0001)
+ # Create table with special column names and edge cases (nested brackets are avoided because they proved problematic)
+ cursor.execute("""
+ CREATE TABLE pytest_cols_schema.columns_special_test (
+ [ID] INT PRIMARY KEY,
+ [User Name] NVARCHAR(100) NULL,
+ [Spaces Multiple] VARCHAR(50) NULL,
+ [123_numeric_start] INT NULL,
+ [MAX] VARCHAR(20) NULL, -- SQL keyword as column name
+ [SELECT] INT NULL, -- SQL keyword as column name
+ [Column.With.Dots] VARCHAR(20) NULL,
+ [Column/With/Slashes] VARCHAR(20) NULL,
+ [Column_With_Underscores] VARCHAR(20) NULL -- Changed from problematic nested brackets
+ )
""")
+
db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test setup failed: {e}")
- # Test with default separator first
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- default_str = str(row)
- assert "123.45" in default_str, "Default positive value formatting incorrect"
- assert "-67.89" in default_str, "Default negative value formatting incorrect"
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
- cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
- row = cursor.fetchone()
- comma_str = str(row)
+def test_columns_all(cursor, db_connection):
+ """Test columns returns information about all columns in all tables"""
+ try:
+ # First set up our test tables
+ test_columns_setup(cursor, db_connection)
- # Verify comma is used in all decimal values
- assert "123,45" in comma_str, "Positive value not formatted with comma"
- assert "-67,89" in comma_str, "Negative value not formatted with comma"
- assert "0,00" in comma_str, "Zero value not formatted with comma"
- assert "0,0001" in comma_str, "Small value not formatted with comma"
+ # Get all columns (no filters)
+ cols_cursor = cursor.columns()
+ cols = cols_cursor.fetchall()
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+ # Verify we got results
+ assert cols is not None, "columns() should return results"
+ assert len(cols) > 0, "columns() should return at least one column"
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_multi_test")
- db_connection.commit()
+ # Verify our test tables' columns are in the results
+ # Use case-insensitive comparison to avoid driver case sensitivity issues
+ found_test_table = False
+ for col in cols:
+ if (
+ hasattr(col, "table_name")
+ and col.table_name
+ and col.table_name.lower() == "columns_test"
+ and hasattr(col, "table_schem")
+ and col.table_schem
+ and col.table_schem.lower() == "pytest_cols_schema"
+ ):
+ found_test_table = True
+ break
+
+ assert found_test_table, "Test table columns should be included in results"
+
+ # Verify structure of results
+ first_row = cols[0]
+ assert hasattr(first_row, "table_cat"), "Result should have table_cat column"
+ assert hasattr(first_row, "table_schem"), "Result should have table_schem column"
+ assert hasattr(first_row, "table_name"), "Result should have table_name column"
+ assert hasattr(first_row, "column_name"), "Result should have column_name column"
+ assert hasattr(first_row, "data_type"), "Result should have data_type column"
+ assert hasattr(first_row, "type_name"), "Result should have type_name column"
+ assert hasattr(first_row, "column_size"), "Result should have column_size column"
+ assert hasattr(first_row, "buffer_length"), "Result should have buffer_length column"
+ assert hasattr(first_row, "decimal_digits"), "Result should have decimal_digits column"
+ assert hasattr(first_row, "num_prec_radix"), "Result should have num_prec_radix column"
+ assert hasattr(first_row, "nullable"), "Result should have nullable column"
+ assert hasattr(first_row, "remarks"), "Result should have remarks column"
+ assert hasattr(first_row, "column_def"), "Result should have column_def column"
+ assert hasattr(first_row, "sql_data_type"), "Result should have sql_data_type column"
+ assert hasattr(first_row, "sql_datetime_sub"), "Result should have sql_datetime_sub column"
+ assert hasattr(
+ first_row, "char_octet_length"
+ ), "Result should have char_octet_length column"
+ assert hasattr(first_row, "ordinal_position"), "Result should have ordinal_position column"
+ assert hasattr(first_row, "is_nullable"), "Result should have is_nullable column"
+ finally:
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_decimal_separator_calculations(cursor, db_connection):
- """Test that decimal separator doesn't affect calculations"""
- original_separator = mssql_python.getDecimalSeparator()
+def test_columns_specific_table(cursor, db_connection):
+ """Test columns returns information about a specific table"""
try:
- # Create test table
- cursor.execute("""
- CREATE TABLE #pytest_decimal_calc_test (
- id INT PRIMARY KEY,
- value1 DECIMAL(10, 2),
- value2 DECIMAL(10, 2)
- )
- """)
- db_connection.commit()
+ # Get columns for the test table
+ cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
- # Insert test data
- cursor.execute("""
- INSERT INTO #pytest_decimal_calc_test VALUES (1, 10.25, 5.75)
- """)
- db_connection.commit()
+ # Verify we got results
+ assert len(cols) == 9, "Should find exactly 9 columns in columns_test"
- # Test with default separator
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation incorrect with default separator"
+ # Verify all column names are present (case insensitive)
+ col_names = [col.column_name.lower() for col in cols]
+ expected_names = [
+ "id",
+ "name",
+ "description",
+ "price",
+ "created_date",
+ "is_active",
+ "binary_data",
+ "notes",
+ "computed_col",
+ ]
- # Change to comma separator
- mssql_python.setDecimalSeparator(",")
+ for name in expected_names:
+ assert name in col_names, f"Column {name} should be in results"
- # Calculations should still work correctly
- cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
- row = cursor.fetchone()
- assert row.sum_result == decimal.Decimal(
- "16.00"
- ), "Sum calculation affected by separator change"
+ # Verify details of a specific column (id)
+ id_col = next(col for col in cols if col.column_name.lower() == "id")
+ assert id_col.nullable == 0, "id column should be non-nullable"
+ assert id_col.ordinal_position == 1, "id should be the first column"
+ assert id_col.is_nullable == "NO", "is_nullable should be NO for id column"
- # But string representation should use comma
- assert "16,00" in str(row), "Sum result not formatted with comma in string representation"
+ # Check data types (but don't assume specific ODBC type codes since they vary by driver)
+ # Instead check that the type_name is correct
+ id_type = id_col.type_name.lower()
+ assert "int" in id_type, f"id column should be INTEGER type, got {id_type}"
- finally:
- # Restore original separator
- mssql_python.setDecimalSeparator(original_separator)
+ # Check a nullable column
+ desc_col = next(col for col in cols if col.column_name.lower() == "description")
+ assert desc_col.nullable == 1, "description column should be nullable"
+ assert desc_col.is_nullable == "YES", "is_nullable should be YES for description column"
- # Cleanup
- cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_calc_test")
- db_connection.commit()
+ finally:
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_executemany_with_uuids(cursor, db_connection):
- """Test inserting multiple rows with UUIDs and None using executemany."""
- table_name = "#pytest_uuid_batch"
+def test_columns_special_chars(cursor, db_connection):
+ """Test columns with special characters and edge cases"""
try:
- cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
- cursor.execute(f"""
- CREATE TABLE {table_name} (
- id UNIQUEIDENTIFIER,
- description NVARCHAR(50)
- )
- """)
- db_connection.commit()
-
- # Prepare test data: mix of UUIDs and None
- test_data = [
- [uuid.uuid4(), "Item 1"],
- [uuid.uuid4(), "Item 2"],
- [None, "Item 3"],
- [uuid.uuid4(), "Item 4"],
- [None, "Item 5"],
- ]
-
- # Map descriptions to original UUIDs for O(1) lookup
- uuid_map = {desc: uid for uid, desc in test_data}
-
- # Execute batch insert
- cursor.executemany(f"INSERT INTO {table_name} (id, description) VALUES (?, ?)", test_data)
- cursor.connection.commit()
-
- # Fetch and verify
- cursor.execute(f"SELECT id, description FROM {table_name}")
- rows = cursor.fetchall()
+ # Get columns for the special table
+ cols = cursor.columns(table="columns_special_test", schema="pytest_cols_schema").fetchall()
- assert len(rows) == len(test_data), "Number of fetched rows does not match inserted rows."
+ # Verify we got results
+ assert len(cols) == 9, "Should find exactly 9 columns in columns_special_test"
- for retrieved_uuid, retrieved_desc in rows:
- expected_uuid = uuid_map[retrieved_desc]
+ # Check that special column names are handled correctly
+ col_names = [col.column_name for col in cols]
- if expected_uuid is None:
- assert (
- retrieved_uuid is None
- ), f"Expected None for '{retrieved_desc}', got {retrieved_uuid}"
- else:
- # Convert string to UUID if needed
- if isinstance(retrieved_uuid, str):
- retrieved_uuid = uuid.UUID(retrieved_uuid)
+ # Create case-insensitive lookup, skipping any None names defensively
+ col_names_lower = [name.lower() for name in col_names if name]
- assert isinstance(
- retrieved_uuid, uuid.UUID
- ), f"Expected UUID, got {type(retrieved_uuid)}"
- assert retrieved_uuid == expected_uuid, f"UUID mismatch for '{retrieved_desc}'"
+ # Check for columns with special characters - note that column names might be
+ # returned with or without brackets/quotes depending on the driver
+ assert any(
+ "user name" in name for name in col_names_lower
+ ), "Column with spaces should be in results"
+ assert "id" in col_names_lower, "ID column should be in results"
+ assert any(
+ "123_numeric_start" in name for name in col_names_lower
+ ), "Column starting with numbers should be in results"
+ assert "max" in col_names_lower, "MAX column should be in results"
+ assert "select" in col_names_lower, "SELECT column should be in results"
+ assert any(
+ "column.with.dots" in name for name in col_names_lower
+ ), "Column with dots should be in results"
+ assert any(
+ "column/with/slashes" in name for name in col_names_lower
+ ), "Column with slashes should be in results"
+ assert any(
+ "column_with_underscores" in name for name in col_names_lower
+ ), "Column with underscores should be in results"
finally:
- cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_nvarcharmax_executemany_streaming(cursor, db_connection):
- """Streaming insert + fetch > 4k NVARCHAR(MAX) using executemany with all fetch modes."""
+def test_columns_specific_column(cursor, db_connection):
+ """Test columns with specific column filter"""
try:
- values = ["Ω" * 4100, "漢" * 5000]
- cursor.execute("CREATE TABLE #pytest_nvarcharmax (col NVARCHAR(MAX))")
- db_connection.commit()
-
- # --- executemany insert ---
- cursor.executemany("INSERT INTO #pytest_nvarcharmax VALUES (?)", [(v,) for v in values])
- db_connection.commit()
-
- # --- fetchall ---
- cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
- rows = [r[0] for r in cursor.fetchall()]
- assert rows == sorted(values, key=len)
+ # Get specific column
+ cols = cursor.columns(
+ table="columns_test", schema="pytest_cols_schema", column="name"
+ ).fetchall()
- # --- fetchone ---
- cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
- r1 = cursor.fetchone()[0]
- r2 = cursor.fetchone()[0]
- assert {r1, r2} == set(values)
- assert cursor.fetchone() is None
+ # Verify we got just one result
+ assert len(cols) == 1, "Should find exactly 1 column named 'name'"
- # --- fetchmany ---
- cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
- batch = [r[0] for r in cursor.fetchmany(1)]
- assert batch[0] in values
- finally:
- cursor.execute("DROP TABLE #pytest_nvarcharmax")
- db_connection.commit()
+ # Verify column details
+ col = cols[0]
+ assert col.column_name.lower() == "name", "Column name should be 'name'"
+ assert col.table_name.lower() == "columns_test", "Table name should be 'columns_test'"
+ assert (
+ col.table_schem.lower() == "pytest_cols_schema"
+ ), "Schema should be 'pytest_cols_schema'"
+ assert col.nullable == 0, "name column should be non-nullable"
+ # Get column using pattern (% wildcard)
+ pattern_cols = cursor.columns(
+ table="columns_test", schema="pytest_cols_schema", column="%date%"
+ ).fetchall()
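+ # In ODBC pattern-value arguments, '%' matches any sequence of zero or
+ # more characters and '_' matches exactly one character.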
-def test_varcharmax_executemany_streaming(cursor, db_connection):
- """Streaming insert + fetch > 4k VARCHAR(MAX) using executemany with all fetch modes."""
- try:
- values = ["A" * 4100, "B" * 5000]
- cursor.execute("CREATE TABLE #pytest_varcharmax (col VARCHAR(MAX))")
- db_connection.commit()
+ # Should find created_date column
+ assert len(pattern_cols) == 1, "Should find 1 column matching '%date%'"
- # --- executemany insert ---
- cursor.executemany("INSERT INTO #pytest_varcharmax VALUES (?)", [(v,) for v in values])
- db_connection.commit()
+ assert (
+ pattern_cols[0].column_name.lower() == "created_date"
+ ), "Should find created_date column"
- # --- fetchall ---
- cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
- rows = [r[0] for r in cursor.fetchall()]
- assert rows == sorted(values, key=len)
+ # Get multiple columns with pattern
+ multi_cols = cursor.columns(
+ table="columns_test",
+ schema="pytest_cols_schema",
+ column="%d%", # Should match id, description, created_date
+ ).fetchall()
- # --- fetchone ---
- cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
- r1 = cursor.fetchone()[0]
- r2 = cursor.fetchone()[0]
- assert {r1, r2} == set(values)
- assert cursor.fetchone() is None
+ # At least 3 columns should match this pattern
+ assert len(multi_cols) >= 3, "Should find at least 3 columns matching '%d%'"
+ match_names = [col.column_name.lower() for col in multi_cols]
+ assert "id" in match_names, "id should match '%d%'"
+ assert "description" in match_names, "description should match '%d%'"
+ assert "created_date" in match_names, "created_date should match '%d%'"
- # --- fetchmany ---
- cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
- batch = [r[0] for r in cursor.fetchmany(1)]
- assert batch[0] in values
finally:
- cursor.execute("DROP TABLE #pytest_varcharmax")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_varbinarymax_executemany_streaming(cursor, db_connection):
- """Streaming insert + fetch > 4k VARBINARY(MAX) using executemany with all fetch modes."""
+def test_columns_with_underscore_pattern(cursor):
+ """Test columns with underscore wildcard pattern"""
try:
- values = [b"\x01" * 4100, b"\x02" * 5000]
- cursor.execute("CREATE TABLE #pytest_varbinarymax (col VARBINARY(MAX))")
- db_connection.commit()
+ # Get columns with underscore pattern (one character wildcard)
+ # Looking for 'id' (exactly 2 chars)
+ cols = cursor.columns(
+ table="columns_test", schema="pytest_cols_schema", column="__"
+ ).fetchall()
- # --- executemany insert ---
- cursor.executemany("INSERT INTO #pytest_varbinarymax VALUES (?)", [(v,) for v in values])
- db_connection.commit()
+ # Should find 'id' column
+ id_found = False
+ for col in cols:
+ if col.column_name.lower() == "id" and col.table_name.lower() == "columns_test":
+ id_found = True
+ break
- # --- fetchall ---
- cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
- rows = [r[0] for r in cursor.fetchall()]
- assert rows == sorted(values, key=len)
+ assert id_found, "Should find 'id' column with pattern '__'"
- # --- fetchone ---
- cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
- r1 = cursor.fetchone()[0]
- r2 = cursor.fetchone()[0]
- assert {r1, r2} == set(values)
- assert cursor.fetchone() is None
+ # Try a more complex pattern with both % and _
+ # For example: '%_d%' matches any column with 'd' as the second or later character
+ pattern_cols = cursor.columns(
+ table="columns_test", schema="pytest_cols_schema", column="%_d%"
+ ).fetchall()
+
+ # Should match 'id' and 'created_date' (any name with 'd' at position 2 or later)
+ match_names = [
+ col.column_name.lower()
+ for col in pattern_cols
+ if col.table_name.lower() == "columns_test"
+ ]
+
+ # At least 'created_date' should match this pattern
+ assert "created_date" in match_names, "created_date should match '%_d%'"
- # --- fetchmany ---
- cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
- batch = [r[0] for r in cursor.fetchmany(1)]
- assert batch[0] in values
finally:
- cursor.execute("DROP TABLE #pytest_varbinarymax")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_date_string_parameter_binding(cursor, db_connection):
- """Verify that date-like strings are treated as strings in parameter binding"""
- table_name = "#pytest_date_string"
+def test_columns_data_types(cursor):
+ """Test columns returns correct data type information"""
try:
- drop_table_if_exists(cursor, table_name)
- cursor.execute(f"""
- CREATE TABLE {table_name} (
- a_column VARCHAR(20)
- )
- """)
- cursor.execute(f"INSERT INTO {table_name} (a_column) VALUES ('string1'), ('string2')")
- db_connection.commit()
+ # Get all columns from test table
+ cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
- date_str = "2025-08-12"
+ # Create a dictionary mapping column names to their details
+ col_dict = {col.column_name.lower(): col for col in cols}
- # Should fail to match anything, since binding may treat it as DATE not VARCHAR
- cursor.execute(
- f"SELECT a_column FROM {table_name} WHERE RIGHT(a_column, 10) = ?",
- (date_str,),
- )
- rows = cursor.fetchall()
+ # Check data types by name (case insensitive checks)
+ # Note: We're checking type_name as a string to avoid SQL type code inconsistencies
+ # between drivers
- assert rows == [], f"Expected no match for date-like string, got {rows}"
+ # INT column
+ assert "int" in col_dict["id"].type_name.lower(), "id should be INT type"
- except Exception as e:
- pytest.fail(f"Date string parameter binding test failed: {e}")
- finally:
- drop_table_if_exists(cursor, table_name)
- db_connection.commit()
+ # NVARCHAR column
+ assert any(
+ name in col_dict["name"].type_name.lower()
+ for name in ["nvarchar", "varchar", "char", "wchar"]
+ ), "name should be NVARCHAR type"
+ # DECIMAL column
+ assert any(
+ name in col_dict["price"].type_name.lower() for name in ["decimal", "numeric", "money"]
+ ), "price should be DECIMAL type"
-def test_time_string_parameter_binding(cursor, db_connection):
- """Verify that time-like strings are treated as strings in parameter binding"""
- table_name = "#pytest_time_string"
- try:
- drop_table_if_exists(cursor, table_name)
- cursor.execute(f"""
- CREATE TABLE {table_name} (
- time_col VARCHAR(22)
- )
- """)
- cursor.execute(f"INSERT INTO {table_name} (time_col) VALUES ('prefix_14:30:45_suffix')")
- db_connection.commit()
+ # BIT column
+ assert any(
+ name in col_dict["is_active"].type_name.lower() for name in ["bit", "boolean"]
+ ), "is_active should be BIT type"
- time_str = "14:30:45"
+ # TEXT column
+ assert any(
+ name in col_dict["notes"].type_name.lower() for name in ["text", "char", "varchar"]
+ ), "notes should be TEXT type"
- # This should fail because '14:30:45' gets converted to TIME type
- # and SQL Server can't compare TIME against VARCHAR with prefix/suffix
- cursor.execute(f"SELECT time_col FROM {table_name} WHERE time_col = ?", (time_str,))
- rows = cursor.fetchall()
+ # Check nullable flag
+ assert col_dict["id"].nullable == 0, "id should be non-nullable"
+ assert col_dict["description"].nullable == 1, "description should be nullable"
- assert rows == [], f"Expected no match for time-like string, got {rows}"
+ # Check column size
+ assert col_dict["name"].column_size == 100, "name should have size 100"
+
+ # Check decimal digits for numeric type
+ assert col_dict["price"].decimal_digits == 2, "price should have 2 decimal digits"
- except Exception as e:
- pytest.fail(f"Time string parameter binding test failed: {e}")
finally:
- drop_table_if_exists(cursor, table_name)
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-def test_datetime_string_parameter_binding(cursor, db_connection):
- """Verify that datetime-like strings are treated as strings in parameter binding"""
- table_name = "#pytest_datetime_string"
- try:
- drop_table_if_exists(cursor, table_name)
- cursor.execute(f"""
- CREATE TABLE {table_name} (
- datetime_col VARCHAR(33)
- )
- """)
- cursor.execute(
- f"INSERT INTO {table_name} (datetime_col) VALUES ('prefix_2025-08-12T14:30:45_suffix')"
- )
- db_connection.commit()
+def test_columns_nonexistent(cursor):
+ """Test columns with non-existent table or column"""
+ # Test with non-existent table
+ table_cols = cursor.columns(table="nonexistent_table_xyz123").fetchall()
+ assert len(table_cols) == 0, "Should return empty list for non-existent table"
- datetime_str = "2025-08-12T14:30:45"
+ # Test with non-existent column in existing table
+ col_cols = cursor.columns(
+ table="columns_test",
+ schema="pytest_cols_schema",
+ column="nonexistent_column_xyz123",
+ ).fetchall()
+ assert len(col_cols) == 0, "Should return empty list for non-existent column"
- # This should fail because '2025-08-12T14:30:45' gets converted to TIMESTAMP type
- # and SQL Server can't compare TIMESTAMP against VARCHAR with prefix/suffix
- cursor.execute(
- f"SELECT datetime_col FROM {table_name} WHERE datetime_col = ?",
- (datetime_str,),
- )
- rows = cursor.fetchall()
+ # Test with non-existent schema
+ schema_cols = cursor.columns(
+ table="columns_test", schema="nonexistent_schema_xyz123"
+ ).fetchall()
+ assert len(schema_cols) == 0, "Should return empty list for non-existent schema"
- assert rows == [], f"Expected no match for datetime-like string, got {rows}"
- except Exception as e:
- pytest.fail(f"Datetime string parameter binding test failed: {e}")
- finally:
- drop_table_if_exists(cursor, table_name)
- db_connection.commit()
+def test_columns_catalog_filter(cursor):
+ """Test columns with catalog filter"""
+ try:
+ # Get current database name
+ cursor.execute("SELECT DB_NAME() AS current_db")
+ current_db = cursor.fetchone().current_db
+ # Get columns with current catalog
+ cols = cursor.columns(
+ table="columns_test", catalog=current_db, schema="pytest_cols_schema"
+ ).fetchall()
-# ---------------------------------------------------------
-# Test 1: Basic numeric insertion and fetch roundtrip
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "precision, scale, value",
- [
- (10, 2, decimal.Decimal("12345.67")),
- (10, 4, decimal.Decimal("12.3456")),
- (10, 0, decimal.Decimal("1234567890")),
- ],
-)
-def test_numeric_basic_roundtrip(cursor, db_connection, precision, scale, value):
- """Verify simple numeric values roundtrip correctly"""
- table_name = f"#pytest_numeric_basic_{precision}_{scale}"
- try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
- db_connection.commit()
+ # Verify catalog filter worked
+ assert len(cols) > 0, "Should find columns with correct catalog"
- cursor.execute(f"SELECT val FROM {table_name}")
- row = cursor.fetchone()
- assert row is not None, "Expected one row to be returned"
- fetched = row[0]
+ # Check catalog in results
+ for col in cols:
+ # Some drivers might return None for catalog
+ if col.table_cat is not None:
+ assert col.table_cat.lower() == current_db.lower(), "Wrong table catalog"
- expected = value.quantize(decimal.Decimal(f"1e-{scale}")) if scale > 0 else value
- assert fetched == expected, f"Expected {expected}, got {fetched}"
+ # Test with non-existent catalog
+ fake_cols = cursor.columns(
+ table="columns_test",
+ catalog="nonexistent_db_xyz123",
+ schema="pytest_cols_schema",
+ ).fetchall()
+ assert len(fake_cols) == 0, "Should return empty list for non-existent catalog"
finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-# ---------------------------------------------------------
-# Test 2: High precision numeric values (near SQL Server max)
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value",
- [
- decimal.Decimal("99999999999999999999999999999999999999"), # 38 digits
- decimal.Decimal("12345678901234567890.1234567890"), # high precision
- ],
-)
-def test_numeric_high_precision_roundtrip(cursor, db_connection, value):
- """Verify high-precision NUMERIC values roundtrip without precision loss"""
- precision, scale = 38, max(0, -value.as_tuple().exponent)
- table_name = "#pytest_numeric_high_precision"
+def test_columns_schema_pattern(cursor):
+ """Test columns with schema name pattern"""
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
- db_connection.commit()
+ # Get columns with schema pattern
+ cols = cursor.columns(table="columns_test", schema="pytest_%").fetchall()
- cursor.execute(f"SELECT val FROM {table_name}")
- row = cursor.fetchone()
- assert row is not None
- assert row[0] == value, f"High-precision roundtrip failed. Expected {value}, got {row[0]}"
+ # Should find our test table columns
+ test_cols = [col for col in cols if col.table_name.lower() == "columns_test"]
+ assert len(test_cols) > 0, "Should find columns using schema pattern"
+
+ # Try a more specific pattern
+ specific_cols = cursor.columns(table="columns_test", schema="pytest_cols%").fetchall()
+
+ # Should still find our test table columns
+ test_cols = [col for col in specific_cols if col.table_name.lower() == "columns_test"]
+ assert len(test_cols) > 0, "Should find columns using specific schema pattern"
finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-# ---------------------------------------------------------
-# Test 3: Negative, zero, and small fractional values
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value",
- [
- decimal.Decimal("-98765.43210"),
- decimal.Decimal("-99999999999999999999.9999999999"),
- decimal.Decimal("0"),
- decimal.Decimal("0.00001"),
- ],
-)
-def test_numeric_negative_and_small_values(cursor, db_connection, value):
- precision, scale = 38, max(0, -value.as_tuple().exponent)
- table_name = "#pytest_numeric_neg_small"
+def test_columns_table_pattern(cursor):
+ """Test columns with table name pattern"""
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
- db_connection.commit()
+ # Get columns with table pattern
+ cols = cursor.columns(table="columns_%", schema="pytest_cols_schema").fetchall()
- cursor.execute(f"SELECT val FROM {table_name}")
- row = cursor.fetchone()
- assert row[0] == value, f"Expected {value}, got {row[0]}"
+ # Should find columns from both test tables
+ tables_found = set()
+ for col in cols:
+ if col.table_name:
+ tables_found.add(col.table_name.lower())
+
+ assert "columns_test" in tables_found, "Should find columns_test with pattern columns_%"
+ assert (
+ "columns_special_test" in tables_found
+ ), "Should find columns_special_test with pattern columns_%"
finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-# ---------------------------------------------------------
-# Test 4: NULL handling and multiple inserts
-# ---------------------------------------------------------
-def test_numeric_null_and_multiple_rows(cursor, db_connection):
- table_name = "#pytest_numeric_nulls"
+def test_columns_ordinal_position(cursor):
+ """Test ordinal_position is correct in columns results"""
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(20,5))")
-
- values = [decimal.Decimal("123.45678"), None, decimal.Decimal("-999.99999")]
- for v in values:
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (v,))
- db_connection.commit()
+ # Get columns for the test table
+ cols = cursor.columns(table="columns_test", schema="pytest_cols_schema").fetchall()
- cursor.execute(f"SELECT val FROM {table_name} ORDER BY val ASC")
- rows = [r[0] for r in cursor.fetchall()]
+ # Sort by ordinal position
+ sorted_cols = sorted(cols, key=lambda col: col.ordinal_position)
- non_null_expected = sorted([v for v in values if v is not None])
- non_null_actual = sorted([v for v in rows if v is not None])
+ # Verify positions are consecutive starting from 1
+ for i, col in enumerate(sorted_cols, 1):
+ assert (
+ col.ordinal_position == i
+ ), f"Column {col.column_name} should have ordinal_position {i}"
- assert (
- non_null_actual == non_null_expected
- ), f"Expected {non_null_expected}, got {non_null_actual}"
- assert any(r is None for r in rows), "Expected one NULL value in result set"
+ # First column should be id (primary key)
+ assert sorted_cols[0].column_name.lower() == "id", "First column should be id"
finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+ # Clean up happens in test_columns_cleanup
+ pass
-# ---------------------------------------------------------
-# Test 5: Boundary precision values (max precision / scale)
-# ---------------------------------------------------------
-def test_numeric_boundary_precision(cursor, db_connection):
- table_name = "#pytest_numeric_boundary"
- precision, scale = 38, 37
- value = decimal.Decimal("0." + "9" * 37) # 0.999... up to 37 digits
+def test_columns_cleanup(cursor, db_connection):
+ """Clean up test tables after testing"""
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision},{scale}))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ # Drop all test tables
+ cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_test")
+ cursor.execute("DROP TABLE IF EXISTS pytest_cols_schema.columns_special_test")
+
+ # Drop the test schema
+ cursor.execute("DROP SCHEMA IF EXISTS pytest_cols_schema")
db_connection.commit()
+ except Exception as e:
+ pytest.fail(f"Test cleanup failed: {e}")
- cursor.execute(f"SELECT val FROM {table_name}")
- row = cursor.fetchone()
- assert row[0] == value, f"Boundary precision mismatch: expected {value}, got {row[0]}"
- finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+def test_lowercase_attribute(cursor, db_connection):
+ """Test that the lowercase attribute properly converts column names to lowercase"""
+ # Store original value to restore after test
+ original_lowercase = mssql_python.lowercase
+ drop_cursor = None
-# ---------------------------------------------------------
-# Test 6: Precision/scale positive exponent (corner case)
-# ---------------------------------------------------------
-def test_numeric_precision_scale_positive_exponent(cursor, db_connection):
try:
- cursor.execute("CREATE TABLE #pytest_numeric_test (numeric_column DECIMAL(10, 2))")
- db_connection.commit()
- cursor.execute(
- "INSERT INTO #pytest_numeric_test (numeric_column) VALUES (?)",
- [decimal.Decimal("31400")],
+ # Create a test table with mixed-case column names
+ cursor.execute("""
+ CREATE TABLE #pytest_lowercase_test (
+ ID INT PRIMARY KEY,
+ UserName VARCHAR(50),
+ EMAIL_ADDRESS VARCHAR(100),
+ PhoneNumber VARCHAR(20)
)
+ """)
db_connection.commit()
- cursor.execute("SELECT numeric_column FROM #pytest_numeric_test")
- row = cursor.fetchone()
- assert row[0] == decimal.Decimal("31400"), "Numeric data parsing failed"
- precision = 5
- scale = 0
- assert precision == 5, "Precision calculation failed"
- assert scale == 0, "Scale calculation failed"
-
- finally:
- cursor.execute("DROP TABLE #pytest_numeric_test")
+ # Insert test data
+ cursor.execute("""
+ INSERT INTO #pytest_lowercase_test (ID, UserName, EMAIL_ADDRESS, PhoneNumber)
+ VALUES (1, 'JohnDoe', 'john@example.com', '555-1234')
+ """)
db_connection.commit()
+ # First test with lowercase=False (default)
+ mssql_python.lowercase = False
+ cursor1 = db_connection.cursor()
+ cursor1.execute("SELECT * FROM #pytest_lowercase_test")
+
+ # Description column names should preserve original case
+ column_names1 = [desc[0] for desc in cursor1.description]
+ assert "ID" in column_names1, "Column 'ID' should be present with original case"
+ assert "UserName" in column_names1, "Column 'UserName' should be present with original case"
-# ---------------------------------------------------------
-# Test 7: Precision/scale negative exponent (corner case)
-# ---------------------------------------------------------
-def test_numeric_precision_scale_negative_exponent(cursor, db_connection):
- try:
- cursor.execute("CREATE TABLE #pytest_numeric_test (numeric_column DECIMAL(10, 5))")
- db_connection.commit()
- cursor.execute(
- "INSERT INTO #pytest_numeric_test (numeric_column) VALUES (?)",
- [decimal.Decimal("0.03140")],
- )
- db_connection.commit()
- cursor.execute("SELECT numeric_column FROM #pytest_numeric_test")
- row = cursor.fetchone()
- assert row[0] == decimal.Decimal("0.03140"), "Numeric data parsing failed"
+ # Make sure to consume all results and close the cursor
+ cursor1.fetchall()
+ cursor1.close()
- precision = 5
- scale = 5
- assert precision == 5, "Precision calculation failed"
- assert scale == 5, "Scale calculation failed"
+ # Now test with lowercase=True
+ mssql_python.lowercase = True
+ cursor2 = db_connection.cursor()
+ cursor2.execute("SELECT * FROM #pytest_lowercase_test")
+
+ # Description column names should be lowercase
+ column_names2 = [desc[0] for desc in cursor2.description]
+ assert "id" in column_names2, "Column names should be lowercase when lowercase=True"
+ assert "username" in column_names2, "Column names should be lowercase when lowercase=True"
+
+ # Make sure to consume all results and close the cursor
+ cursor2.fetchall()
+ cursor2.close()
+
+ # Create a fresh cursor for cleanup
+ drop_cursor = db_connection.cursor()
finally:
- cursor.execute("DROP TABLE #pytest_numeric_test")
- db_connection.commit()
+ # Restore original value
+ mssql_python.lowercase = original_lowercase
+ try:
+ # Use a separate cursor for cleanup
+ if drop_cursor:
+ drop_cursor.execute("DROP TABLE IF EXISTS #pytest_lowercase_test")
+ db_connection.commit()
+ drop_cursor.close()
+ except Exception as e:
+ print(f"Warning: Failed to drop test table: {e}")
+
+
+def test_decimal_separator_function(cursor, db_connection):
+ """Test decimal separator functionality with database operations"""
+ # Store original value to restore after test
+ original_separator = mssql_python.getDecimalSeparator()
-# ---------------------------------------------------------
-# Test 8: fetchmany for numeric values
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "values",
- [[decimal.Decimal("11.11"), decimal.Decimal("22.22"), decimal.Decimal("33.33")]],
-)
-def test_numeric_fetchmany(cursor, db_connection, values):
- table_name = "#pytest_numeric_fetchmany"
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(10,2))")
- for v in values:
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (v,))
+ # Create test table
+ cursor.execute("""
+ CREATE TABLE #pytest_decimal_separator_test (
+ id INT PRIMARY KEY,
+ decimal_value DECIMAL(10, 2)
+ )
+ """)
db_connection.commit()
- cursor.execute(f"SELECT val FROM {table_name} ORDER BY val")
- rows1 = cursor.fetchmany(2)
- rows2 = cursor.fetchmany(2)
- all_rows = [r[0] for r in rows1 + rows2]
+ # Insert test values with default separator (.)
+ test_value = decimal.Decimal("123.45")
+ cursor.execute(
+ """
+ INSERT INTO #pytest_decimal_separator_test (id, decimal_value)
+ VALUES (1, ?)
+ """,
+ [test_value],
+ )
+ db_connection.commit()
+
+ # First test with default decimal separator (.)
+ cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
+ row = cursor.fetchone()
+ default_str = str(row)
+ assert "123.45" in default_str, "Default separator not found in string representation"
+
+ # Now change to comma separator and test string representation
+ mssql_python.setDecimalSeparator(",")
+ cursor.execute("SELECT id, decimal_value FROM #pytest_decimal_separator_test")
+ row = cursor.fetchone()
- assert all_rows == sorted(
- values
- ), f"fetchmany mismatch: expected {sorted(values)}, got {all_rows}"
+ # This should format the decimal with a comma in the string representation
+ comma_str = str(row)
+ assert (
+ "123,45" in comma_str
+ ), f"Expected comma in string representation but got: {comma_str}"
finally:
- cursor.execute(f"DROP TABLE {table_name}")
+ # Restore original decimal separator
+ mssql_python.setDecimalSeparator(original_separator)
+
+ # Cleanup
+ cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_separator_test")
db_connection.commit()
-# ---------------------------------------------------------
-# Test 9: executemany for numeric values
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "values",
- [
- [
- decimal.Decimal("111.1111"),
- decimal.Decimal("222.2222"),
- decimal.Decimal("333.3333"),
- ]
- ],
-)
-def test_numeric_executemany(cursor, db_connection, values):
- precision, scale = 38, 10
- table_name = "#pytest_numeric_executemany"
+def test_decimal_separator_basic_functionality():
+ """Test basic decimal separator functionality without database operations"""
+ # Store original value to restore after test
+ original_separator = mssql_python.getDecimalSeparator()
+
try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision},{scale}))")
+ # Test default value
+ assert mssql_python.getDecimalSeparator() == ".", "Default decimal separator should be '.'"
- params = [(v,) for v in values]
- cursor.executemany(f"INSERT INTO {table_name} (val) VALUES (?)", params)
- db_connection.commit()
+ # Test setting to comma
+ mssql_python.setDecimalSeparator(",")
+ assert (
+ mssql_python.getDecimalSeparator() == ","
+ ), "Decimal separator should be ',' after setting"
- cursor.execute(f"SELECT val FROM {table_name} ORDER BY val")
- rows = [r[0] for r in cursor.fetchall()]
- assert rows == sorted(
- values
- ), f"executemany() mismatch: expected {sorted(values)}, got {rows}"
+ # Test setting to other valid separators
+ mssql_python.setDecimalSeparator(":")
+ assert (
+ mssql_python.getDecimalSeparator() == ":"
+ ), "Decimal separator should be ':' after setting"
+
+ # Test invalid inputs
+ with pytest.raises(ValueError):
+ mssql_python.setDecimalSeparator("") # Empty string
+
+ with pytest.raises(ValueError):
+ mssql_python.setDecimalSeparator("too_long") # More than one character
+
+ with pytest.raises(ValueError):
+ mssql_python.setDecimalSeparator(123) # Not a string
finally:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
+ # Restore original separator
+ mssql_python.setDecimalSeparator(original_separator)
-# ---------------------------------------------------------
-# Test 10: Leading zeros precision loss
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value, expected_precision, expected_scale",
- [
- # Leading zeros (using values that won't become scientific notation)
- (decimal.Decimal("000000123.45"), 38, 2), # Leading zeros in integer part
- (decimal.Decimal("000.0001234"), 38, 7), # Leading zeros in decimal part
- (
- decimal.Decimal("0000000000000.123456789"),
- 38,
- 9,
- ), # Many leading zeros + decimal
- (
- decimal.Decimal("000000.000000123456"),
- 38,
- 12,
- ), # Lots of leading zeros (avoiding E notation)
- ],
-)
-def test_numeric_leading_zeros_precision_loss(
- cursor, db_connection, value, expected_precision, expected_scale
-):
- """Test precision loss with values containing lots of leading zeros"""
- table_name = "#pytest_numeric_leading_zeros"
+def test_decimal_separator_with_multiple_values(cursor, db_connection):
+ """Test decimal separator with multiple different decimal values"""
+ original_separator = mssql_python.getDecimalSeparator()
+
try:
- # Use explicit precision and scale to avoid scientific notation issues
- cursor.execute(
- f"CREATE TABLE {table_name} (val NUMERIC({expected_precision}, {expected_scale}))"
+ # Create test table
+ cursor.execute("""
+ CREATE TABLE #pytest_decimal_multi_test (
+ id INT PRIMARY KEY,
+ positive_value DECIMAL(10, 2),
+ negative_value DECIMAL(10, 2),
+ zero_value DECIMAL(10, 2),
+ small_value DECIMAL(10, 4)
)
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ """)
db_connection.commit()
- cursor.execute(f"SELECT val FROM {table_name}")
+ # Insert test data
+ cursor.execute("""
+ INSERT INTO #pytest_decimal_multi_test VALUES (1, 123.45, -67.89, 0.00, 0.0001)
+ """)
+ db_connection.commit()
+
+ # Test with default separator first
+ cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
row = cursor.fetchone()
- assert row is not None, "Expected one row to be returned"
+ default_str = str(row)
+ assert "123.45" in default_str, "Default positive value formatting incorrect"
+ assert "-67.89" in default_str, "Default negative value formatting incorrect"
- # Normalize both values to the same scale for comparison
- expected = value.quantize(decimal.Decimal(f"1e-{expected_scale}"))
- actual = row[0]
+ # Change to comma separator
+ mssql_python.setDecimalSeparator(",")
+ cursor.execute("SELECT * FROM #pytest_decimal_multi_test")
+ row = cursor.fetchone()
+ comma_str = str(row)
- # Verify that leading zeros are handled correctly during conversion and roundtrip
- assert (
- actual == expected
- ), f"Leading zeros precision loss for {value}, expected {expected}, got {actual}"
+ # Verify comma is used in all decimal values
+ assert "123,45" in comma_str, "Positive value not formatted with comma"
+ assert "-67,89" in comma_str, "Negative value not formatted with comma"
+ assert "0,00" in comma_str, "Zero value not formatted with comma"
+ assert "0,0001" in comma_str, "Small value not formatted with comma"
finally:
- try:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
- except:
- pass
+ # Restore original separator
+ mssql_python.setDecimalSeparator(original_separator)
+
+ # Cleanup
+ cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_multi_test")
+ db_connection.commit()
-# ---------------------------------------------------------
-# Test 11: Extreme exponents precision loss
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value, description",
- [
- (decimal.Decimal("1E-20"), "1E-20 exponent"),
- (decimal.Decimal("1E-38"), "1E-38 exponent"),
- (decimal.Decimal("5E-35"), "5E-35 exponent"),
- (decimal.Decimal("9E-30"), "9E-30 exponent"),
- (decimal.Decimal("2.5E-25"), "2.5E-25 exponent"),
- ],
-)
-def test_numeric_extreme_exponents_precision_loss(cursor, db_connection, value, description):
- """Test precision loss with values having extreme small magnitudes"""
- # Scientific notation values like 1E-20 create scale > precision situations
- # that violate SQL Server's NUMERIC(P,S) rules - this is expected behavior
+def test_decimal_separator_calculations(cursor, db_connection):
+ """Test that decimal separator doesn't affect calculations"""
+ original_separator = mssql_python.getDecimalSeparator()
- table_name = "#pytest_numeric_extreme_exp"
try:
- # Try with a reasonable precision/scale that should handle most cases
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(38, 20))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ # Create test table
+ cursor.execute("""
+ CREATE TABLE #pytest_decimal_calc_test (
+ id INT PRIMARY KEY,
+ value1 DECIMAL(10, 2),
+ value2 DECIMAL(10, 2)
+ )
+ """)
db_connection.commit()
- cursor.execute(f"SELECT val FROM {table_name}")
+ # Insert test data
+ cursor.execute("""
+ INSERT INTO #pytest_decimal_calc_test VALUES (1, 10.25, 5.75)
+ """)
+ db_connection.commit()
+
+ # Test with default separator
+ cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
row = cursor.fetchone()
- assert row is not None, "Expected one row to be returned"
+ assert row.sum_result == decimal.Decimal(
+ "16.00"
+ ), "Sum calculation incorrect with default separator"
- # Verify the value was stored and retrieved
- actual = row[0]
+ # Change to comma separator
+ mssql_python.setDecimalSeparator(",")
- # For extreme small values, check they're mathematically equivalent
- assert abs(actual - value) < decimal.Decimal(
- "1E-18"
- ), f"Extreme exponent value not preserved for {description}: {value} -> {actual}"
+ # Calculations should still work correctly
+ cursor.execute("SELECT value1 + value2 AS sum_result FROM #pytest_decimal_calc_test")
+ row = cursor.fetchone()
+ assert row.sum_result == decimal.Decimal(
+ "16.00"
+ ), "Sum calculation affected by separator change"
- finally:
- try:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
- except:
- pass # Table might not exist if creation failed
+ # But string representation should use comma
+ assert "16,00" in str(row), "Sum result not formatted with comma in string representation"
+ finally:
+ # Restore original separator
+ mssql_python.setDecimalSeparator(original_separator)
-# ---------------------------------------------------------
-# Test 12: 38-digit precision boundary limits
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value",
- [
- # 38 digits with negative exponent
- decimal.Decimal("0." + "0" * 36 + "1"), # 38 digits total (1 + 37 decimal places)
- # very large numbers at 38-digit limit
- decimal.Decimal("9" * 38), # Maximum 38-digit integer
- decimal.Decimal("1" + "0" * 37), # Large 38-digit number
- # Additional boundary cases
- decimal.Decimal("0." + "0" * 35 + "12"), # 37 total digits
- decimal.Decimal("0." + "0" * 34 + "123"), # 36 total digits
- decimal.Decimal("0." + "1" * 37), # All 1's in decimal part
- decimal.Decimal("1." + "9" * 36), # Close to maximum with integer part
- ],
-)
-def test_numeric_precision_boundary_limits(cursor, db_connection, value):
- """Test precision loss with values close to the 38-digit precision limit"""
- precision, scale = 38, 37 # Maximum precision with high scale
- table_name = "#pytest_numeric_boundary_limits"
- try:
- cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ # Cleanup
+ cursor.execute("DROP TABLE IF EXISTS #pytest_decimal_calc_test")
db_connection.commit()
- cursor.execute(f"SELECT val FROM {table_name}")
- row = cursor.fetchone()
- assert row is not None, "Expected one row to be returned"
- # Ensure implementation behaves correctly even at the boundaries of SQL Server's maximum precision
- assert row[0] == value, f"Boundary precision loss for {value}, got {row[0]}"
+def test_executemany_with_uuids(cursor, db_connection):
+ """Test inserting multiple rows with UUIDs and None using executemany."""
+ table_name = "#pytest_uuid_batch"
+ try:
+ cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
+ cursor.execute(f"""
+ CREATE TABLE {table_name} (
+ id UNIQUEIDENTIFIER,
+ description NVARCHAR(50)
+ )
+ """)
+ db_connection.commit()
- except Exception as e:
- # Some boundary values might exceed SQL Server limits
- pytest.skip(f"Value {value} may exceed SQL Server precision limits: {e}")
- finally:
- try:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
- except:
- pass # Table might not exist if creation failed
+ # Prepare test data: mix of UUIDs and None
+ test_data = [
+ [uuid.uuid4(), "Item 1"],
+ [uuid.uuid4(), "Item 2"],
+ [None, "Item 3"],
+ [uuid.uuid4(), "Item 4"],
+ [None, "Item 5"],
+ ]
+ # Map descriptions to original UUIDs for O(1) lookup
+ uuid_map = {desc: uid for uid, desc in test_data}
-# ---------------------------------------------------------
-# Test 13: Negative test - Values exceeding 38-digit precision limit
-# ---------------------------------------------------------
-@pytest.mark.parametrize(
- "value, description",
- [
- (decimal.Decimal("1" + "0" * 38), "39 digits integer"), # 39 digits
- (decimal.Decimal("9" * 39), "39 nines"), # 39 digits of 9s
- (
- decimal.Decimal("12345678901234567890123456789012345678901234567890"),
- "50 digits",
- ), # 50 digits
- (
- decimal.Decimal("0.111111111111111111111111111111111111111"),
- "39 decimal places",
- ), # 39 decimal digits
- (
- decimal.Decimal("1" * 20 + "." + "9" * 20),
- "40 total digits",
- ), # 40 total digits (20+20)
- (
- decimal.Decimal("123456789012345678901234567890.12345678901234567"),
- "47 total digits",
- ), # 47 total digits
- ],
-)
-def test_numeric_beyond_38_digit_precision_negative(cursor, db_connection, value, description):
- """
- Negative test: Ensure proper error handling for values exceeding SQL Server's 38-digit precision limit.
+ # Execute batch insert
+ cursor.executemany(f"INSERT INTO {table_name} (id, description) VALUES (?, ?)", test_data)
+ cursor.connection.commit()
- After our precision validation fix, mssql-python should now gracefully reject values with precision > 38
- by raising a ValueError with a clear message, matching pyodbc behavior.
- """
- # These values should be rejected by our precision validation
- with pytest.raises(ValueError) as exc_info:
- cursor.execute("SELECT ?", (value,))
+ # Fetch and verify
+ cursor.execute(f"SELECT id, description FROM {table_name}")
+ rows = cursor.fetchall()
- error_msg = str(exc_info.value)
- assert (
- "Precision of the numeric value is too high" in error_msg
- ), f"Expected precision error message for {description}, got: {error_msg}"
- assert (
- "maximum precision supported by SQL Server is 38" in error_msg
- ), f"Expected SQL Server precision limit message for {description}, got: {error_msg}"
+ assert len(rows) == len(test_data), "Number of fetched rows does not match inserted rows."
+ for retrieved_uuid, retrieved_desc in rows:
+ expected_uuid = uuid_map[retrieved_desc]
-@pytest.mark.parametrize(
- "values, description",
- [
- # Small decimal values with scientific notation
- (
- [
- decimal.Decimal("0.70000000000696"),
- decimal.Decimal("1E-7"),
- decimal.Decimal("0.00001"),
- decimal.Decimal("6.96E-12"),
- ],
- "Small decimals with scientific notation",
- ),
- # Large decimal values with scientific notation
- (
- [
- decimal.Decimal("4E+8"),
- decimal.Decimal("1.521E+15"),
- decimal.Decimal("5.748E+18"),
- decimal.Decimal("1E+11"),
- ],
- "Large decimals with positive exponents",
- ),
- # Medium-sized decimals
- (
- [
- decimal.Decimal("123.456"),
- decimal.Decimal("9999.9999"),
- decimal.Decimal("1000000.50"),
- ],
- "Medium-sized decimals",
- ),
- ],
-)
-def test_decimal_scientific_notation_to_varchar(cursor, db_connection, values, description):
- """
- Test that Decimal values with scientific notation are properly converted
- to VARCHAR without triggering 'varchar to numeric' conversion errors.
- This verifies that the driver correctly handles Decimal to VARCHAR conversion
- """
- table_name = "#pytest_decimal_varchar_conversion"
- try:
- cursor.execute(f"CREATE TABLE {table_name} (id INT IDENTITY(1,1), val VARCHAR(50))")
+ if expected_uuid is None:
+ assert (
+ retrieved_uuid is None
+ ), f"Expected None for '{retrieved_desc}', got {retrieved_uuid}"
+ else:
+ # Convert string to UUID if needed
+ if isinstance(retrieved_uuid, str):
+ retrieved_uuid = uuid.UUID(retrieved_uuid)
- for val in values:
- cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (val,))
+ assert isinstance(
+ retrieved_uuid, uuid.UUID
+ ), f"Expected UUID, got {type(retrieved_uuid)}"
+ assert retrieved_uuid == expected_uuid, f"UUID mismatch for '{retrieved_desc}'"
+
+ finally:
+ cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
db_connection.commit()
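+
+
+# Editor's sketch (hypothetical helper): depending on driver settings a
+# UNIQUEIDENTIFIER may come back as uuid.UUID or as its string form, which is
+# why the test above normalizes with uuid.UUID(...) before comparing.
+def _sketch_normalize_guid(value):
+    return uuid.UUID(value) if isinstance(value, str) else value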
- cursor.execute(f"SELECT val FROM {table_name} ORDER BY id")
- rows = cursor.fetchall()
- assert len(rows) == len(values), f"Expected {len(values)} rows, got {len(rows)}"
+def test_nvarcharmax_executemany_streaming(cursor, db_connection):
+ """Streaming insert + fetch > 4k NVARCHAR(MAX) using executemany with all fetch modes."""
+ try:
+ values = ["Ω" * 4100, "漢" * 5000]
+ cursor.execute("CREATE TABLE #pytest_nvarcharmax (col NVARCHAR(MAX))")
+ db_connection.commit()
- for i, (row, expected_val) in enumerate(zip(rows, values)):
- stored_val = decimal.Decimal(row[0])
- assert (
- stored_val == expected_val
- ), f"{description}: Row {i} mismatch - expected {expected_val}, got {stored_val}"
+ # --- executemany insert ---
+ cursor.executemany("INSERT INTO #pytest_nvarcharmax VALUES (?)", [(v,) for v in values])
+ db_connection.commit()
- finally:
- try:
- cursor.execute(f"DROP TABLE {table_name}")
- db_connection.commit()
- except:
- pass
+ # --- fetchall ---
+ cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
+ rows = [r[0] for r in cursor.fetchall()]
+ assert rows == sorted(values, key=len)
+ # --- fetchone ---
+ cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
+ r1 = cursor.fetchone()[0]
+ r2 = cursor.fetchone()[0]
+ assert {r1, r2} == set(values)
+ assert cursor.fetchone() is None
-SMALL_XML = "<root><item>1</item></root>"
-LARGE_XML = "<root>" + "".join(f"<item>{i}</item>" for i in range(10000)) + "</root>"
-EMPTY_XML = ""
-INVALID_XML = "<root><item>"  # malformed
+ # --- fetchmany ---
+ cursor.execute("SELECT col FROM #pytest_nvarcharmax ORDER BY LEN(col)")
+ batch = [r[0] for r in cursor.fetchmany(1)]
+ assert batch[0] in values
+ finally:
+ cursor.execute("DROP TABLE #pytest_nvarcharmax")
+ db_connection.commit()
-def test_xml_basic_insert_fetch(cursor, db_connection):
- """Test insert and fetch of a small XML value."""
+def test_varcharmax_executemany_streaming(cursor, db_connection):
+ """Streaming insert + fetch > 4k VARCHAR(MAX) using executemany with all fetch modes."""
try:
- cursor.execute(
- "CREATE TABLE #pytest_xml_basic (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
- )
+ values = ["A" * 4100, "B" * 5000]
+ cursor.execute("CREATE TABLE #pytest_varcharmax (col VARCHAR(MAX))")
db_connection.commit()
- cursor.execute("INSERT INTO #pytest_xml_basic (xml_col) VALUES (?);", SMALL_XML)
+ # --- executemany insert ---
+ cursor.executemany("INSERT INTO #pytest_varcharmax VALUES (?)", [(v,) for v in values])
db_connection.commit()
- row = cursor.execute("SELECT xml_col FROM #pytest_xml_basic;").fetchone()
- assert row[0] == SMALL_XML
+ # --- fetchall ---
+ cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
+ rows = [r[0] for r in cursor.fetchall()]
+ assert rows == sorted(values, key=len)
+
+ # --- fetchone ---
+ cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
+ r1 = cursor.fetchone()[0]
+ r2 = cursor.fetchone()[0]
+ assert {r1, r2} == set(values)
+ assert cursor.fetchone() is None
+
+ # --- fetchmany ---
+ cursor.execute("SELECT col FROM #pytest_varcharmax ORDER BY LEN(col)")
+ batch = [r[0] for r in cursor.fetchmany(1)]
+ assert batch[0] in values
finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_xml_basic;")
+ cursor.execute("DROP TABLE #pytest_varcharmax")
db_connection.commit()
-def test_xml_empty_and_null(cursor, db_connection):
- """Test insert and fetch of empty XML and NULL values."""
+def test_varbinarymax_executemany_streaming(cursor, db_connection):
+ """Streaming insert + fetch > 4k VARBINARY(MAX) using executemany with all fetch modes."""
try:
- cursor.execute(
- "CREATE TABLE #pytest_xml_empty_null (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
- )
+ values = [b"\x01" * 4100, b"\x02" * 5000]
+ cursor.execute("CREATE TABLE #pytest_varbinarymax (col VARBINARY(MAX))")
+ db_connection.commit()
+
+ # --- executemany insert ---
+ cursor.executemany("INSERT INTO #pytest_varbinarymax VALUES (?)", [(v,) for v in values])
+ db_connection.commit()
+
+ # --- fetchall ---
+ cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
+ rows = [r[0] for r in cursor.fetchall()]
+ assert rows == sorted(values, key=len)
+
+ # --- fetchone ---
+ cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
+ r1 = cursor.fetchone()[0]
+ r2 = cursor.fetchone()[0]
+ assert {r1, r2} == set(values)
+ assert cursor.fetchone() is None
+
+ # --- fetchmany ---
+ cursor.execute("SELECT col FROM #pytest_varbinarymax ORDER BY DATALENGTH(col)")
+ batch = [r[0] for r in cursor.fetchmany(1)]
+ assert batch[0] in values
+ finally:
+ cursor.execute("DROP TABLE #pytest_varbinarymax")
db_connection.commit()
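+
+
+# Editor's note: the three MAX-type tests above all use payloads past the 4k
+# mark named in their docstrings, so insert and fetch must go through the
+# driver's streaming path rather than a single small bound buffer.
+def _sketch_streaming_payload_sizes():
+    payloads = ["Ω" * 4100, "A" * 5000, b"\x01" * 4100]
+    assert all(len(p) > 4000 for p in payloads)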
- cursor.execute("INSERT INTO #pytest_xml_empty_null (xml_col) VALUES (?);", EMPTY_XML)
- cursor.execute("INSERT INTO #pytest_xml_empty_null (xml_col) VALUES (?);", None)
- db_connection.commit()
- rows = [
- r[0]
- for r in cursor.execute(
- "SELECT xml_col FROM #pytest_xml_empty_null ORDER BY id;"
- ).fetchall()
- ]
- assert rows[0] == EMPTY_XML
- assert rows[1] is None
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_xml_empty_null;")
+def test_date_string_parameter_binding(cursor, db_connection):
+ """Verify that date-like strings are treated as strings in parameter binding"""
+ table_name = "#pytest_date_string"
+ try:
+ drop_table_if_exists(cursor, table_name)
+ cursor.execute(f"""
+ CREATE TABLE {table_name} (
+ a_column VARCHAR(20)
+ )
+ """)
+ cursor.execute(f"INSERT INTO {table_name} (a_column) VALUES ('string1'), ('string2')")
db_connection.commit()
+ date_str = "2025-08-12"
-def test_xml_large_insert(cursor, db_connection):
- """Test insert and fetch of a large XML value to verify streaming/DAE."""
- try:
+        # Bound as a string this simply matches nothing; if the driver bound
+        # it as DATE, SQL Server would raise a conversion error instead
cursor.execute(
- "CREATE TABLE #pytest_xml_large (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ f"SELECT a_column FROM {table_name} WHERE RIGHT(a_column, 10) = ?",
+ (date_str,),
)
- db_connection.commit()
+ rows = cursor.fetchall()
- cursor.execute("INSERT INTO #pytest_xml_large (xml_col) VALUES (?);", LARGE_XML)
- db_connection.commit()
+ assert rows == [], f"Expected no match for date-like string, got {rows}"
- row = cursor.execute("SELECT xml_col FROM #pytest_xml_large;").fetchone()
- assert row[0] == LARGE_XML
+ except Exception as e:
+ pytest.fail(f"Date string parameter binding test failed: {e}")
finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_xml_large;")
+ drop_table_if_exists(cursor, table_name)
db_connection.commit()
-def test_xml_batch_insert(cursor, db_connection):
- """Test batch insert (executemany) of multiple XML values."""
+def test_time_string_parameter_binding(cursor, db_connection):
+ """Verify that time-like strings are treated as strings in parameter binding"""
+ table_name = "#pytest_time_string"
try:
- cursor.execute(
- "CREATE TABLE #pytest_xml_batch (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
- )
+ drop_table_if_exists(cursor, table_name)
+ cursor.execute(f"""
+ CREATE TABLE {table_name} (
+ time_col VARCHAR(22)
+ )
+ """)
+ cursor.execute(f"INSERT INTO {table_name} (time_col) VALUES ('prefix_14:30:45_suffix')")
db_connection.commit()
-    xmls = [f"<root><item>{i}</item></root>" for i in range(5)]
- cursor.executemany(
- "INSERT INTO #pytest_xml_batch (xml_col) VALUES (?);", [(x,) for x in xmls]
- )
- db_connection.commit()
+ time_str = "14:30:45"
- rows = [
- r[0]
- for r in cursor.execute("SELECT xml_col FROM #pytest_xml_batch ORDER BY id;").fetchall()
- ]
- assert rows == xmls
+        # Bound as a string, '14:30:45' matches nothing; if the driver
+        # converted it to TIME, comparing against the prefixed VARCHAR value
+        # would raise a conversion error instead
+ cursor.execute(f"SELECT time_col FROM {table_name} WHERE time_col = ?", (time_str,))
+ rows = cursor.fetchall()
+
+ assert rows == [], f"Expected no match for time-like string, got {rows}"
+
+ except Exception as e:
+ pytest.fail(f"Time string parameter binding test failed: {e}")
finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_xml_batch;")
+ drop_table_if_exists(cursor, table_name)
db_connection.commit()
-def test_xml_malformed_input(cursor, db_connection):
- """Verify driver raises error for invalid XML input."""
+def test_datetime_string_parameter_binding(cursor, db_connection):
+ """Verify that datetime-like strings are treated as strings in parameter binding"""
+ table_name = "#pytest_datetime_string"
try:
+ drop_table_if_exists(cursor, table_name)
+ cursor.execute(f"""
+ CREATE TABLE {table_name} (
+ datetime_col VARCHAR(33)
+ )
+ """)
cursor.execute(
- "CREATE TABLE #pytest_xml_invalid (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ f"INSERT INTO {table_name} (datetime_col) VALUES ('prefix_2025-08-12T14:30:45_suffix')"
)
db_connection.commit()
- with pytest.raises(Exception):
- cursor.execute("INSERT INTO #pytest_xml_invalid (xml_col) VALUES (?);", INVALID_XML)
- finally:
- cursor.execute("DROP TABLE IF EXISTS #pytest_xml_invalid;")
- db_connection.commit()
+ datetime_str = "2025-08-12T14:30:45"
+        # Bound as a string, the value matches nothing; if the driver
+        # converted it to a datetime, comparing against the prefixed VARCHAR
+        # value would raise a conversion error instead
+ cursor.execute(
+ f"SELECT datetime_col FROM {table_name} WHERE datetime_col = ?",
+ (datetime_str,),
+ )
+ rows = cursor.fetchall()
-# ==================== CODE COVERAGE TEST CASES ====================
+ assert rows == [], f"Expected no match for datetime-like string, got {rows}"
+ except Exception as e:
+ pytest.fail(f"Datetime string parameter binding test failed: {e}")
+ finally:
+ drop_table_if_exists(cursor, table_name)
+ db_connection.commit()
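+
+
+# Editor's sketch (hypothetical usage): when a date/time-shaped string must
+# bind as character data explicitly, setinputsizes (validated later in this
+# module) can pin the parameter's SQL type.
+def _sketch_pin_varchar_binding(cursor):
+    from mssql_python.constants import ConstantsDDBC
+
+    cursor.setinputsizes([(ConstantsDDBC.SQL_VARCHAR.value, 20, 0)])
+    return cursor.execute("SELECT ? AS s", ("2025-08-12",)).fetchone()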
-def test_decimal_special_values_coverage(cursor):
- """Test decimal processing with special values like NaN and Infinity (Lines 213-221)."""
- from decimal import Decimal
- # Test special decimal values that have string exponents
- test_values = [
- Decimal("NaN"), # Should have str exponent 'n'
- Decimal("Infinity"), # Should have str exponent 'F'
- Decimal("-Infinity"), # Should have str exponent 'F'
- ]
+# ---------------------------------------------------------
+# Test 1: Basic numeric insertion and fetch roundtrip
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "precision, scale, value",
+ [
+ (10, 2, decimal.Decimal("12345.67")),
+ (10, 4, decimal.Decimal("12.3456")),
+ (10, 0, decimal.Decimal("1234567890")),
+ ],
+)
+def test_numeric_basic_roundtrip(cursor, db_connection, precision, scale, value):
+ """Verify simple numeric values roundtrip correctly"""
+ table_name = f"#pytest_numeric_basic_{precision}_{scale}"
+ try:
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
- for special_val in test_values:
- try:
- # This should trigger the special value handling path (lines 217-218)
- # But there's a bug in the code - it doesn't handle string exponents properly after line 218
- cursor._get_numeric_data(special_val)
- except (ValueError, TypeError) as e:
- # Expected - either ValueError for unsupported values or TypeError due to str/int comparison
- # This exercises the special value code path (lines 217-218) even though it errors later
- assert (
- "not supported" in str(e)
- or "Precision of the numeric value is too high" in str(e)
- or "'>' not supported between instances of 'str' and 'int'" in str(e)
- )
- except Exception as e:
- # Other exceptions are also acceptable as we're testing error paths
- pass
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row is not None, "Expected one row to be returned"
+ fetched = row[0]
+ expected = value.quantize(decimal.Decimal(f"1e-{scale}")) if scale > 0 else value
+ assert fetched == expected, f"Expected {expected}, got {fetched}"
-def test_decimal_negative_exponent_edge_cases(cursor):
- """Test decimal processing with negative exponents (Lines 230-239)."""
- from decimal import Decimal
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
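+
+
+# Worked example for the quantize() normalization above: NUMERIC(p, s) values
+# come back at exactly scale s, so the inserted Decimal is rescaled before
+# comparison (pure decimal arithmetic, no database required).
+def _sketch_quantize_to_scale():
+    rescaled = decimal.Decimal("12.34").quantize(decimal.Decimal("1e-4"))
+    assert rescaled == decimal.Decimal("12.3400")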
- # Test case where digits < abs(exponent) -> triggers lines 234-235
- # Example: 0.0001 -> digits=(1,), exponent=-4 -> precision=4, scale=4
- test_decimal = Decimal("0.0001") # digits=(1,), exponent=-4
+# ---------------------------------------------------------
+# Test 2: High precision numeric values (near SQL Server max)
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value",
+ [
+ decimal.Decimal("99999999999999999999999999999999999999"), # 38 digits
+ decimal.Decimal("12345678901234567890.1234567890"), # high precision
+ ],
+)
+def test_numeric_high_precision_roundtrip(cursor, db_connection, value):
+ """Verify high-precision NUMERIC values roundtrip without precision loss"""
+ precision, scale = 38, max(0, -value.as_tuple().exponent)
+ table_name = "#pytest_numeric_high_precision"
try:
- cursor._get_numeric_data(test_decimal)
- except ValueError as e:
- # This is expected - the method should process it and potentially raise precision error
- pass
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row is not None
+ assert row[0] == value, f"High-precision roundtrip failed. Expected {value}, got {row[0]}"
-def test_decimal_string_conversion_edge_cases(cursor):
- """Test decimal string conversion edge cases (Lines 248-262)."""
- from decimal import Decimal
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
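+
+
+# The scale above is read off Decimal.as_tuple(): a negative exponent counts
+# the fractional digits.
+def _sketch_scale_from_exponent():
+    exponent = decimal.Decimal("12345678901234567890.1234567890").as_tuple().exponent
+    assert max(0, -exponent) == 10  # ten fractional digits -> scale 10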
- # Test case 1: positive exponent (line 252)
- decimal_with_pos_exp = Decimal("123E2") # Should add zeros
- try:
- cursor._get_numeric_data(decimal_with_pos_exp)
- except ValueError:
- pass # Expected for large values
- # Test case 2: negative exponent with padding needed (line 255)
- decimal_with_neg_exp = Decimal("1E-10") # Should need zero padding
+# ---------------------------------------------------------
+# Test 3: Negative, zero, and small fractional values
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value",
+ [
+ decimal.Decimal("-98765.43210"),
+ decimal.Decimal("-99999999999999999999.9999999999"),
+ decimal.Decimal("0"),
+ decimal.Decimal("0.00001"),
+ ],
+)
+def test_numeric_negative_and_small_values(cursor, db_connection, value):
+ precision, scale = 38, max(0, -value.as_tuple().exponent)
+ table_name = "#pytest_numeric_neg_small"
try:
- cursor._get_numeric_data(decimal_with_neg_exp)
- except ValueError:
- pass
-
- # Test case 3: empty string case (line 258)
- # This is harder to trigger directly, but the logic handles it
- zero_decimal = Decimal("0")
- cursor._get_numeric_data(zero_decimal)
-
-
-def test_decimal_precision_special_values_executemany(cursor):
- """Test _get_decimal_precision with special values (Lines 354-362)."""
- from decimal import Decimal
-
- # Test special values in executemany context
- test_values = [Decimal("NaN"), Decimal("Infinity"), Decimal("-Infinity")]
-
- for special_val in test_values:
- try:
- # This should trigger the special value handling (line 358)
- precision = cursor._get_decimal_precision(special_val)
- assert precision == 38 # Should return default precision
- except Exception:
- # Some special values might not be supported
- pass
-
-
-def test_cursor_close_connection_tracking_error(db_connection):
- """Test cursor close with connection tracking error (Lines 578-586)."""
-
- cursor = db_connection.cursor()
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
- # Corrupt the connection's cursor tracking to cause error
- original_cursors = db_connection._cursors
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row[0] == value, f"Expected {value}, got {row[0]}"
- # Replace with something that will cause an error on discard
- class ErrorSet:
- def discard(self, item):
- raise RuntimeError("Simulated cursor tracking error")
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
- db_connection._cursors = ErrorSet()
+# ---------------------------------------------------------
+# Test 4: NULL handling and multiple inserts
+# ---------------------------------------------------------
+def test_numeric_null_and_multiple_rows(cursor, db_connection):
+ table_name = "#pytest_numeric_nulls"
try:
- # This should trigger the exception handling in close() (line 582)
- cursor.close()
- # Should complete without raising the tracking error
- assert cursor.closed
- finally:
- # Restore original cursor tracking
- db_connection._cursors = original_cursors
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(20,5))")
+ values = [decimal.Decimal("123.45678"), None, decimal.Decimal("-999.99999")]
+ for v in values:
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (v,))
+ db_connection.commit()
-def test_setinputsizes_validation_errors(cursor):
- """Test setinputsizes parameter validation (Lines 645-669)."""
- from mssql_python.constants import ConstantsDDBC
+ cursor.execute(f"SELECT val FROM {table_name} ORDER BY val ASC")
+ rows = [r[0] for r in cursor.fetchall()]
- # Test invalid column_size (lines 649-651)
- with pytest.raises(ValueError, match="Invalid column size"):
- cursor.setinputsizes([(ConstantsDDBC.SQL_VARCHAR.value, -1, 0)])
+ non_null_expected = sorted([v for v in values if v is not None])
+ non_null_actual = sorted([v for v in rows if v is not None])
- with pytest.raises(ValueError, match="Invalid column size"):
- cursor.setinputsizes([(ConstantsDDBC.SQL_VARCHAR.value, "invalid", 0)])
+ assert (
+ non_null_actual == non_null_expected
+ ), f"Expected {non_null_expected}, got {non_null_actual}"
+ assert any(r is None for r in rows), "Expected one NULL value in result set"
- # Test invalid decimal_digits (lines 654-656)
- with pytest.raises(ValueError, match="Invalid decimal digits"):
- cursor.setinputsizes([(ConstantsDDBC.SQL_DECIMAL.value, 10, -1)])
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
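+
+
+# Editor's sketch: NULL sort position is server-defined (SQL Server puts
+# NULLs first ascending), so the test above compares non-NULL values
+# separately instead of relying on where the NULL lands.
+def _sketch_filter_nulls(rows):
+    return sorted(r for r in rows if r is not None)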
- with pytest.raises(ValueError, match="Invalid decimal digits"):
- cursor.setinputsizes([(ConstantsDDBC.SQL_DECIMAL.value, 10, "invalid")])
- # Test invalid SQL type (lines 665-667)
- with pytest.raises(ValueError, match="Invalid SQL type"):
- cursor.setinputsizes([99999]) # Invalid SQL type constant
+# ---------------------------------------------------------
+# Test 5: Boundary precision values (max precision / scale)
+# ---------------------------------------------------------
+def test_numeric_boundary_precision(cursor, db_connection):
+ table_name = "#pytest_numeric_boundary"
+ precision, scale = 38, 37
+ value = decimal.Decimal("0." + "9" * 37) # 0.999... up to 37 digits
+ try:
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision},{scale}))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
- with pytest.raises(ValueError, match="Invalid SQL type"):
- cursor.setinputsizes(["invalid"]) # Non-integer SQL type
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row[0] == value, f"Boundary precision mismatch: expected {value}, got {row[0]}"
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
-def test_executemany_decimal_column_size_adjustment(cursor, db_connection):
- """Test executemany decimal column size adjustment (Lines 739-747)."""
+# ---------------------------------------------------------
+# Test 6: Precision/scale positive exponent (corner case)
+# ---------------------------------------------------------
+def test_numeric_precision_scale_positive_exponent(cursor, db_connection):
try:
- # Create table with decimal column
- cursor.execute("CREATE TABLE #test_decimal_adjust (id INT, decimal_col DECIMAL(38,10))")
-
- # Test with decimal parameters that should trigger column size adjustment
- params = [
- (1, decimal.Decimal("123.456")),
- (2, decimal.Decimal("999.999")),
- ]
-
- # This should trigger the decimal column size adjustment logic (lines 743-746)
- cursor.executemany(
- "INSERT INTO #test_decimal_adjust (id, decimal_col) VALUES (?, ?)", params
+ cursor.execute("CREATE TABLE #pytest_numeric_test (numeric_column DECIMAL(10, 2))")
+ db_connection.commit()
+ cursor.execute(
+ "INSERT INTO #pytest_numeric_test (numeric_column) VALUES (?)",
+ [decimal.Decimal("31400")],
)
+ db_connection.commit()
+ cursor.execute("SELECT numeric_column FROM #pytest_numeric_test")
+ row = cursor.fetchone()
+ assert row[0] == decimal.Decimal("31400"), "Numeric data parsing failed"
- # Verify data was inserted correctly
- cursor.execute("SELECT COUNT(*) FROM #test_decimal_adjust")
- count = cursor.fetchone()[0]
- assert count == 2
+        # Derive precision/scale from the Decimal itself rather than asserting constants
+        _, digits, exponent = decimal.Decimal("31400").as_tuple()
+        precision = len(digits)
+        scale = max(0, -exponent)
+        assert precision == 5, "Precision calculation failed"
+        assert scale == 0, "Scale calculation failed"
finally:
- cursor.execute("DROP TABLE IF EXISTS #test_decimal_adjust")
-
-
-def test_scroll_no_result_set_error(cursor):
- """Test scroll without active result set (Lines 906-914, 2207-2215)."""
-
- # Test decrement rownumber without result set (lines 910-913)
- cursor._rownumber = 5
- cursor._has_result_set = False
+ cursor.execute("DROP TABLE #pytest_numeric_test")
+ db_connection.commit()
- with pytest.raises(mssql_python.InterfaceError, match="Cannot decrement rownumber"):
- cursor._decrement_rownumber()
- # Test scroll without result set (lines 2211-2214)
- with pytest.raises(mssql_python.ProgrammingError, match="No active result set"):
- cursor.scroll(1)
+# ---------------------------------------------------------
+# Test 7: Precision/scale negative exponent (corner case)
+# ---------------------------------------------------------
+def test_numeric_precision_scale_negative_exponent(cursor, db_connection):
+ try:
+ cursor.execute("CREATE TABLE #pytest_numeric_test (numeric_column DECIMAL(10, 5))")
+ db_connection.commit()
+ cursor.execute(
+ "INSERT INTO #pytest_numeric_test (numeric_column) VALUES (?)",
+ [decimal.Decimal("0.03140")],
+ )
+ db_connection.commit()
+ cursor.execute("SELECT numeric_column FROM #pytest_numeric_test")
+ row = cursor.fetchone()
+ assert row[0] == decimal.Decimal("0.03140"), "Numeric data parsing failed"
+        # Derive precision/scale from the Decimal itself rather than asserting constants
+        _, digits, exponent = decimal.Decimal("0.03140").as_tuple()
+        scale = max(0, -exponent)
+        precision = max(len(digits), scale)
+        assert precision == 5, "Precision calculation failed"
+        assert scale == 5, "Scale calculation failed"
-def test_timeout_setting_and_logging(cursor):
- """Test timeout setting with logging (Lines 1006-1014, 1678-1688)."""
+ finally:
+ cursor.execute("DROP TABLE #pytest_numeric_test")
+ db_connection.commit()
- # Test timeout setting in execute (lines 1010, 1682-1684)
- cursor.timeout = 30
+# ---------------------------------------------------------
+# Test 8: fetchmany for numeric values
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "values",
+ [[decimal.Decimal("11.11"), decimal.Decimal("22.22"), decimal.Decimal("33.33")]],
+)
+def test_numeric_fetchmany(cursor, db_connection, values):
+ table_name = "#pytest_numeric_fetchmany"
try:
- # This should trigger timeout setting and logging
- cursor.execute("SELECT 1")
- cursor.fetchall()
-
- # Test with executemany as well
- cursor.executemany("SELECT ?", [(1,), (2,)])
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(10,2))")
+ for v in values:
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (v,))
+ db_connection.commit()
- except Exception:
- # Timeout setting might fail in some environments, which is okay
- # The important part is that we exercise the code path
- pass
+ cursor.execute(f"SELECT val FROM {table_name} ORDER BY val")
+ rows1 = cursor.fetchmany(2)
+ rows2 = cursor.fetchmany(2)
+ all_rows = [r[0] for r in rows1 + rows2]
+ assert all_rows == sorted(
+ values
+ ), f"fetchmany mismatch: expected {sorted(values)}, got {all_rows}"
-def test_column_description_validation(cursor):
- """Test column description validation (Lines 1116-1124)."""
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
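+
+
+# Editor's sketch of the PEP-249 fetchmany contract relied on above: each
+# call returns at most `size` rows, and an empty sequence once the result
+# set is exhausted, so two fetchmany(2) calls drain three rows.
+def _sketch_fetchmany_drain(cursor):
+    cursor.execute("SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3")
+    assert len(cursor.fetchmany(2)) == 2
+    assert len(cursor.fetchmany(2)) == 1
+    assert not cursor.fetchmany(2)  # empty once exhausted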
- # Execute query to get column descriptions
- cursor.execute("SELECT CAST('test' AS NVARCHAR(50)) as col1, CAST(123 as INT) as col2")
- # The description should be populated and validated
- assert cursor.description is not None
- assert len(cursor.description) == 2
+# ---------------------------------------------------------
+# Test 9: executemany for numeric values
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "values",
+ [
+ [
+ decimal.Decimal("111.1111"),
+ decimal.Decimal("222.2222"),
+ decimal.Decimal("333.3333"),
+ ]
+ ],
+)
+def test_numeric_executemany(cursor, db_connection, values):
+ precision, scale = 38, 10
+ table_name = "#pytest_numeric_executemany"
+ try:
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision},{scale}))")
- # Each description should have 7 elements per PEP-249
- for desc in cursor.description:
- assert len(desc) == 7, f"Column description should have 7 elements, got {len(desc)}"
+ params = [(v,) for v in values]
+ cursor.executemany(f"INSERT INTO {table_name} (val) VALUES (?)", params)
+ db_connection.commit()
+ cursor.execute(f"SELECT val FROM {table_name} ORDER BY val")
+ rows = [r[0] for r in cursor.fetchall()]
+ assert rows == sorted(
+ values
+ ), f"executemany() mismatch: expected {sorted(values)}, got {rows}"
-def test_column_metadata_error_handling(cursor):
- """Test column metadata retrieval error handling (Lines 1156-1167)."""
+ finally:
+ cursor.execute(f"DROP TABLE {table_name}")
+ db_connection.commit()
- # Execute a complex query that might stress metadata retrieval
- cursor.execute("""
- SELECT
- CAST(1 as INT) as int_col,
- CAST('test' as NVARCHAR(100)) as nvarchar_col,
- CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col
- """)
- # This should exercise the metadata retrieval code paths
- # If there are any errors, they should be logged but not crash
- description = cursor.description
- assert description is not None
- assert len(description) == 3
+# ---------------------------------------------------------
+# Test 10: Leading zeros precision loss
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value, expected_precision, expected_scale",
+ [
+ # Leading zeros (using values that won't become scientific notation)
+ (decimal.Decimal("000000123.45"), 38, 2), # Leading zeros in integer part
+ (decimal.Decimal("000.0001234"), 38, 7), # Leading zeros in decimal part
+ (
+ decimal.Decimal("0000000000000.123456789"),
+ 38,
+ 9,
+ ), # Many leading zeros + decimal
+ (
+ decimal.Decimal("000000.000000123456"),
+ 38,
+ 12,
+ ), # Lots of leading zeros (avoiding E notation)
+ ],
+)
+def test_numeric_leading_zeros_precision_loss(
+ cursor, db_connection, value, expected_precision, expected_scale
+):
+ """Test precision loss with values containing lots of leading zeros"""
+ table_name = "#pytest_numeric_leading_zeros"
+ try:
+ # Use explicit precision and scale to avoid scientific notation issues
+ cursor.execute(
+ f"CREATE TABLE {table_name} (val NUMERIC({expected_precision}, {expected_scale}))"
+ )
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row is not None, "Expected one row to be returned"
-def test_fetchone_column_mapping_coverage(cursor):
- """Test fetchone with specialized column mapping (Lines 1185-1215)."""
+ # Normalize both values to the same scale for comparison
+ expected = value.quantize(decimal.Decimal(f"1e-{expected_scale}"))
+ actual = row[0]
- # Execute query that should trigger specialized mapping
- cursor.execute("SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col")
+ # Verify that leading zeros are handled correctly during conversion and roundtrip
+ assert (
+ actual == expected
+ ), f"Leading zeros precision loss for {value}, expected {expected}, got {actual}"
- # This should trigger the UUID column mapping logic and fetchone specialization
- row = cursor.fetchone()
- assert row is not None
+    finally:
+        cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
+        db_connection.commit()
- # Test fetchmany and fetchall as well
- cursor.execute(
- "SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col UNION SELECT CAST(NEWID() as UNIQUEIDENTIFIER)"
- )
- # Test fetchmany (lines 1194-1200)
- rows = cursor.fetchmany(1)
- assert len(rows) == 1
+# ---------------------------------------------------------
+# Test 11: Extreme exponents precision loss
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value, description",
+ [
+ (decimal.Decimal("1E-20"), "1E-20 exponent"),
+ (decimal.Decimal("1E-38"), "1E-38 exponent"),
+ (decimal.Decimal("5E-35"), "5E-35 exponent"),
+ (decimal.Decimal("9E-30"), "9E-30 exponent"),
+ (decimal.Decimal("2.5E-25"), "2.5E-25 exponent"),
+ ],
+)
+def test_numeric_extreme_exponents_precision_loss(cursor, db_connection, value, description):
+ """Test precision loss with values having extreme small magnitudes"""
+ # Scientific notation values like 1E-20 create scale > precision situations
+ # that violate SQL Server's NUMERIC(P,S) rules - this is expected behavior
- # Test fetchall (lines 1202-1208)
- cursor.execute(
- "SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col UNION SELECT CAST(NEWID() as UNIQUEIDENTIFIER)"
- )
- rows = cursor.fetchall()
- assert len(rows) == 2
+ table_name = "#pytest_numeric_extreme_exp"
+ try:
+ # Try with a reasonable precision/scale that should handle most cases
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC(38, 20))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row is not None, "Expected one row to be returned"
-def test_foreignkeys_parameter_validation(cursor):
- """Test foreignkeys parameter validation (Lines 1365-1373)."""
+ # Verify the value was stored and retrieved
+ actual = row[0]
- # Test with both table and foreignTable as None (should raise error)
- with pytest.raises(
- mssql_python.ProgrammingError,
- match="Either table or foreignTable must be specified",
- ):
- cursor.foreignKeys(table=None, foreignTable=None)
+ # For extreme small values, check they're mathematically equivalent
+ assert abs(actual - value) < decimal.Decimal(
+ "1E-18"
+ ), f"Extreme exponent value not preserved for {description}: {value} -> {actual}"
+    finally:
+        cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
+        db_connection.commit()
-def test_tables_error_handling(cursor):
- """Test tables method error handling (Lines 2396-2404)."""
- # Call tables method - any errors should be logged and re-raised
+# ---------------------------------------------------------
+# Test 12: 38-digit precision boundary limits
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value",
+ [
+ # 38 digits with negative exponent
+ decimal.Decimal("0." + "0" * 36 + "1"), # 38 digits total (1 + 37 decimal places)
+ # very large numbers at 38-digit limit
+ decimal.Decimal("9" * 38), # Maximum 38-digit integer
+ decimal.Decimal("1" + "0" * 37), # Large 38-digit number
+ # Additional boundary cases
+ decimal.Decimal("0." + "0" * 35 + "12"), # 37 total digits
+ decimal.Decimal("0." + "0" * 34 + "123"), # 36 total digits
+ decimal.Decimal("0." + "1" * 37), # All 1's in decimal part
+ decimal.Decimal("1." + "9" * 36), # Close to maximum with integer part
+ ],
+)
+def test_numeric_precision_boundary_limits(cursor, db_connection, value):
+ """Test precision loss with values close to the 38-digit precision limit"""
+ precision, scale = 38, 37 # Maximum precision with high scale
+ table_name = "#pytest_numeric_boundary_limits"
try:
- cursor.tables(catalog="invalid_catalog_that_does_not_exist_12345")
- # If this doesn't error, that's fine - we're testing the error handling path
- except Exception:
- # Expected - the error should be logged and re-raised (line 2400)
- pass
-
+ cursor.execute(f"CREATE TABLE {table_name} (val NUMERIC({precision}, {scale}))")
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (value,))
+ db_connection.commit()
-def test_callproc_not_supported_error(cursor):
- """Test callproc NotSupportedError (Lines 2413-2421)."""
+ cursor.execute(f"SELECT val FROM {table_name}")
+ row = cursor.fetchone()
+ assert row is not None, "Expected one row to be returned"
- # This should always raise NotSupportedError (lines 2417-2420)
- with pytest.raises(mssql_python.NotSupportedError, match="callproc.*is not yet implemented"):
- cursor.callproc("test_proc")
+ # Ensure implementation behaves correctly even at the boundaries of SQL Server's maximum precision
+ assert row[0] == value, f"Boundary precision loss for {value}, got {row[0]}"
+ except Exception as e:
+ # Some boundary values might exceed SQL Server limits
+ pytest.skip(f"Value {value} may exceed SQL Server precision limits: {e}")
+    finally:
+        cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
+        db_connection.commit()
-def test_setoutputsize_no_op(cursor):
- """Test setoutputsize no-op behavior (Lines 2433-2438)."""
- # This should be a no-op (line 2437)
- cursor.setoutputsize(1000) # Should not raise any errors
- cursor.setoutputsize(1000, 1) # With column parameter
+# ---------------------------------------------------------
+# Test 13: Negative test - Values exceeding 38-digit precision limit
+# ---------------------------------------------------------
+@pytest.mark.parametrize(
+ "value, description",
+ [
+ (decimal.Decimal("1" + "0" * 38), "39 digits integer"), # 39 digits
+ (decimal.Decimal("9" * 39), "39 nines"), # 39 digits of 9s
+ (
+ decimal.Decimal("12345678901234567890123456789012345678901234567890"),
+ "50 digits",
+ ), # 50 digits
+ (
+ decimal.Decimal("0.111111111111111111111111111111111111111"),
+ "39 decimal places",
+ ), # 39 decimal digits
+ (
+ decimal.Decimal("1" * 20 + "." + "9" * 20),
+ "40 total digits",
+ ), # 40 total digits (20+20)
+ (
+ decimal.Decimal("123456789012345678901234567890.12345678901234567"),
+ "47 total digits",
+ ), # 47 total digits
+ ],
+)
+def test_numeric_beyond_38_digit_precision_negative(cursor, db_connection, value, description):
+ """
+ Negative test: Ensure proper error handling for values exceeding SQL Server's 38-digit precision limit.
+ After our precision validation fix, mssql-python should now gracefully reject values with precision > 38
+ by raising a ValueError with a clear message, matching pyodbc behavior.
+ """
+ # These values should be rejected by our precision validation
+ with pytest.raises(ValueError) as exc_info:
+ cursor.execute("SELECT ?", (value,))
-def test_cursor_del_cleanup_basic(db_connection):
- """Test cursor cleanup and __del__ method existence (Lines 2186-2194)."""
+ error_msg = str(exc_info.value)
+ assert (
+ "Precision of the numeric value is too high" in error_msg
+ ), f"Expected precision error message for {description}, got: {error_msg}"
+ assert (
+ "maximum precision supported by SQL Server is 38" in error_msg
+ ), f"Expected SQL Server precision limit message for {description}, got: {error_msg}"
- # Test that cursor has __del__ method and basic cleanup
- cursor = db_connection.cursor()
- # Test that __del__ method exists
- assert hasattr(cursor, "__del__"), "Cursor should have __del__ method"
+@pytest.mark.parametrize(
+ "values, description",
+ [
+ # Small decimal values with scientific notation
+ (
+ [
+ decimal.Decimal("0.70000000000696"),
+ decimal.Decimal("1E-7"),
+ decimal.Decimal("0.00001"),
+ decimal.Decimal("6.96E-12"),
+ ],
+ "Small decimals with scientific notation",
+ ),
+ # Large decimal values with scientific notation
+ (
+ [
+ decimal.Decimal("4E+8"),
+ decimal.Decimal("1.521E+15"),
+ decimal.Decimal("5.748E+18"),
+ decimal.Decimal("1E+11"),
+ ],
+ "Large decimals with positive exponents",
+ ),
+ # Medium-sized decimals
+ (
+ [
+ decimal.Decimal("123.456"),
+ decimal.Decimal("9999.9999"),
+ decimal.Decimal("1000000.50"),
+ ],
+ "Medium-sized decimals",
+ ),
+ ],
+)
+def test_decimal_scientific_notation_to_varchar(cursor, db_connection, values, description):
+ """
+ Test that Decimal values with scientific notation are properly converted
+ to VARCHAR without triggering 'varchar to numeric' conversion errors.
+ This verifies that the driver correctly handles Decimal to VARCHAR conversion
+ """
+ table_name = "#pytest_decimal_varchar_conversion"
+ try:
+ cursor.execute(f"CREATE TABLE {table_name} (id INT IDENTITY(1,1), val VARCHAR(50))")
- # Close cursor normally
- cursor.close()
- assert cursor.closed, "Cursor should be closed"
+ for val in values:
+ cursor.execute(f"INSERT INTO {table_name} (val) VALUES (?)", (val,))
+ db_connection.commit()
- # Force garbage collection to potentially trigger __del__ cleanup paths
- import gc
+ cursor.execute(f"SELECT val FROM {table_name} ORDER BY id")
+ rows = cursor.fetchall()
- gc.collect()
+ assert len(rows) == len(values), f"Expected {len(values)} rows, got {len(rows)}"
+ for i, (row, expected_val) in enumerate(zip(rows, values)):
+ stored_val = decimal.Decimal(row[0])
+ assert (
+ stored_val == expected_val
+ ), f"{description}: Row {i} mismatch - expected {expected_val}, got {stored_val}"
-def test_scroll_invalid_parameters(cursor):
- """Test scroll with invalid parameters."""
+    finally:
+        cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
+        db_connection.commit()
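+
+
+# Editor's sketch: the VARCHAR comparison above goes through Decimal
+# equality rather than string equality, because scientific and positional
+# notations of the same number differ as text.
+def _sketch_scientific_notation_equality():
+    assert decimal.Decimal("1E-7") == decimal.Decimal("0.0000001")
+    assert str(decimal.Decimal("1E-7")) != "0.0000001"
+
+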
- cursor.execute("SELECT 1")
- # Test invalid mode
- with pytest.raises(mssql_python.ProgrammingError, match="Invalid scroll mode"):
- cursor.scroll(1, mode="invalid")
+SMALL_XML = "<root><item>1</item></root>"
+LARGE_XML = "<root>" + "".join(f"<item>{i}</item>" for i in range(10000)) + "</root>"
+EMPTY_XML = ""
+INVALID_XML = "<root><item>"  # malformed
- # Test non-integer value
- with pytest.raises(mssql_python.ProgrammingError, match="value must be an integer"):
- cursor.scroll("invalid")
+def test_xml_basic_insert_fetch(cursor, db_connection):
+ """Test insert and fetch of a small XML value."""
+ try:
+ cursor.execute(
+ "CREATE TABLE #pytest_xml_basic (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ )
+ db_connection.commit()
-def test_row_uuid_processing_with_braces(cursor, db_connection):
- """Test Row UUID processing with braced GUID strings (Lines 95-103)."""
+ cursor.execute("INSERT INTO #pytest_xml_basic (xml_col) VALUES (?);", SMALL_XML)
+ db_connection.commit()
- try:
- # Drop table if exists
- drop_table_if_exists(cursor, "#pytest_uuid_braces")
+ row = cursor.execute("SELECT xml_col FROM #pytest_xml_basic;").fetchone()
+ assert row[0] == SMALL_XML
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_xml_basic;")
+ db_connection.commit()
- # Create table with UNIQUEIDENTIFIER column
- cursor.execute("""
- CREATE TABLE #pytest_uuid_braces (
- id INT IDENTITY(1,1),
- guid_col UNIQUEIDENTIFIER
- )
- """)
- # Insert a GUID with braces (this is how SQL Server often returns them)
- test_guid = "12345678-1234-5678-9ABC-123456789ABC"
- cursor.execute("INSERT INTO #pytest_uuid_braces (guid_col) VALUES (?)", [test_guid])
+def test_xml_empty_and_null(cursor, db_connection):
+ """Test insert and fetch of empty XML and NULL values."""
+ try:
+ cursor.execute(
+ "CREATE TABLE #pytest_xml_empty_null (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ )
db_connection.commit()
- # Configure native_uuid=True to trigger UUID processing
- original_setting = None
- if hasattr(cursor.connection, "_settings") and "native_uuid" in cursor.connection._settings:
- original_setting = cursor.connection._settings["native_uuid"]
- cursor.connection._settings["native_uuid"] = True
+ cursor.execute("INSERT INTO #pytest_xml_empty_null (xml_col) VALUES (?);", EMPTY_XML)
+ cursor.execute("INSERT INTO #pytest_xml_empty_null (xml_col) VALUES (?);", None)
+ db_connection.commit()
- # Fetch the data - this should trigger lines 95-103 in row.py
- cursor.execute("SELECT guid_col FROM #pytest_uuid_braces")
- row = cursor.fetchone()
+ rows = [
+ r[0]
+ for r in cursor.execute(
+ "SELECT xml_col FROM #pytest_xml_empty_null ORDER BY id;"
+ ).fetchall()
+ ]
+ assert rows[0] == EMPTY_XML
+ assert rows[1] is None
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_xml_empty_null;")
+ db_connection.commit()
- # The Row class should process the GUID and convert it to UUID object
- # Line 99: clean_value = value.strip("{}")
- # Line 100: processed_values[i] = uuid.UUID(clean_value)
- assert row is not None, "Should return a row"
- # The GUID should be processed correctly regardless of brace format
- guid_value = row[0]
+def test_xml_large_insert(cursor, db_connection):
+ """Test insert and fetch of a large XML value to verify streaming/DAE."""
+ try:
+ cursor.execute(
+ "CREATE TABLE #pytest_xml_large (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ )
+ db_connection.commit()
- # Restore original setting
- if original_setting is not None and hasattr(cursor.connection, "_settings"):
- cursor.connection._settings["native_uuid"] = original_setting
+ cursor.execute("INSERT INTO #pytest_xml_large (xml_col) VALUES (?);", LARGE_XML)
+ db_connection.commit()
- except Exception as e:
- pytest.fail(f"UUID processing with braces test failed: {e}")
+ row = cursor.execute("SELECT xml_col FROM #pytest_xml_large;").fetchone()
+ assert row[0] == LARGE_XML
finally:
- drop_table_if_exists(cursor, "#pytest_uuid_braces")
+ cursor.execute("DROP TABLE IF EXISTS #pytest_xml_large;")
db_connection.commit()
-def test_row_uuid_processing_sql_guid_type(cursor, db_connection):
- """Test Row UUID processing with SQL_GUID type detection (Lines 111-119)."""
-
+def test_xml_batch_insert(cursor, db_connection):
+ """Test batch insert (executemany) of multiple XML values."""
try:
- # Drop table if exists
- drop_table_if_exists(cursor, "#pytest_sql_guid_type")
+ cursor.execute(
+ "CREATE TABLE #pytest_xml_batch (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
+ )
+ db_connection.commit()
- # Create table with UNIQUEIDENTIFIER column
- cursor.execute("""
- CREATE TABLE #pytest_sql_guid_type (
- id INT,
- guid_col UNIQUEIDENTIFIER
- )
- """)
+    xmls = [f"<root><item>{i}</item></root>" for i in range(5)]
+ cursor.executemany(
+ "INSERT INTO #pytest_xml_batch (xml_col) VALUES (?);", [(x,) for x in xmls]
+ )
+ db_connection.commit()
- # Insert test data
- test_guid = "ABCDEF12-3456-7890-ABCD-1234567890AB"
+ rows = [
+ r[0]
+ for r in cursor.execute("SELECT xml_col FROM #pytest_xml_batch ORDER BY id;").fetchall()
+ ]
+ assert rows == xmls
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_xml_batch;")
+ db_connection.commit()
+
+
+def test_xml_malformed_input(cursor, db_connection):
+ """Verify driver raises error for invalid XML input."""
+ try:
cursor.execute(
- "INSERT INTO #pytest_sql_guid_type (id, guid_col) VALUES (?, ?)",
- [1, test_guid],
+ "CREATE TABLE #pytest_xml_invalid (id INT PRIMARY KEY IDENTITY(1,1), xml_col XML NULL);"
)
db_connection.commit()
- # Configure native_uuid=True to trigger UUID processing
- original_setting = None
- if hasattr(cursor.connection, "_settings") and "native_uuid" in cursor.connection._settings:
- original_setting = cursor.connection._settings["native_uuid"]
- cursor.connection._settings["native_uuid"] = True
+ with pytest.raises(Exception):
+ cursor.execute("INSERT INTO #pytest_xml_invalid (xml_col) VALUES (?);", INVALID_XML)
+ finally:
+ cursor.execute("DROP TABLE IF EXISTS #pytest_xml_invalid;")
+ db_connection.commit()
- # Fetch the data - this should trigger lines 111-119 in row.py
- cursor.execute("SELECT id, guid_col FROM #pytest_sql_guid_type")
- row = cursor.fetchone()
- # Line 111: sql_type = description[i][1]
- # Line 112: if sql_type == -11: # SQL_GUID
- # Line 115: processed_values[i] = uuid.UUID(value.strip("{}"))
- assert row is not None, "Should return a row"
- assert row[0] == 1, "ID should be 1"
+# ==================== CODE COVERAGE TEST CASES ====================
- # The GUID column should be processed
- guid_value = row[1]
- # Restore original setting
- if original_setting is not None and hasattr(cursor.connection, "_settings"):
- cursor.connection._settings["native_uuid"] = original_setting
+def test_decimal_special_values_coverage(cursor):
+ """Test decimal processing with special values like NaN and Infinity (Lines 213-221)."""
+ from decimal import Decimal
- except Exception as e:
- pytest.fail(f"UUID processing SQL_GUID type test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_sql_guid_type")
- db_connection.commit()
+ # Test special decimal values that have string exponents
+ test_values = [
+ Decimal("NaN"), # Should have str exponent 'n'
+ Decimal("Infinity"), # Should have str exponent 'F'
+ Decimal("-Infinity"), # Should have str exponent 'F'
+ ]
+
+ for special_val in test_values:
+ try:
+ # This should trigger the special value handling path (lines 217-218)
+ # But there's a bug in the code - it doesn't handle string exponents properly after line 218
+ cursor._get_numeric_data(special_val)
+ except (ValueError, TypeError) as e:
+ # Expected - either ValueError for unsupported values or TypeError due to str/int comparison
+ # This exercises the special value code path (lines 217-218) even though it errors later
+ assert (
+ "not supported" in str(e)
+ or "Precision of the numeric value is too high" in str(e)
+ or "'>' not supported between instances of 'str' and 'int'" in str(e)
+ )
+ except Exception as e:
+ # Other exceptions are also acceptable as we're testing error paths
+ pass
-def test_row_output_converter_overflow_error(cursor, db_connection):
- """Test Row output converter OverflowError handling (Lines 186-195)."""
+def test_decimal_negative_exponent_edge_cases(cursor):
+ """Test decimal processing with negative exponents (Lines 230-239)."""
+ from decimal import Decimal
+
+ # Test case where digits < abs(exponent) -> triggers lines 234-235
+ # Example: 0.0001 -> digits=(1,), exponent=-4 -> precision=4, scale=4
+ test_decimal = Decimal("0.0001") # digits=(1,), exponent=-4
try:
- # Create a table with integer column
- drop_table_if_exists(cursor, "#pytest_overflow_test")
- cursor.execute("""
- CREATE TABLE #pytest_overflow_test (
- id INT,
- small_int TINYINT -- TINYINT can only hold 0-255
- )
- """)
+ cursor._get_numeric_data(test_decimal)
+ except ValueError as e:
+ # This is expected - the method should process it and potentially raise precision error
+ pass
- # Insert a valid value first
- cursor.execute("INSERT INTO #pytest_overflow_test (id, small_int) VALUES (?, ?)", [1, 100])
- db_connection.commit()
- # Create a custom output converter that will cause OverflowError
- def problematic_converter(value):
- if isinstance(value, int) and value == 100:
- # This will cause an OverflowError when trying to convert to bytes
- # by simulating a value that's too large for the byte size
- raise OverflowError("int too big to convert to bytes")
- return value
+def test_decimal_string_conversion_edge_cases(cursor):
+ """Test decimal string conversion edge cases (Lines 248-262)."""
+ from decimal import Decimal
+
+ # Test case 1: positive exponent (line 252)
+ decimal_with_pos_exp = Decimal("123E2") # Should add zeros
+ try:
+ cursor._get_numeric_data(decimal_with_pos_exp)
+ except ValueError:
+ pass # Expected for large values
+
+ # Test case 2: negative exponent with padding needed (line 255)
+ decimal_with_neg_exp = Decimal("1E-10") # Should need zero padding
+ try:
+ cursor._get_numeric_data(decimal_with_neg_exp)
+ except ValueError:
+ pass
+
+ # Test case 3: empty string case (line 258)
+ # This is harder to trigger directly, but the logic handles it
+ zero_decimal = Decimal("0")
+ cursor._get_numeric_data(zero_decimal)
- # Add the converter to the connection (if supported)
- if hasattr(cursor.connection, "_output_converters"):
- # Create a converter that will trigger the overflow
- original_converters = getattr(cursor.connection, "_output_converters", {})
- cursor.connection._output_converters = {-6: problematic_converter} # TINYINT SQL type
- # Fetch the data - this should trigger lines 186-195 in row.py
- cursor.execute("SELECT id, small_int FROM #pytest_overflow_test")
- row = cursor.fetchone()
+def test_decimal_precision_special_values_executemany(cursor):
+ """Test _get_decimal_precision with special values (Lines 354-362)."""
+ from decimal import Decimal
- # Line 188: except OverflowError as e:
- # Lines 190-194: if hasattr(self._cursor, "log"): self._cursor.log(...)
- # Line 195: # Keep the original value in this case
- assert row is not None, "Should return a row"
- assert row[0] == 1, "ID should be 1"
+ # Test special values in executemany context
+ test_values = [Decimal("NaN"), Decimal("Infinity"), Decimal("-Infinity")]
- # The overflow should be handled and original value kept
- assert row[1] == 100, "Value should be kept as original due to overflow handling"
+ for special_val in test_values:
+ try:
+ # This should trigger the special value handling (line 358)
+ precision = cursor._get_decimal_precision(special_val)
+ assert precision == 38 # Should return default precision
+ except Exception:
+ # Some special values might not be supported
+ pass
- # Restore original converters
- if hasattr(cursor.connection, "_output_converters"):
- cursor.connection._output_converters = original_converters
- except Exception as e:
- pytest.fail(f"Output converter OverflowError test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_overflow_test")
- db_connection.commit()
+def test_cursor_close_connection_tracking_error(db_connection):
+ """Test cursor close with connection tracking error (Lines 578-586)."""
+ cursor = db_connection.cursor()
-def test_row_output_converter_general_exception(cursor, db_connection):
- """Test Row output converter general exception handling (Lines 198-206)."""
+ # Corrupt the connection's cursor tracking to cause error
+ original_cursors = db_connection._cursors
+
+ # Replace with something that will cause an error on discard
+ class ErrorSet:
+ def discard(self, item):
+ raise RuntimeError("Simulated cursor tracking error")
+
+ db_connection._cursors = ErrorSet()
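+    # ErrorSet stubs only discard(), on the assumption that close() touches
+    # the tracking container solely through discard(); one failing method is
+    # enough to drive the error-handling branch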
try:
- # Create a table with string column
- drop_table_if_exists(cursor, "#pytest_exception_test")
- cursor.execute("""
- CREATE TABLE #pytest_exception_test (
- id INT,
- text_col VARCHAR(50)
- )
- """)
+ # This should trigger the exception handling in close() (line 582)
+ cursor.close()
+ # Should complete without raising the tracking error
+ assert cursor.closed
+ finally:
+ # Restore original cursor tracking
+ db_connection._cursors = original_cursors
- # Insert test data
- cursor.execute(
- "INSERT INTO #pytest_exception_test (id, text_col) VALUES (?, ?)",
- [1, "test_value"],
- )
- db_connection.commit()
- # Create a custom output converter that will raise a general exception
- def failing_converter(value):
- if value == "test_value":
- raise RuntimeError("Custom converter error for testing")
- return value
+def test_setinputsizes_validation_errors(cursor):
+ """Test setinputsizes parameter validation (Lines 645-669)."""
+ from mssql_python.constants import ConstantsDDBC
- # Add the converter to the connection (if supported)
- original_converters = {}
- if hasattr(cursor.connection, "_output_converters"):
- original_converters = getattr(cursor.connection, "_output_converters", {})
- cursor.connection._output_converters = {12: failing_converter} # VARCHAR SQL type
+ # Test invalid column_size (lines 649-651)
+ with pytest.raises(ValueError, match="Invalid column size"):
+ cursor.setinputsizes([(ConstantsDDBC.SQL_VARCHAR.value, -1, 0)])
- # Fetch the data - this should trigger lines 198-206 in row.py
- cursor.execute("SELECT id, text_col FROM #pytest_exception_test")
- row = cursor.fetchone()
+ with pytest.raises(ValueError, match="Invalid column size"):
+ cursor.setinputsizes([(ConstantsDDBC.SQL_VARCHAR.value, "invalid", 0)])
- # Line 199: except Exception as e:
- # Lines 201-205: if hasattr(self._cursor, "log"): self._cursor.log(...)
- # Line 206: # If conversion fails, keep the original value
- assert row is not None, "Should return a row"
- assert row[0] == 1, "ID should be 1"
+ # Test invalid decimal_digits (lines 654-656)
+ with pytest.raises(ValueError, match="Invalid decimal digits"):
+ cursor.setinputsizes([(ConstantsDDBC.SQL_DECIMAL.value, 10, -1)])
- # The exception should be handled and original value kept
- assert row[1] == "test_value", "Value should be kept as original due to exception handling"
+ with pytest.raises(ValueError, match="Invalid decimal digits"):
+ cursor.setinputsizes([(ConstantsDDBC.SQL_DECIMAL.value, 10, "invalid")])
- # Restore original converters
- if hasattr(cursor.connection, "_output_converters"):
- cursor.connection._output_converters = original_converters
+ # Test invalid SQL type (lines 665-667)
+ with pytest.raises(ValueError, match="Invalid SQL type"):
+ cursor.setinputsizes([99999]) # Invalid SQL type constant
- except Exception as e:
- pytest.fail(f"Output converter general exception test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_exception_test")
- db_connection.commit()
+ with pytest.raises(ValueError, match="Invalid SQL type"):
+ cursor.setinputsizes(["invalid"]) # Non-integer SQL type
-def test_row_cursor_log_method_availability(cursor, db_connection):
- """Test Row checking for cursor.log method availability (Lines 190, 201)."""
+def test_executemany_decimal_column_size_adjustment(cursor, db_connection):
+ """Test executemany decimal column size adjustment (Lines 739-747)."""
try:
- # Create test data
- drop_table_if_exists(cursor, "#pytest_log_check")
- cursor.execute("""
- CREATE TABLE #pytest_log_check (
- id INT,
- value_col INT
- )
- """)
-
- cursor.execute("INSERT INTO #pytest_log_check (id, value_col) VALUES (?, ?)", [1, 42])
- db_connection.commit()
+ # Create table with decimal column
+ cursor.execute("CREATE TABLE #test_decimal_adjust (id INT, decimal_col DECIMAL(38,10))")
- # Test that cursor has log method or doesn't have it
- # Lines 190 and 201: if hasattr(self._cursor, "log"):
- cursor.execute("SELECT id, value_col FROM #pytest_log_check")
- row = cursor.fetchone()
+ # Test with decimal parameters that should trigger column size adjustment
+ params = [
+ (1, decimal.Decimal("123.456")),
+ (2, decimal.Decimal("999.999")),
+ ]
- assert row is not None, "Should return a row"
- assert row[0] == 1, "ID should be 1"
- assert row[1] == 42, "Value should be 42"
+ # This should trigger the decimal column size adjustment logic (lines 743-746)
+ cursor.executemany(
+ "INSERT INTO #test_decimal_adjust (id, decimal_col) VALUES (?, ?)", params
+ )
- # The hasattr check should complete without error
- # This covers the conditional log method availability checks
+ # Verify data was inserted correctly
+ cursor.execute("SELECT COUNT(*) FROM #test_decimal_adjust")
+ count = cursor.fetchone()[0]
+ assert count == 2
- except Exception as e:
- pytest.fail(f"Cursor log method availability test failed: {e}")
finally:
- drop_table_if_exists(cursor, "#pytest_log_check")
- db_connection.commit()
+ cursor.execute("DROP TABLE IF EXISTS #test_decimal_adjust")
-def test_all_numeric_types_with_nulls(cursor, db_connection):
- """Test NULL handling for all numeric types to ensure processor functions handle NULLs correctly"""
- try:
- drop_table_if_exists(cursor, "#pytest_all_numeric_nulls")
- cursor.execute("""
- CREATE TABLE #pytest_all_numeric_nulls (
- int_col INT,
- bigint_col BIGINT,
- smallint_col SMALLINT,
- tinyint_col TINYINT,
- bit_col BIT,
- real_col REAL,
- float_col FLOAT
- )
- """)
- db_connection.commit()
+def test_scroll_no_result_set_error(cursor):
+ """Test scroll without active result set (Lines 906-914, 2207-2215)."""
- # Insert row with all NULLs
- cursor.execute(
- "INSERT INTO #pytest_all_numeric_nulls VALUES (NULL, NULL, NULL, NULL, NULL, NULL, NULL)"
- )
- # Insert row with actual values
- cursor.execute(
- "INSERT INTO #pytest_all_numeric_nulls VALUES (42, 9223372036854775807, 32767, 255, 1, 3.14, 2.718281828)"
- )
- db_connection.commit()
+ # Test decrement rownumber without result set (lines 910-913)
+ cursor._rownumber = 5
+ cursor._has_result_set = False
- cursor.execute("SELECT * FROM #pytest_all_numeric_nulls ORDER BY int_col ASC")
- rows = cursor.fetchall()
+ with pytest.raises(mssql_python.InterfaceError, match="Cannot decrement rownumber"):
+ cursor._decrement_rownumber()
- # First row should be all NULLs
- assert len(rows) == 2, "Should have exactly 2 rows"
- assert all(val is None for val in rows[0]), "First row should be all NULLs"
+ # Test scroll without result set (lines 2211-2214)
+ with pytest.raises(mssql_python.ProgrammingError, match="No active result set"):
+ cursor.scroll(1)
- # Second row should have actual values
- assert rows[1][0] == 42, "INT column should be 42"
- assert rows[1][1] == 9223372036854775807, "BIGINT column should match"
- assert rows[1][2] == 32767, "SMALLINT column should be 32767"
- assert rows[1][3] == 255, "TINYINT column should be 255"
- assert rows[1][4] == True, "BIT column should be True"
- assert abs(rows[1][5] - 3.14) < 0.01, "REAL column should be approximately 3.14"
- assert (
- abs(rows[1][6] - 2.718281828) < 0.0001
- ), "FLOAT column should be approximately 2.718281828"
- except Exception as e:
- pytest.fail(f"All numeric types NULL test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_all_numeric_nulls")
- db_connection.commit()
+def test_timeout_setting_and_logging(cursor):
+ """Test timeout setting with logging (Lines 1006-1014, 1678-1688)."""
+ # Test timeout setting in execute (lines 1010, 1682-1684)
+ cursor.timeout = 30
-def test_lob_data_types(cursor, db_connection):
- """Test LOB (Large Object) data types to ensure LOB fallback paths are exercised"""
try:
- drop_table_if_exists(cursor, "#pytest_lob_test")
- cursor.execute("""
- CREATE TABLE #pytest_lob_test (
- id INT,
- text_lob VARCHAR(MAX),
- ntext_lob NVARCHAR(MAX),
- binary_lob VARBINARY(MAX)
- )
- """)
- db_connection.commit()
+ # This should trigger timeout setting and logging
+ cursor.execute("SELECT 1")
+ cursor.fetchall()
+
+ # Test with executemany as well
+ cursor.executemany("SELECT ?", [(1,), (2,)])
+
+ except Exception:
+ # Timeout setting might fail in some environments, which is okay
+ # The important part is that we exercise the code path
+ pass
+
+
+def test_column_description_validation(cursor):
+ """Test column description validation (Lines 1116-1124)."""
+
+ # Execute query to get column descriptions
+ cursor.execute("SELECT CAST('test' AS NVARCHAR(50)) as col1, CAST(123 as INT) as col2")
+
+ # The description should be populated and validated
+ assert cursor.description is not None
+ assert len(cursor.description) == 2
- # Create large data that will trigger LOB handling
- large_text = "A" * 10000 # 10KB text
- large_ntext = "B" * 10000 # 10KB unicode text
- large_binary = b"\x01\x02\x03\x04" * 2500 # 10KB binary
+ # Each description should have 7 elements per PEP-249
+ for desc in cursor.description:
+ assert len(desc) == 7, f"Column description should have 7 elements, got {len(desc)}"
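+
+    # Per PEP-249 those seven elements are:
+    # (name, type_code, display_size, internal_size, precision, scale, null_ok)
+    assert cursor.description[0][0] == "col1", "First column name should match its alias"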
- cursor.execute(
- "INSERT INTO #pytest_lob_test VALUES (?, ?, ?, ?)",
- (1, large_text, large_ntext, large_binary),
- )
- db_connection.commit()
- cursor.execute("SELECT id, text_lob, ntext_lob, binary_lob FROM #pytest_lob_test")
- row = cursor.fetchone()
+def test_column_metadata_error_handling(cursor):
+ """Test column metadata retrieval error handling (Lines 1156-1167)."""
- assert row[0] == 1, "ID should be 1"
- assert row[1] == large_text, "VARCHAR(MAX) LOB data should match"
- assert row[2] == large_ntext, "NVARCHAR(MAX) LOB data should match"
- assert row[3] == large_binary, "VARBINARY(MAX) LOB data should match"
+ # Execute a complex query that might stress metadata retrieval
+ cursor.execute("""
+ SELECT
+ CAST(1 as INT) as int_col,
+ CAST('test' as NVARCHAR(100)) as nvarchar_col,
+ CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col
+ """)
- except Exception as e:
- pytest.fail(f"LOB data types test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_lob_test")
- db_connection.commit()
+ # This should exercise the metadata retrieval code paths
+ # If there are any errors, they should be logged but not crash
+ description = cursor.description
+ assert description is not None
+ assert len(description) == 3
-def test_lob_char_column_types(cursor, db_connection):
- """Test LOB fetching specifically for CHAR/VARCHAR columns (covers lines 3313-3314)"""
- try:
- drop_table_if_exists(cursor, "#pytest_lob_char")
- cursor.execute("""
- CREATE TABLE #pytest_lob_char (
- id INT,
- char_lob VARCHAR(MAX)
- )
- """)
- db_connection.commit()
+def test_fetchone_column_mapping_coverage(cursor):
+ """Test fetchone with specialized column mapping (Lines 1185-1215)."""
- # Create data large enough to trigger LOB path (>8000 bytes)
- large_char_data = "X" * 20000 # 20KB text
+ # Execute query that should trigger specialized mapping
+ cursor.execute("SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col")
- cursor.execute("INSERT INTO #pytest_lob_char VALUES (?, ?)", (1, large_char_data))
- db_connection.commit()
+ # This should trigger the UUID column mapping logic and fetchone specialization
+ row = cursor.fetchone()
+ assert row is not None
- cursor.execute("SELECT id, char_lob FROM #pytest_lob_char")
- row = cursor.fetchone()
+ # Test fetchmany and fetchall as well
+ cursor.execute(
+ "SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col UNION SELECT CAST(NEWID() as UNIQUEIDENTIFIER)"
+ )
- assert row[0] == 1, "ID should be 1"
- assert row[1] == large_char_data, "VARCHAR(MAX) LOB data should match"
- assert len(row[1]) == 20000, "VARCHAR(MAX) should be 20000 chars"
+ # Test fetchmany (lines 1194-1200)
+ rows = cursor.fetchmany(1)
+ assert len(rows) == 1
- except Exception as e:
- pytest.fail(f"LOB CHAR column test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_lob_char")
- db_connection.commit()
+ # Test fetchall (lines 1202-1208)
+ cursor.execute(
+ "SELECT CAST(NEWID() as UNIQUEIDENTIFIER) as guid_col UNION SELECT CAST(NEWID() as UNIQUEIDENTIFIER)"
+ )
+ rows = cursor.fetchall()
+ assert len(rows) == 2
-def test_lob_wchar_column_types(cursor, db_connection):
- """Test LOB fetching specifically for WCHAR/NVARCHAR columns (covers lines 3358-3359)"""
+def test_foreignkeys_parameter_validation(cursor):
+ """Test foreignkeys parameter validation (Lines 1365-1373)."""
+
+ # Test with both table and foreignTable as None (should raise error)
+ with pytest.raises(
+ mssql_python.ProgrammingError,
+ match="Either table or foreignTable must be specified",
+ ):
+ cursor.foreignKeys(table=None, foreignTable=None)
+
+
+def test_tables_error_handling(cursor):
+ """Test tables method error handling (Lines 2396-2404)."""
+
+ # Call tables method - any errors should be logged and re-raised
try:
- drop_table_if_exists(cursor, "#pytest_lob_wchar")
- cursor.execute("""
- CREATE TABLE #pytest_lob_wchar (
- id INT,
- wchar_lob NVARCHAR(MAX)
- )
- """)
- db_connection.commit()
+ cursor.tables(catalog="invalid_catalog_that_does_not_exist_12345")
+ # If this doesn't error, that's fine - we're testing the error handling path
+ except Exception:
+ # Expected - the error should be logged and re-raised (line 2400)
+ pass
- # Create unicode data large enough to trigger LOB path (>4000 characters for NVARCHAR)
- large_wchar_data = "🔥" * 5000 + "Unicode™" * 1000 # Mix of emoji and special chars
- cursor.execute("INSERT INTO #pytest_lob_wchar VALUES (?, ?)", (1, large_wchar_data))
- db_connection.commit()
+def test_callproc_not_supported_error(cursor):
+ """Test callproc NotSupportedError (Lines 2413-2421)."""
- cursor.execute("SELECT id, wchar_lob FROM #pytest_lob_wchar")
- row = cursor.fetchone()
+ # This should always raise NotSupportedError (lines 2417-2420)
+ with pytest.raises(mssql_python.NotSupportedError, match="callproc.*is not yet implemented"):
+ cursor.callproc("test_proc")
- assert row[0] == 1, "ID should be 1"
- assert row[1] == large_wchar_data, "NVARCHAR(MAX) LOB data should match"
- assert "🔥" in row[1], "Should contain emoji characters"
- except Exception as e:
- pytest.fail(f"LOB WCHAR column test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_lob_wchar")
- db_connection.commit()
+def test_setoutputsize_no_op(cursor):
+ """Test setoutputsize no-op behavior (Lines 2433-2438)."""
+ # This should be a no-op (line 2437)
+ cursor.setoutputsize(1000) # Should not raise any errors
+ cursor.setoutputsize(1000, 1) # With column parameter
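+
+    # PEP-249 explicitly permits setoutputsize() to do nothing, so the
+    # absence of an exception is the entire contract verified here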
-def test_lob_binary_column_types(cursor, db_connection):
- """Test LOB fetching specifically for BINARY/VARBINARY columns (covers lines 3384-3385)"""
- try:
- drop_table_if_exists(cursor, "#pytest_lob_binary")
- cursor.execute("""
- CREATE TABLE #pytest_lob_binary (
- id INT,
- binary_lob VARBINARY(MAX)
- )
- """)
- db_connection.commit()
- # Create binary data large enough to trigger LOB path (>8000 bytes)
- large_binary_data = bytes(range(256)) * 100 # 25.6KB of varied binary data
+def test_cursor_del_cleanup_basic(db_connection):
+ """Test cursor cleanup and __del__ method existence (Lines 2186-2194)."""
- cursor.execute("INSERT INTO #pytest_lob_binary VALUES (?, ?)", (1, large_binary_data))
- db_connection.commit()
+ # Test that cursor has __del__ method and basic cleanup
+ cursor = db_connection.cursor()
- cursor.execute("SELECT id, binary_lob FROM #pytest_lob_binary")
- row = cursor.fetchone()
+ # Test that __del__ method exists
+ assert hasattr(cursor, "__del__"), "Cursor should have __del__ method"
- assert row[0] == 1, "ID should be 1"
- assert row[1] == large_binary_data, "VARBINARY(MAX) LOB data should match"
- assert len(row[1]) == 25600, "VARBINARY(MAX) should be 25600 bytes"
+ # Close cursor normally
+ cursor.close()
+ assert cursor.closed, "Cursor should be closed"
- except Exception as e:
- pytest.fail(f"LOB BINARY column test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_lob_binary")
- db_connection.commit()
+ # Force garbage collection to potentially trigger __del__ cleanup paths
+ import gc
+ gc.collect()
-def test_zero_length_complex_types(cursor, db_connection):
- """Test zero-length data for complex types (covers lines 3531-3533)"""
- try:
- drop_table_if_exists(cursor, "#pytest_zero_length")
- cursor.execute("""
- CREATE TABLE #pytest_zero_length (
- id INT,
- empty_varchar VARCHAR(100),
- empty_nvarchar NVARCHAR(100),
- empty_binary VARBINARY(100)
- )
- """)
- db_connection.commit()
- # Insert empty (non-NULL) values
- cursor.execute("INSERT INTO #pytest_zero_length VALUES (?, ?, ?, ?)", (1, "", "", b""))
- db_connection.commit()
+def test_scroll_invalid_parameters(cursor):
+ """Test scroll with invalid parameters."""
- cursor.execute(
- "SELECT id, empty_varchar, empty_nvarchar, empty_binary FROM #pytest_zero_length"
- )
- row = cursor.fetchone()
+ cursor.execute("SELECT 1")
- assert row[0] == 1, "ID should be 1"
- assert row[1] == "", "Empty VARCHAR should be empty string"
- assert row[2] == "", "Empty NVARCHAR should be empty string"
- assert row[3] == b"", "Empty VARBINARY should be empty bytes"
+ # Test invalid mode
+ with pytest.raises(mssql_python.ProgrammingError, match="Invalid scroll mode"):
+ cursor.scroll(1, mode="invalid")
- except Exception as e:
- pytest.fail(f"Zero-length complex types test failed: {e}")
- finally:
- drop_table_if_exists(cursor, "#pytest_zero_length")
- db_connection.commit()
+ # Test non-integer value
+ with pytest.raises(mssql_python.ProgrammingError, match="value must be an integer"):
+ cursor.scroll("invalid")
-def test_guid_with_nulls(cursor, db_connection):
- """Test GUID type with NULL values"""
+def test_row_uuid_processing_with_braces(cursor, db_connection):
+ """Test Row UUID processing with braced GUID strings (Lines 95-103)."""
+
try:
- drop_table_if_exists(cursor, "#pytest_guid_nulls")
+ # Drop table if exists
+ drop_table_if_exists(cursor, "#pytest_uuid_braces")
+
+ # Create table with UNIQUEIDENTIFIER column
cursor.execute("""
- CREATE TABLE #pytest_guid_nulls (
- id INT,
+ CREATE TABLE #pytest_uuid_braces (
+ id INT IDENTITY(1,1),
guid_col UNIQUEIDENTIFIER
)
- """)
- db_connection.commit()
+ """)
- # Insert NULL GUID
- cursor.execute("INSERT INTO #pytest_guid_nulls VALUES (1, NULL)")
- # Insert actual GUID
- cursor.execute("INSERT INTO #pytest_guid_nulls VALUES (2, NEWID())")
+    # Insert a plain GUID; SQL Server may return UNIQUEIDENTIFIER values
+    # wrapped in braces, which the Row class is expected to strip
+    test_guid = "12345678-1234-5678-9ABC-123456789ABC"
+ cursor.execute("INSERT INTO #pytest_uuid_braces (guid_col) VALUES (?)", [test_guid])
db_connection.commit()
- cursor.execute("SELECT id, guid_col FROM #pytest_guid_nulls ORDER BY id")
- rows = cursor.fetchall()
+ # Configure native_uuid=True to trigger UUID processing
+ original_setting = None
+ if hasattr(cursor.connection, "_settings") and "native_uuid" in cursor.connection._settings:
+ original_setting = cursor.connection._settings["native_uuid"]
+ cursor.connection._settings["native_uuid"] = True
+
+ # Fetch the data - this should trigger lines 95-103 in row.py
+ cursor.execute("SELECT guid_col FROM #pytest_uuid_braces")
+ row = cursor.fetchone()
+
+ # The Row class should process the GUID and convert it to UUID object
+ # Line 99: clean_value = value.strip("{}")
+ # Line 100: processed_values[i] = uuid.UUID(clean_value)
+ assert row is not None, "Should return a row"
+
+    # The GUID should be processed correctly regardless of brace format
+    guid_value = row[0]
+    assert guid_value is not None, "Processed GUID value should not be None"
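+
+    # The conversion under test is equivalent to this standard-library call:
+    #   uuid.UUID("{12345678-1234-5678-9ABC-123456789ABC}".strip("{}"))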
- assert len(rows) == 2, "Should have exactly 2 rows"
- assert rows[0][1] is None, "First GUID should be NULL"
- assert rows[1][1] is not None, "Second GUID should not be NULL"
+ # Restore original setting
+ if original_setting is not None and hasattr(cursor.connection, "_settings"):
+ cursor.connection._settings["native_uuid"] = original_setting
except Exception as e:
- pytest.fail(f"GUID with NULLs test failed: {e}")
+ pytest.fail(f"UUID processing with braces test failed: {e}")
finally:
- drop_table_if_exists(cursor, "#pytest_guid_nulls")
+ drop_table_if_exists(cursor, "#pytest_uuid_braces")
db_connection.commit()
-def test_datetimeoffset_with_nulls(cursor, db_connection):
- """Test DATETIMEOFFSET type with NULL values"""
+def test_row_uuid_processing_sql_guid_type(cursor, db_connection):
+ """Test Row UUID processing with SQL_GUID type detection (Lines 111-119)."""
+
try:
- drop_table_if_exists(cursor, "#pytest_dto_nulls")
+ # Drop table if exists
+ drop_table_if_exists(cursor, "#pytest_sql_guid_type")
+
+ # Create table with UNIQUEIDENTIFIER column
cursor.execute("""
- CREATE TABLE #pytest_dto_nulls (
+ CREATE TABLE #pytest_sql_guid_type (
id INT,
- dto_col DATETIMEOFFSET
+ guid_col UNIQUEIDENTIFIER
)
- """)
- db_connection.commit()
+ """)
- # Insert NULL DATETIMEOFFSET
- cursor.execute("INSERT INTO #pytest_dto_nulls VALUES (1, NULL)")
- # Insert actual DATETIMEOFFSET
- cursor.execute("INSERT INTO #pytest_dto_nulls VALUES (2, SYSDATETIMEOFFSET())")
+ # Insert test data
+ test_guid = "ABCDEF12-3456-7890-ABCD-1234567890AB"
+ cursor.execute(
+ "INSERT INTO #pytest_sql_guid_type (id, guid_col) VALUES (?, ?)",
+ [1, test_guid],
+ )
db_connection.commit()
- cursor.execute("SELECT id, dto_col FROM #pytest_dto_nulls ORDER BY id")
- rows = cursor.fetchall()
+ # Configure native_uuid=True to trigger UUID processing
+ original_setting = None
+ if hasattr(cursor.connection, "_settings") and "native_uuid" in cursor.connection._settings:
+ original_setting = cursor.connection._settings["native_uuid"]
+ cursor.connection._settings["native_uuid"] = True
- assert len(rows) == 2, "Should have exactly 2 rows"
- assert rows[0][1] is None, "First DATETIMEOFFSET should be NULL"
- assert rows[1][1] is not None, "Second DATETIMEOFFSET should not be NULL"
+ # Fetch the data - this should trigger lines 111-119 in row.py
+ cursor.execute("SELECT id, guid_col FROM #pytest_sql_guid_type")
+ row = cursor.fetchone()
+
+ # Line 111: sql_type = description[i][1]
+ # Line 112: if sql_type == -11: # SQL_GUID
+ # Line 115: processed_values[i] = uuid.UUID(value.strip("{}"))
+ assert row is not None, "Should return a row"
+ assert row[0] == 1, "ID should be 1"
+
+    # The GUID column should be processed
+    guid_value = row[1]
+    assert guid_value is not None, "Processed GUID value should not be None"
+
+ # Restore original setting
+ if original_setting is not None and hasattr(cursor.connection, "_settings"):
+ cursor.connection._settings["native_uuid"] = original_setting
except Exception as e:
- pytest.fail(f"DATETIMEOFFSET with NULLs test failed: {e}")
+ pytest.fail(f"UUID processing SQL_GUID type test failed: {e}")
finally:
- drop_table_if_exists(cursor, "#pytest_dto_nulls")
+ drop_table_if_exists(cursor, "#pytest_sql_guid_type")
db_connection.commit()
-def test_decimal_conversion_edge_cases(cursor, db_connection):
- """Test DECIMAL/NUMERIC type conversion including edge cases"""
+def test_row_output_converter_overflow_error(cursor, db_connection):
+ """Test Row output converter OverflowError handling (Lines 186-195)."""
+
try:
- drop_table_if_exists(cursor, "#pytest_decimal_edge")
+ # Create a table with integer column
+ drop_table_if_exists(cursor, "#pytest_overflow_test")
cursor.execute("""
- CREATE TABLE #pytest_decimal_edge (
+ CREATE TABLE #pytest_overflow_test (
id INT,
- dec_col DECIMAL(18, 4)
+ small_int TINYINT -- TINYINT can only hold 0-255
)
- """)
- db_connection.commit()
+ """)
- # Insert various decimal values including edge cases
- test_values = [
- (1, "123.4567"),
- (2, "0.0001"),
- (3, "-999999999999.9999"),
- (4, "999999999999.9999"),
- (5, "0.0000"),
- ]
+ # Insert a valid value first
+ cursor.execute("INSERT INTO #pytest_overflow_test (id, small_int) VALUES (?, ?)", [1, 100])
+ db_connection.commit()
- for id_val, dec_val in test_values:
- cursor.execute(
- "INSERT INTO #pytest_decimal_edge VALUES (?, ?)", (id_val, decimal.Decimal(dec_val))
- )
+ # Create a custom output converter that will cause OverflowError
+ def problematic_converter(value):
+ if isinstance(value, int) and value == 100:
+ # This will cause an OverflowError when trying to convert to bytes
+ # by simulating a value that's too large for the byte size
+ raise OverflowError("int too big to convert to bytes")
+ return value
- # Also insert NULL
- cursor.execute("INSERT INTO #pytest_decimal_edge VALUES (6, NULL)")
- db_connection.commit()
+ # Add the converter to the connection (if supported)
+ if hasattr(cursor.connection, "_output_converters"):
+ # Create a converter that will trigger the overflow
+ original_converters = getattr(cursor.connection, "_output_converters", {})
+ cursor.connection._output_converters = {-6: problematic_converter} # TINYINT SQL type
- cursor.execute("SELECT id, dec_col FROM #pytest_decimal_edge ORDER BY id")
- rows = cursor.fetchall()
+ # Fetch the data - this should trigger lines 186-195 in row.py
+ cursor.execute("SELECT id, small_int FROM #pytest_overflow_test")
+ row = cursor.fetchone()
- assert len(rows) == 6, "Should have exactly 6 rows"
+ # Line 188: except OverflowError as e:
+ # Lines 190-194: if hasattr(self._cursor, "log"): self._cursor.log(...)
+ # Line 195: # Keep the original value in this case
+ assert row is not None, "Should return a row"
+ assert row[0] == 1, "ID should be 1"
- # Verify the values
- for i, (id_val, expected_str) in enumerate(test_values):
- assert rows[i][0] == id_val, f"Row {i} ID should be {id_val}"
- assert rows[i][1] == decimal.Decimal(
- expected_str
- ), f"Row {i} decimal should match {expected_str}"
+ # The overflow should be handled and original value kept
+ assert row[1] == 100, "Value should be kept as original due to overflow handling"
- # Verify NULL
- assert rows[5][0] == 6, "Last row ID should be 6"
- assert rows[5][1] is None, "Last decimal should be NULL"
+ # Restore original converters
+ if hasattr(cursor.connection, "_output_converters"):
+ cursor.connection._output_converters = original_converters
except Exception as e:
- pytest.fail(f"Decimal conversion edge cases test failed: {e}")
+ pytest.fail(f"Output converter OverflowError test failed: {e}")
finally:
- drop_table_if_exists(cursor, "#pytest_decimal_edge")
+ drop_table_if_exists(cursor, "#pytest_overflow_test")
db_connection.commit()
-def test_fixed_length_char_type(cursor, db_connection):
- """Test SQL_CHAR (fixed-length CHAR) column processor path (Lines 3464-3467)"""
+def test_row_output_converter_general_exception(cursor, db_connection):
+ """Test Row output converter general exception handling (Lines 198-206)."""
+
try:
- cursor.execute("CREATE TABLE #pytest_char_test (id INT, char_col CHAR(10))")
- cursor.execute("INSERT INTO #pytest_char_test VALUES (1, 'hello')")
- cursor.execute("INSERT INTO #pytest_char_test VALUES (2, 'world')")
+ # Create a table with string column
+ drop_table_if_exists(cursor, "#pytest_exception_test")
+ cursor.execute("""
+ CREATE TABLE #pytest_exception_test (
+ id INT,
+ text_col VARCHAR(50)
+ )
+ """)
- cursor.execute("SELECT char_col FROM #pytest_char_test ORDER BY id")
- rows = cursor.fetchall()
+ # Insert test data
+ cursor.execute(
+ "INSERT INTO #pytest_exception_test (id, text_col) VALUES (?, ?)",
+ [1, "test_value"],
+ )
+ db_connection.commit()
- # CHAR pads with spaces to fixed length
- assert len(rows) == 2, "Should fetch 2 rows"
- assert rows[0][0].rstrip() == "hello", "First CHAR value should be 'hello'"
- assert rows[1][0].rstrip() == "world", "Second CHAR value should be 'world'"
+ # Create a custom output converter that will raise a general exception
+ def failing_converter(value):
+ if value == "test_value":
+ raise RuntimeError("Custom converter error for testing")
+ return value
- cursor.execute("DROP TABLE #pytest_char_test")
- except Exception as e:
- pytest.fail(f"Fixed-length CHAR test failed: {e}")
+ # Add the converter to the connection (if supported)
+ original_converters = {}
+ if hasattr(cursor.connection, "_output_converters"):
+ original_converters = getattr(cursor.connection, "_output_converters", {})
+ cursor.connection._output_converters = {12: failing_converter} # VARCHAR SQL type
+ # Fetch the data - this should trigger lines 198-206 in row.py
+ cursor.execute("SELECT id, text_col FROM #pytest_exception_test")
+ row = cursor.fetchone()
-def test_fixed_length_nchar_type(cursor, db_connection):
- """Test SQL_WCHAR (fixed-length NCHAR) column processor path (Lines 3469-3472)"""
- try:
- cursor.execute("CREATE TABLE #pytest_nchar_test (id INT, nchar_col NCHAR(10))")
- cursor.execute("INSERT INTO #pytest_nchar_test VALUES (1, N'hello')")
- cursor.execute("INSERT INTO #pytest_nchar_test VALUES (2, N'世界')") # Unicode test
+ # Line 199: except Exception as e:
+ # Lines 201-205: if hasattr(self._cursor, "log"): self._cursor.log(...)
+ # Line 206: # If conversion fails, keep the original value
+ assert row is not None, "Should return a row"
+ assert row[0] == 1, "ID should be 1"
- cursor.execute("SELECT nchar_col FROM #pytest_nchar_test ORDER BY id")
- rows = cursor.fetchall()
+ # The exception should be handled and original value kept
+ assert row[1] == "test_value", "Value should be kept as original due to exception handling"
- # NCHAR pads with spaces to fixed length
- assert len(rows) == 2, "Should fetch 2 rows"
- assert rows[0][0].rstrip() == "hello", "First NCHAR value should be 'hello'"
- assert rows[1][0].rstrip() == "世界", "Second NCHAR value should be '世界'"
+ # Restore original converters
+ if hasattr(cursor.connection, "_output_converters"):
+ cursor.connection._output_converters = original_converters
- cursor.execute("DROP TABLE #pytest_nchar_test")
except Exception as e:
- pytest.fail(f"Fixed-length NCHAR test failed: {e}")
+ pytest.fail(f"Output converter general exception test failed: {e}")
+ finally:
+ drop_table_if_exists(cursor, "#pytest_exception_test")
+ db_connection.commit()
-def test_fixed_length_binary_type(cursor, db_connection):
- """Test SQL_BINARY (fixed-length BINARY) column processor path (Lines 3474-3477)"""
+def test_row_cursor_log_method_availability(cursor, db_connection):
+ """Test Row checking for cursor.log method availability (Lines 190, 201)."""
+
try:
- cursor.execute("CREATE TABLE #pytest_binary_test (id INT, binary_col BINARY(8))")
- cursor.execute("INSERT INTO #pytest_binary_test VALUES (1, 0x0102030405)")
- cursor.execute("INSERT INTO #pytest_binary_test VALUES (2, 0xAABBCCDD)")
+ # Create test data
+ drop_table_if_exists(cursor, "#pytest_log_check")
+ cursor.execute("""
+ CREATE TABLE #pytest_log_check (
+ id INT,
+ value_col INT
+ )
+ """)
- cursor.execute("SELECT binary_col FROM #pytest_binary_test ORDER BY id")
- rows = cursor.fetchall()
+ cursor.execute("INSERT INTO #pytest_log_check (id, value_col) VALUES (?, ?)", [1, 42])
+ db_connection.commit()
- # BINARY pads with zeros to fixed length (8 bytes)
- assert len(rows) == 2, "Should fetch 2 rows"
- assert len(rows[0][0]) == 8, "BINARY(8) should be 8 bytes"
- assert len(rows[1][0]) == 8, "BINARY(8) should be 8 bytes"
- # First 5 bytes should match, rest padded with zeros
- assert (
- rows[0][0][:5] == b"\x01\x02\x03\x04\x05"
- ), "First BINARY value should start with inserted bytes"
- assert rows[0][0][5:] == b"\x00\x00\x00", "BINARY should be zero-padded"
+ # Test that cursor has log method or doesn't have it
+ # Lines 190 and 201: if hasattr(self._cursor, "log"):
+ cursor.execute("SELECT id, value_col FROM #pytest_log_check")
+ row = cursor.fetchone()
- cursor.execute("DROP TABLE #pytest_binary_test")
- except Exception as e:
- pytest.fail(f"Fixed-length BINARY test failed: {e}")
- # The hasattr check should complete without error
- # This covers the conditional log method availability checks
+ assert row is not None, "Should return a row"
+ assert row[0] == 1, "ID should be 1"
+ assert row[1] == 42, "Value should be 42"
+
+ # The hasattr check should complete without error
+ # This covers the conditional log method availability checks
except Exception as e:
pytest.fail(f"Cursor log method availability test failed: {e}")
@@ -15005,7 +13738,7 @@ def test_fetchall_with_integrity_constraint(cursor, db_connection):
# Cleanup
try:
cursor.execute("DROP TABLE IF EXISTS #uniq_cons_test")
- except:
+ except Exception:
pass