Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,9 @@ endmacro()
# Tests
if(BUILD_TESTS)
enable_testing()
add_cloudsql_test(catalog_coverage_tests tests/catalog_coverage_tests.cpp)
add_cloudsql_test(transaction_coverage_tests tests/transaction_coverage_tests.cpp)
add_cloudsql_test(utils_coverage_tests tests/utils_coverage_tests.cpp)
add_cloudsql_test(cloudSQL_tests tests/cloudSQL_tests.cpp)
add_cloudsql_test(server_tests tests/server_tests.cpp)
add_cloudsql_test(statement_tests tests/statement_tests.cpp)
Expand Down
158 changes: 158 additions & 0 deletions tests/catalog_coverage_tests.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
/**
* @file catalog_coverage_tests.cpp
* @brief Targeted unit tests to increase coverage of the Catalog module
*/

#include <gtest/gtest.h>

#include <cstring>
#include <vector>

#include "catalog/catalog.hpp"
#include "common/value.hpp"
#include "distributed/raft_types.hpp"

using namespace cloudsql;

namespace {

/**
 * @brief Verifies that lookups, drops, and stat updates on absent catalog
 *        entities fail gracefully instead of crashing.
 */
TEST(CatalogCoverageTests, MissingEntities) {
  auto cat = Catalog::create();

  constexpr oid_t kMissingTable = 9999;
  constexpr oid_t kMissingIndex = 8888;

  // Lookups by id and by name on a table that was never created.
  EXPECT_FALSE(cat->get_table(kMissingTable).has_value());
  EXPECT_FALSE(cat->table_exists(kMissingTable));
  EXPECT_FALSE(cat->table_exists_by_name("non_existent"));
  EXPECT_FALSE(cat->get_table_by_name("non_existent").has_value());

  // Index lookups against ids that were never assigned.
  EXPECT_FALSE(cat->get_index(kMissingIndex).has_value());
  EXPECT_TRUE(cat->get_table_indexes(kMissingTable).empty());

  // Dropping entities that do not exist must report failure.
  EXPECT_FALSE(cat->drop_table(kMissingTable));
  EXPECT_FALSE(cat->drop_index(kMissingIndex));

  // Statistics updates on an unknown table must also fail.
  EXPECT_FALSE(cat->update_table_stats(kMissingTable, 100));
}

/**
 * @brief Verifies duplicate-name rejection for tables and indexes, and index
 *        creation against a table id that does not exist.
 */
TEST(CatalogCoverageTests, DuplicateEntities) {
  auto cat = Catalog::create();
  const std::vector<ColumnInfo> cols = {{"id", common::ValueType::TYPE_INT64, 0}};

  const oid_t table_id = cat->create_table("test_table", cols);
  ASSERT_NE(table_id, 0);

  // A second table with the same name must be rejected with an exception.
  EXPECT_THROW(cat->create_table("test_table", cols), std::runtime_error);

  const oid_t index_id = cat->create_index("idx_id", table_id, {0}, IndexType::BTree, true);
  ASSERT_NE(index_id, 0);

  // A second index with the same name must also be rejected.
  EXPECT_THROW(cat->create_index("idx_id", table_id, {0}, IndexType::BTree, true),
               std::runtime_error);

  // Index creation on a missing table yields the invalid oid (0).
  EXPECT_EQ(cat->create_index("idx_missing", 9999, {0}, IndexType::BTree, false), 0);
}

/**
 * @brief Builds the wire format of a CreateTable command as the Catalog's
 *        Raft state machine expects it.
 *
 * Layout (all multi-byte fields in host byte order, as produced by memcpy):
 *   [u8 type=1][u32 name_len][name][u32 col_count]
 *   per column: [u32 cname_len][cname][u8 type][u16 position]
 *   [u32 shard_count] then per shard: [u32 addr_len][addr][u32 shard_id][u16 port]
 *
 * @param name    Table name to encode.
 * @param columns Column descriptors to encode, in positional order.
 * @return Serialized command buffer.
 */
std::vector<uint8_t> serialize_create_table(const std::string& name,
                                            const std::vector<ColumnInfo>& columns) {
  std::vector<uint8_t> buf;

  // Appends the raw bytes of a trivially-copyable value.
  auto put_raw = [&buf](const void* src, size_t len) {
    const auto* bytes = static_cast<const uint8_t*>(src);
    buf.insert(buf.end(), bytes, bytes + len);
  };
  // Appends a u32 length prefix followed by the string's characters.
  auto put_string = [&](const std::string& s) {
    const uint32_t len = static_cast<uint32_t>(s.size());
    put_raw(&len, sizeof(len));
    put_raw(s.data(), len);
  };

  buf.push_back(1);  // Command type 1 == CreateTable.
  put_string(name);

  const uint32_t col_count = static_cast<uint32_t>(columns.size());
  put_raw(&col_count, sizeof(col_count));
  for (const auto& col : columns) {
    put_string(col.name);
    buf.push_back(static_cast<uint8_t>(col.type));
    put_raw(&col.position, 2);  // Encoded as exactly two bytes, as the reader expects.
  }

  // Single shard located at the default local endpoint.
  const uint32_t shard_count = 1;
  put_raw(&shard_count, sizeof(shard_count));

  const std::string addr = "127.0.0.1";
  const uint32_t shard_id = 0;
  const uint16_t port = 6441;
  put_string(addr);
  put_raw(&shard_id, sizeof(shard_id));
  put_raw(&port, sizeof(port));

  return buf;
}

/**
 * @brief Replays CreateTable and DropTable log entries through the Catalog's
 *        Raft apply() hook and checks the resulting catalog state.
 */
TEST(CatalogCoverageTests, RaftApply) {
  auto cat = Catalog::create();

  // Apply a serialized CreateTable command (type tag 1).
  const std::vector<ColumnInfo> cols = {{"id", common::ValueType::TYPE_INT64, 0}};

  raft::LogEntry entry;
  entry.term = 1;
  entry.index = 1;
  entry.data = serialize_create_table("raft_table", cols);
  cat->apply(entry);

  EXPECT_TRUE(cat->table_exists_by_name("raft_table"));
  auto table_info = cat->get_table_by_name("raft_table");
  ASSERT_TRUE(table_info.has_value());
  const oid_t table_id = (*table_info)->table_id;

  // Apply a serialized DropTable command: type tag 2 followed by the u32 table id.
  std::vector<uint8_t> drop_cmd(5);
  drop_cmd[0] = 2;
  std::memcpy(drop_cmd.data() + 1, &table_id, 4);

  entry.index = 2;
  entry.data = drop_cmd;
  cat->apply(entry);

  EXPECT_FALSE(cat->table_exists(table_id));
  EXPECT_FALSE(cat->table_exists_by_name("raft_table"));

  // An entry with an empty payload must be ignored without side effects.
  entry.index = 3;
  entry.data.clear();
  cat->apply(entry);
}

} // namespace
171 changes: 171 additions & 0 deletions tests/transaction_coverage_tests.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
/**
* @file transaction_coverage_tests.cpp
* @brief Targeted unit tests to increase coverage of Transaction and Lock Manager
*/

#include <gtest/gtest.h>

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

#include "catalog/catalog.hpp"
#include "common/config.hpp"
#include "storage/buffer_pool_manager.hpp"
#include "storage/heap_table.hpp"
#include "storage/storage_manager.hpp"
#include "transaction/lock_manager.hpp"
#include "transaction/transaction.hpp"
#include "transaction/transaction_manager.hpp"

using namespace cloudsql;
using namespace cloudsql::transaction;
using namespace cloudsql::storage;

namespace {

/**
 * @class TransactionCoverageTests
 * @brief Fixture for transaction-related coverage tests to ensure proper resource management.
 *
 * SetUp wires together the full storage stack in dependency order (catalog,
 * storage manager, buffer pool, lock manager, transaction manager) and
 * creates one heap table; TearDown releases everything in the reverse order
 * so no component outlives the ones it references.
 */
class TransactionCoverageTests : public ::testing::Test {
protected:
  void SetUp() override {
    // Build the stack bottom-up: each component holds references to the
    // previously constructed ones (e.g. the BPM references the StorageManager).
    catalog = Catalog::create();
    disk_manager = std::make_unique<StorageManager>("./test_data");
    bpm = std::make_unique<BufferPoolManager>(config::Config::DEFAULT_BUFFER_POOL_SIZE,
                                              *disk_manager);
    lm = std::make_unique<LockManager>();
    // nullptr: no Raft/replication hook is needed for these tests —
    // NOTE(review): confirm the fourth TransactionManager argument's meaning.
    tm = std::make_unique<TransactionManager>(*lm, *catalog, *bpm, nullptr);

    // Register the test table in the catalog and back it with a heap file.
    std::vector<ColumnInfo> cols = {{"id", common::ValueType::TYPE_INT64, 0},
                                    {"val", common::ValueType::TYPE_TEXT, 1}};
    catalog->create_table("rollback_stress", cols);

    executor::Schema schema;
    schema.add_column("id", common::ValueType::TYPE_INT64);
    schema.add_column("val", common::ValueType::TYPE_TEXT);

    table = std::make_unique<HeapTable>("rollback_stress", *bpm, schema);
    table->create();

    // No transaction in flight until a test starts one.
    txn = nullptr;
  }

  void TearDown() override {
    // Abort any transaction a test left open so locks/undo state are released.
    if (txn != nullptr) {
      tm->abort(txn);
    }
    // Destroy in reverse construction order to respect held references.
    table.reset();
    tm.reset();
    lm.reset();
    bpm.reset();
    disk_manager.reset();
    catalog.reset();

    // Best-effort cleanup of the heap file; ignore failure if it was never created.
    static_cast<void>(std::remove("./test_data/rollback_stress.heap"));
  }

  std::unique_ptr<Catalog> catalog;
  std::unique_ptr<StorageManager> disk_manager;
  std::unique_ptr<BufferPoolManager> bpm;
  std::unique_ptr<LockManager> lm;
  std::unique_ptr<TransactionManager> tm;
  std::unique_ptr<HeapTable> table;
  // Raw observer of the transaction under test; owned by the TransactionManager.
  Transaction* txn;
};

/**
 * @brief Exercises the LockManager under contention: shared-lock requests
 *        must block while a writer holds the exclusive lock and must all be
 *        granted once it is released.
 */
TEST(TransactionCoverageTestsStandalone, LockManagerConcurrency) {
  LockManager lock_mgr;
  constexpr int kReaderCount = 5;
  std::vector<std::thread> reader_threads;
  std::atomic<int> granted{0};
  std::atomic<bool> done{false};

  Transaction writer_txn(100);

  // Writer holds the exclusive lock before any reader starts.
  ASSERT_TRUE(lock_mgr.acquire_exclusive(&writer_txn, "RESOURCE"));

  reader_threads.reserve(kReaderCount);
  for (int id = 0; id < kReaderCount; ++id) {
    reader_threads.emplace_back([&lock_mgr, &granted, &done, id]() {
      Transaction reader_txn(id);
      if (lock_mgr.acquire_shared(&reader_txn, "RESOURCE")) {
        ++granted;
        // Hold the shared lock until the main thread signals completion.
        while (!done) {
          std::this_thread::yield();
        }
        lock_mgr.unlock(&reader_txn, "RESOURCE");
      }
    });
  }

  // While the exclusive lock is held, no shared request may be granted.
  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  EXPECT_EQ(granted.load(), 0);

  // Releasing the writer should let every reader through.
  lock_mgr.unlock(&writer_txn, "RESOURCE");
  for (int spin = 0; spin < 50 && granted.load() < kReaderCount; ++spin) {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
  }
  EXPECT_EQ(granted.load(), kReaderCount);

  done = true;
  for (auto& worker : reader_threads) {
    worker.join();
  }
}

/**
 * @brief Tests deep rollback functionality via the Undo Log, including
 *        restoration of previously committed rows.
 *
 * Rows created and then aborted inside the same transaction only prove that
 * abort returns the table to empty; the restore half of UPDATE/DELETE undo is
 * never observable. So two rows are first committed by a separate transaction.
 * The transaction under test then inserts a new row, updates one committed
 * row, and deletes the other; after abort, both committed originals must be
 * visible again and the transaction's own changes gone.
 */
TEST_F(TransactionCoverageTests, DeepRollback) {
  // 1. Seed committed rows in an independent transaction.
  Transaction* seed_txn = tm->begin();
  auto seeded_update_rid = table->insert(
      executor::Tuple({common::Value::make_int64(1), common::Value::make_text("A")}),
      seed_txn->get_id());
  auto seeded_delete_rid = table->insert(
      executor::Tuple({common::Value::make_int64(2), common::Value::make_text("B")}),
      seed_txn->get_id());
  tm->commit(seed_txn);
  ASSERT_EQ(table->tuple_count(), 2U);

  txn = tm->begin();

  // 2. Insert a brand-new row (undo removes it on abort).
  auto inserted_rid = table->insert(
      executor::Tuple({common::Value::make_int64(3), common::Value::make_text("C")}),
      txn->get_id());
  txn->add_undo_log(UndoLog::Type::INSERT, "rollback_stress", inserted_rid);

  // 3. Update a committed row: mark the old version deleted, insert the new one.
  table->remove(seeded_update_rid, txn->get_id());
  auto updated_rid = table->insert(
      executor::Tuple({common::Value::make_int64(1), common::Value::make_text("A_NEW")}),
      txn->get_id());
  txn->add_undo_log(UndoLog::Type::UPDATE, "rollback_stress", updated_rid, seeded_update_rid);

  // 4. Delete the other committed row.
  table->remove(seeded_delete_rid, txn->get_id());
  txn->add_undo_log(UndoLog::Type::DELETE, "rollback_stress", seeded_delete_rid);

  // Active rows now: the fresh insert and the update's new version.
  EXPECT_EQ(table->tuple_count(), 2U);

  // 5. Abort: the insert and the new update version must disappear, and both
  //    committed originals must be restored.
  tm->abort(txn);
  txn = nullptr;  // Already finalized; prevent TearDown from aborting again.

  EXPECT_EQ(table->tuple_count(), 2U);

  // Exactly the two seeded rows should be visible to a scan.
  auto iter = table->scan();
  executor::Tuple row;
  EXPECT_TRUE(iter.next(row));
  EXPECT_TRUE(iter.next(row));
  EXPECT_FALSE(iter.next(row));
}

} // namespace
Loading