Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions .github/workflows/diskquota-jdbc-integration.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
name: DiskQuota JDBC Integration

on:
push:
tags:
- "**"
pull_request:
paths:
- ".github/workflows/diskquota-jdbc-integration.yml"
- "pom.xml"
- "geowebcache/pom.xml"
- "geowebcache/core/**"
- "geowebcache/diskquota/**"

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
testcontainers:
name: DiskQuota JDBC Testcontainers (Postgres + Oracle XE)
runs-on: ubuntu-latest
strategy:
matrix:
java-version: [ 17, 21 ]
steps:
- uses: actions/checkout@v6
- uses: actions/setup-java@v5
with:
distribution: 'temurin'
java-version: ${{ matrix.java-version }}
cache: 'maven'

- name: Tests against PostgreSQL and Oracle XE TestContainers
  run: |
    # NOTE(review): -DskipTests=true also sets maven-failsafe's own `skipTests` user
    # property, which skips the integration tests regardless of -DskipITs=false --
    # confirm the *IT classes actually execute in this job (check failsafe's
    # "Tests run:" output) before relying on this workflow as a gate.
    mvn verify -f geowebcache/pom.xml -pl :gwc-diskquota-jdbc -am \
      -Ponline \
      -DskipTests=true \
      -DskipITs=false -B -ntp

- name: Remove SNAPSHOT jars from repository
  run: |
    # setup-java's Maven cache lives under ~/.m2/repository (not ./.m2, where find would
    # fail on a missing path); -print0/-0 survives odd directory names and -r skips the
    # rm invocation when nothing matches ("{}" is find -exec syntax, not xargs).
    find ~/.m2/repository -name "*SNAPSHOT*" -type d -print0 | xargs -0 -r rm -rf
64 changes: 64 additions & 0 deletions geowebcache/diskquota/jdbc/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,30 @@
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<!-- testcontainers for integration tests against real DB engines; see *IT classes under src/test -->
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>postgresql</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>oracle-xe</artifactId>
<scope>test</scope>
</dependency>
<!-- Modern Oracle JDBC driver (the legacy ojdbc14 in the 'oracle' profile only loads when -Doracle is set
and is for Oracle 10g; the testcontainers IT uses the current driver against gvenzl/oracle-xe). -->
<dependency>
<groupId>com.oracle.database.jdbc</groupId>
<artifactId>ojdbc11</artifactId>
<version>23.4.0.24.05</version>
<scope>test</scope>
</dependency>
</dependencies>

<profiles>
Expand All @@ -76,5 +100,45 @@
</dependency>
</dependencies>
</profile>
<profile>
<!-- activates failsafe for the testcontainers-backed *IT tests under
src/test/java/org/geowebcache/diskquota/jdbc/tests/container/ -->
<id>online</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1</forkCount>
<reuseForks>false</reuseForks>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<!-- skips the testcontainers ITs (used by CI builds without Docker) -->
<id>excludeDockerTests</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<forkCount>1</forkCount>
<reuseForks>false</reuseForks>
<excludes>
<exclude>org.geowebcache.diskquota.jdbc.tests.container.*IT</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
*/
package org.geowebcache.diskquota.jdbc;

import java.util.Arrays;
import java.util.List;

/**
* HSQL dialect for the quota store
Expand All @@ -25,69 +25,44 @@ public HSQLDialect() {

TABLE_CREATION_MAP.put(
"TILESET",
Arrays.asList( //
"CREATE CACHED TABLE ${schema}TILESET (\n"
+ //
" KEY VARCHAR("
+ TILESET_KEY_SIZE
+ ") PRIMARY KEY,\n"
+ //
" LAYER_NAME VARCHAR("
+ LAYER_NAME_SIZE
+ "),\n"
+ //
" GRIDSET_ID VARCHAR("
+ GRIDSET_ID_SIZE
+ "),\n"
+ //
" BLOB_FORMAT VARCHAR("
+ BLOB_FORMAT_SIZE
+ "),\n"
+ //
" PARAMETERS_ID VARCHAR("
+ PARAMETERS_ID_SIZE
+ "),\n"
+ //
" BYTES NUMERIC("
+ BYTES_SIZE
+ ") DEFAULT 0 NOT NULL\n"
+ //
")", //
List.of( //
"""
CREATE CACHED TABLE ${schema}TILESET (
KEY VARCHAR(%d) PRIMARY KEY,
LAYER_NAME VARCHAR(%d),
GRIDSET_ID VARCHAR(%d),
BLOB_FORMAT VARCHAR(%d),
PARAMETERS_ID VARCHAR(%d),
BYTES NUMERIC(%d) DEFAULT 0 NOT NULL
)
"""
.formatted(
TILESET_KEY_SIZE,
LAYER_NAME_SIZE,
GRIDSET_ID_SIZE,
BLOB_FORMAT_SIZE,
PARAMETERS_ID_SIZE,
BYTES_SIZE), //
"CREATE INDEX TILESET_LAYER ON ${schema}TILESET(LAYER_NAME)" //
));

TABLE_CREATION_MAP.put(
"TILEPAGE",
Arrays.asList(
"CREATE CACHED TABLE ${schema}TILEPAGE (\n"
+ //
" KEY VARCHAR("
+ TILEPAGE_KEY_SIZE
+ ") PRIMARY KEY,\n"
+ //
" TILESET_ID VARCHAR("
+ TILESET_KEY_SIZE
+ ") REFERENCES ${schema}TILESET(KEY) ON DELETE CASCADE,\n"
+ //
" PAGE_Z SMALLINT,\n"
+ //
" PAGE_X INTEGER,\n"
+ //
" PAGE_Y INTEGER,\n"
+ //
" CREATION_TIME_MINUTES INTEGER,\n"
+ //
" FREQUENCY_OF_USE FLOAT,\n"
+ //
" LAST_ACCESS_TIME_MINUTES INTEGER,\n"
+ //
" FILL_FACTOR FLOAT,\n"
+ //
" NUM_HITS NUMERIC("
+ NUM_HITS_SIZE
+ ")\n"
+ //
")", //
List.of(
"""
CREATE CACHED TABLE ${schema}TILEPAGE (
KEY VARCHAR(%d) PRIMARY KEY,
TILESET_ID VARCHAR(%d) REFERENCES ${schema}TILESET(KEY) ON UPDATE CASCADE ON DELETE CASCADE,
PAGE_Z SMALLINT,
PAGE_X INTEGER,
PAGE_Y INTEGER,
CREATION_TIME_MINUTES INTEGER,
FREQUENCY_OF_USE FLOAT,
LAST_ACCESS_TIME_MINUTES INTEGER,
FILL_FACTOR FLOAT,
NUM_HITS NUMERIC(%d)
)"""
.formatted(TILEPAGE_KEY_SIZE, TILESET_KEY_SIZE, NUM_HITS_SIZE), //
"CREATE INDEX TILEPAGE_TILESET ON ${schema}TILEPAGE(TILESET_ID, FILL_FACTOR)",
"CREATE INDEX TILEPAGE_FREQUENCY ON ${schema}TILEPAGE(FREQUENCY_OF_USE DESC)",
"CREATE INDEX TILEPAGE_LAST_ACCESS ON ${schema}TILEPAGE(LAST_ACCESS_TIME_MINUTES DESC)"));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
*/
package org.geowebcache.diskquota.jdbc;

import java.util.Arrays;
import java.util.List;

/**
Expand All @@ -34,69 +33,45 @@ static int numberPrecision(int n) {
public OracleDialect() {
TABLE_CREATION_MAP.put(
"TILESET",
Arrays.asList( //
"CREATE TABLE ${schema}TILESET (\n"
+ //
" KEY VARCHAR("
+ TILESET_KEY_SIZE
+ ") PRIMARY KEY,\n"
+ //
" LAYER_NAME VARCHAR("
+ LAYER_NAME_SIZE
+ "),\n"
+ //
" GRIDSET_ID VARCHAR("
+ GRIDSET_ID_SIZE
+ "),\n"
+ //
" BLOB_FORMAT VARCHAR("
+ BLOB_FORMAT_SIZE
+ "),\n"
+ //
" PARAMETERS_ID VARCHAR("
+ PARAMETERS_ID_SIZE
+ "),\n"
+ //
" BYTES NUMBER("
+ numberPrecision(BYTES_SIZE)
+ ") DEFAULT 0 NOT NULL\n"
+ //
") ORGANIZATION INDEX", //
List.of( //
"""
CREATE TABLE ${schema}TILESET (
KEY VARCHAR(%d) PRIMARY KEY,
LAYER_NAME VARCHAR(%d),
GRIDSET_ID VARCHAR(%d),
BLOB_FORMAT VARCHAR(%d),
PARAMETERS_ID VARCHAR(%d),
BYTES NUMBER(%d) DEFAULT 0 NOT NULL
) ORGANIZATION INDEX
"""
.formatted(
TILESET_KEY_SIZE,
LAYER_NAME_SIZE,
GRIDSET_ID_SIZE,
BLOB_FORMAT_SIZE,
PARAMETERS_ID_SIZE,
numberPrecision(BYTES_SIZE)), //
"CREATE INDEX TILESET_LAYER ON TILESET(LAYER_NAME)" //
));

TABLE_CREATION_MAP.put(
"TILEPAGE",
Arrays.asList(
"CREATE TABLE ${schema}TILEPAGE (\n"
+ //
" KEY VARCHAR("
+ TILEPAGE_KEY_SIZE
+ ") PRIMARY KEY,\n"
+ //
" TILESET_ID VARCHAR("
+ TILESET_KEY_SIZE
+ ") REFERENCES ${schema}TILESET(KEY) ON DELETE CASCADE,\n"
+ //
" PAGE_Z SMALLINT,\n"
+ //
" PAGE_X INTEGER,\n"
+ //
" PAGE_Y INTEGER,\n"
+ //
" CREATION_TIME_MINUTES INTEGER,\n"
+ //
" FREQUENCY_OF_USE FLOAT,\n"
+ //
" LAST_ACCESS_TIME_MINUTES INTEGER,\n"
+ //
" FILL_FACTOR FLOAT,\n"
+ //
" NUM_HITS NUMBER("
+ numberPrecision(NUM_HITS_SIZE)
+ ")\n"
+ //
") ORGANIZATION INDEX", //
List.of(
"""
CREATE TABLE ${schema}TILEPAGE (
KEY VARCHAR(%d) PRIMARY KEY,
TILESET_ID VARCHAR(%d) REFERENCES ${schema}TILESET(KEY) ON DELETE CASCADE,
PAGE_Z SMALLINT,
PAGE_X INTEGER,
PAGE_Y INTEGER,
CREATION_TIME_MINUTES INTEGER,
FREQUENCY_OF_USE FLOAT,
LAST_ACCESS_TIME_MINUTES INTEGER,
FILL_FACTOR FLOAT,
NUM_HITS NUMBER(%d)
) ORGANIZATION INDEX
"""
.formatted(TILEPAGE_KEY_SIZE, TILESET_KEY_SIZE, numberPrecision(NUM_HITS_SIZE)), //
"CREATE INDEX TILEPAGE_TILESET ON TILEPAGE(TILESET_ID)",
"CREATE INDEX TILEPAGE_FILL_FACTOR ON TILEPAGE(FILL_FACTOR)",
"CREATE INDEX TILEPAGE_FREQUENCY ON TILEPAGE(FREQUENCY_OF_USE DESC)",
Expand All @@ -108,6 +83,40 @@ protected void addEmtpyTableReference(StringBuilder sb) {
sb.append("FROM DUAL");
}

/**
 * No-op: Oracle does not support {@code ON UPDATE CASCADE} on foreign keys, so there is nothing portable to
 * migrate. Companion to {@link #getRenameLayerStatement(String, String, String)}, which preserves the legacy
 * LAYER_NAME-only behavior on this dialect.
 *
 * @param schema the (possibly null) schema holding the quota store tables; unused by this no-op
 * @param template JDBC helper other dialects would use to run migration DDL; unused by this no-op
 */
@Override
public void migrateForeignKeys(String schema, SimpleJdbcTemplate template) {
    // intentional no-op -- presumably the overridden implementation rewrites the TILEPAGE
    // foreign key to add ON UPDATE CASCADE, which Oracle cannot express, so this dialect
    // does nothing (TODO confirm against the base class contract, not visible in this file)
}

/**
 * Builds the Oracle statement used to rename a layer in the TILESET table.
 *
 * <p>Oracle does not support {@code ON UPDATE CASCADE} on foreign keys, so the
 * {@code TILEPAGE.TILESET_ID -> TILESET.KEY} FK declared by this dialect only cascades on delete.
 * Rewriting {@code TILESET.KEY} during a rename would therefore leave dangling {@code TILEPAGE}
 * rows, and this dialect keeps the legacy behavior of updating {@code LAYER_NAME} only: lookups by
 * id against the renamed layer will keep missing the row and cause {@code getOrCreateTileSet} to
 * insert duplicates. A proper Oracle fix (e.g. {@code DEFERRABLE INITIALLY DEFERRED} constraints,
 * or disabling the FK around the rename) is tracked separately.
 *
 * <p>NOTE(review): the two name arguments are spliced into the SQL as named-parameter placeholders
 * ({@code :<name>}), not as literal values — this presumably means callers pass placeholder
 * identifiers here and bind the actual layer names in the parameter map; confirm against the quota
 * store's usage.
 *
 * @param schema optional schema prefix for the TILESET table; may be null
 * @param oldLayerName placeholder name bound to the layer's current name
 * @param newLayerName placeholder name bound to the layer's new name
 * @return the parameterized UPDATE statement
 */
@Override
public String getRenameLayerStatement(String schema, String oldLayerName, String newLayerName) {
    // Qualify the table name only when a schema was supplied
    String qualifiedTable = (schema == null ? "" : schema + ".") + "TILESET";
    return "UPDATE " + qualifiedTable
            + " SET LAYER_NAME = :" + newLayerName
            + " WHERE LAYER_NAME = :" + oldLayerName;
}

@Override
public String getLeastFrequentlyUsedPage(String schema, List<String> layerParamNames) {
StringBuilder sb = new StringBuilder("SELECT * FROM (");
Expand Down
Loading
Loading