diff --git a/geowebcache/core/pom.xml b/geowebcache/core/pom.xml
index f846c2357..834e59324 100644
--- a/geowebcache/core/pom.xml
+++ b/geowebcache/core/pom.xml
@@ -145,16 +145,18 @@
- <groupId>com.fasterxml.jackson.core</groupId>
+ <groupId>tools.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
+ <version>3.0.2</version>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
- <groupId>com.fasterxml.jackson.core</groupId>
+ <groupId>tools.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
+ <version>3.0.2</version>
<groupId>org.apache.httpcomponents.client5</groupId>
diff --git a/geowebcache/core/src/main/java/org/geowebcache/config/XMLFileResourceProvider.java b/geowebcache/core/src/main/java/org/geowebcache/config/XMLFileResourceProvider.java
index 2028ea951..03a0fbac2 100644
--- a/geowebcache/core/src/main/java/org/geowebcache/config/XMLFileResourceProvider.java
+++ b/geowebcache/core/src/main/java/org/geowebcache/config/XMLFileResourceProvider.java
@@ -32,8 +32,8 @@
import org.geowebcache.storage.DefaultStorageFinder;
import org.geowebcache.util.ApplicationContextProvider;
import org.geowebcache.util.GWCVars;
+import org.jspecify.annotations.NonNull;
import org.springframework.context.ApplicationContext;
-import org.springframework.lang.NonNull;
import org.springframework.web.context.WebApplicationContext;
/** Default implementation of ConfigurationResourceProvider that uses the file system. */
diff --git a/geowebcache/diskquota/jdbc/src/main/java/org/geowebcache/diskquota/jdbc/JDBCQuotaStore.java b/geowebcache/diskquota/jdbc/src/main/java/org/geowebcache/diskquota/jdbc/JDBCQuotaStore.java
index 7d5054cb0..bcd9cee3c 100644
--- a/geowebcache/diskquota/jdbc/src/main/java/org/geowebcache/diskquota/jdbc/JDBCQuotaStore.java
+++ b/geowebcache/diskquota/jdbc/src/main/java/org/geowebcache/diskquota/jdbc/JDBCQuotaStore.java
@@ -51,7 +51,6 @@
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
-import org.springframework.transaction.support.TransactionCallbackWithoutResult;
import org.springframework.transaction.support.TransactionTemplate;
/**
@@ -134,37 +133,33 @@ public void initialize() {
throw new IllegalStateException(
"Please provide both the sql dialect and the data " + "source before calling inizialize");
}
- tt.execute(new TransactionCallbackWithoutResult() {
+ tt.executeWithoutResult(status -> {
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- // setup the tables if necessary
- dialect.initializeTables(schema, jt);
+ // setup the tables if necessary
+ dialect.initializeTables(schema, jt);
- // get the existing table names
- List<String> existingLayers =
- jt.query(dialect.getAllLayersQuery(schema), (rs, rowNum) -> rs.getString(1));
+ // get the existing table names
+ List<String> existingLayers = jt.query(dialect.getAllLayersQuery(schema), (rs, rowNum) -> rs.getString(1));
- // compare with the ones available in the config
- final Set<String> layerNames = calculator.getLayerNames();
- final Set<String> layersToDelete = new HashSet<>(existingLayers);
- layersToDelete.removeAll(layerNames);
+ // compare with the ones available in the config
+ final Set<String> layerNames = calculator.getLayerNames();
+ final Set<String> layersToDelete = new HashSet<>(existingLayers);
+ layersToDelete.removeAll(layerNames);
- // remove all the layers we don't need
- for (String layerName : layersToDelete) {
- deleteLayer(layerName);
- }
+ // remove all the layers we don't need
+ for (String layerName : layersToDelete) {
+ deleteLayer(layerName);
+ }
- // add any missing tileset
- for (String layerName : layerNames) {
- createLayerInternal(layerName);
- }
+ // add any missing tileset
+ for (String layerName : layerNames) {
+ createLayerInternal(layerName);
+ }
- // create the global quota if necessary
- Quota global = getUsedQuotaByTileSetIdInternal(GLOBAL_QUOTA_NAME);
- if (global == null) {
- createLayerInternal(GLOBAL_QUOTA_NAME);
- }
+ // create the global quota if necessary
+ Quota global = getUsedQuotaByTileSetIdInternal(GLOBAL_QUOTA_NAME);
+ if (global == null) {
+ createLayerInternal(GLOBAL_QUOTA_NAME);
}
});
}
@@ -175,21 +170,17 @@ public void createLayer(String layerName) throws InterruptedException {
}
private void createLayerInternal(final String layerName) {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- Set<TileSet> layerTileSets;
- if (!GLOBAL_QUOTA_NAME.equals(layerName)) {
- layerTileSets = calculator.getTileSetsFor(layerName);
- } else {
- layerTileSets = Collections.singleton(new TileSet(GLOBAL_QUOTA_NAME));
- }
- for (TileSet tset : layerTileSets) {
- // other nodes in the cluster might be trying to create the same layer,
- // so use getOrCreate
- getOrCreateTileSet(tset);
- }
+ tt.executeWithoutResult(status -> {
+ Set<TileSet> layerTileSets;
+ if (!GLOBAL_QUOTA_NAME.equals(layerName)) {
+ layerTileSets = calculator.getTileSetsFor(layerName);
+ } else {
+ layerTileSets = Collections.singleton(new TileSet(GLOBAL_QUOTA_NAME));
+ }
+ for (TileSet tset : layerTileSets) {
+ // other nodes in the cluster might be trying to create the same layer,
+ // so use getOrCreate
+ getOrCreateTileSet(tset);
}
});
}
@@ -259,77 +250,61 @@ private Quota nonNullQuota(Quota optionalQuota) {
@Override
public void deleteLayer(final String layerName) {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- deleteLayerInternal(layerName);
- }
+ tt.executeWithoutResult(status -> {
+ deleteLayerInternal(layerName);
});
}
@Override
public void deleteGridSubset(final String layerName, final String gridSetId) {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- // get the disk quota used by the layer gridset
- Quota quota = getUsedQuotaByLayerGridset(layerName, gridSetId);
- // we will subtracting the current disk quota value
- quota.setBytes(quota.getBytes().negate());
- // update the global disk quota by subtracting the value above
- String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
- Map<String, Object> params = new HashMap<>();
- params.put("tileSetId", GLOBAL_QUOTA_NAME);
- params.put("bytes", new BigDecimal(quota.getBytes()));
- jt.update(updateQuota, params);
- // delete layer gridset
- String statement = dialect.getLayerGridDeletionStatement(schema, "layerName", "gridSetId");
- params = new HashMap<>();
- params.put("layerName", layerName);
- params.put("gridSetId", gridSetId);
- jt.update(statement, params);
- }
+ tt.executeWithoutResult(status -> {
+ // get the disk quota used by the layer gridset
+ Quota quota = getUsedQuotaByLayerGridset(layerName, gridSetId);
+ // we will be subtracting the current disk quota value
+ quota.setBytes(quota.getBytes().negate());
+ // update the global disk quota by subtracting the value above
+ String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
+ Map<String, Object> params = new HashMap<>();
+ params.put("tileSetId", GLOBAL_QUOTA_NAME);
+ params.put("bytes", new BigDecimal(quota.getBytes()));
+ jt.update(updateQuota, params);
+ // delete layer gridset
+ String statement = dialect.getLayerGridDeletionStatement(schema, "layerName", "gridSetId");
+ params = new HashMap<>();
+ params.put("layerName", layerName);
+ params.put("gridSetId", gridSetId);
+ jt.update(statement, params);
});
}
public void deleteLayerInternal(final String layerName) {
getUsedQuotaByLayerName(layerName);
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus arg0) {
- // update the global quota
- Quota quota = getUsedQuotaByLayerName(layerName);
- quota.setBytes(quota.getBytes().negate());
- String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
- Map<String, Object> params = new HashMap<>();
- params.put("tileSetId", GLOBAL_QUOTA_NAME);
- params.put("bytes", new BigDecimal(quota.getBytes()));
- jt.update(updateQuota, params);
-
- // delete the layer
- log.info("Deleting disk quota information for layer '" + layerName + "'");
- String statement = dialect.getLayerDeletionStatement(schema, "layerName");
- jt.update(statement, Collections.singletonMap("layerName", layerName));
- }
+ tt.executeWithoutResult(status -> {
+ // update the global quota
+ Quota quota = getUsedQuotaByLayerName(layerName);
+ quota.setBytes(quota.getBytes().negate());
+ String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
+ Map<String, Object> params = new HashMap<>();
+ params.put("tileSetId", GLOBAL_QUOTA_NAME);
+ params.put("bytes", new BigDecimal(quota.getBytes()));
+ jt.update(updateQuota, params);
+
+ // delete the layer
+ log.info("Deleting disk quota information for layer '" + layerName + "'");
+ String statement = dialect.getLayerDeletionStatement(schema, "layerName");
+ jt.update(statement, Collections.singletonMap("layerName", layerName));
});
}
@Override
public void renameLayer(final String oldLayerName, final String newLayerName) throws InterruptedException {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- String sql = dialect.getRenameLayerStatement(schema, "oldName", "newName");
- Map<String, Object> params = new HashMap<>();
- params.put("oldName", oldLayerName);
- params.put("newName", newLayerName);
- int updated = jt.update(sql, params);
- log.info("Updated " + updated + " tile sets after layer rename");
- }
+ tt.executeWithoutResult(status -> {
+ String sql = dialect.getRenameLayerStatement(schema, "oldName", "newName");
+ Map<String, Object> params = new HashMap<>();
+ params.put("oldName", oldLayerName);
+ params.put("newName", newLayerName);
+ int updated = jt.update(sql, params);
+ log.info("Updated " + updated + " tile sets after layer rename");
});
}
@@ -428,97 +403,14 @@ public TilePageCalculator getTilePageCalculator() {
public void addToQuotaAndTileCounts(
final TileSet tileSet, final Quota quotaDiff, final Collection<PageStatsPayload> tileCountDiffs)
throws InterruptedException {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- getOrCreateTileSet(tileSet);
- updateQuotas(tileSet, quotaDiff);
-
- if (tileCountDiffs != null) {
- // sort the payloads by page id as a deadlock avoidance measure, out
- // of order updates may result in deadlock with the
- // addHitsAndSetAccessTime method
- List<PageStatsPayload> sorted = sortPayloads(tileCountDiffs);
- for (PageStatsPayload payload : sorted) {
- upsertTilePageFillFactor(payload);
- }
- }
- }
-
- private void updateQuotas(final TileSet tileSet, final Quota quotaDiff) {
- if (log.isLoggable(Level.FINE)) {
- log.info("Applying quota diff " + quotaDiff.getBytes() + " on tileset " + tileSet);
- }
-
- String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
- Map<String, Object> params = new HashMap<>();
- params.put("tileSetId", tileSet.getId());
- params.put("bytes", new BigDecimal(quotaDiff.getBytes()));
- jt.update(updateQuota, params);
- params.put("tileSetId", GLOBAL_QUOTA_NAME);
- jt.update(updateQuota, params);
- }
-
- private void upsertTilePageFillFactor(PageStatsPayload payload) {
- if (log.isLoggable(Level.FINE)) {
- log.info("Applying page stats payload " + payload);
- }
-
- // see http://en.wikipedia.org/wiki/Merge_(SQL)
- // Even the Merge command that some databases support is prone to race
- // conditions
- // under concurrent load, but we don't want to lose data and it's difficult
- // to
- // tell apart the race conditions from other failures, so we use tolerant
- // commands
- // and loop over them.
- // Loop conditions: we find the page stats, but they are deleted before we
- // can
- // update
- // them, we don't find the page stats, but they are inserted before we can
- // do so, in
- // both cases we re-start from zero
- TilePage page = payload.getPage();
- final byte level = page.getZoomLevel();
- final BigInteger tilesPerPage = calculator.getTilesPerPage(tileSet, level);
-
- int modified = 0;
- int count = 0;
- while (modified == 0 && count < maxLoops) {
- try {
- count++;
- PageStats stats = getPageStats(page.getKey());
- if (stats != null) {
- float oldFillFactor = stats.getFillFactor();
- stats.addTiles(payload.getNumTiles(), tilesPerPage);
- // if no change, bail out early
- if (oldFillFactor == stats.getFillFactor()) {
- return;
- }
-
- // update the record in the db
- modified = updatePageFillFactor(page, stats, oldFillFactor);
- } else {
- // create the stats and update the fill factor
- stats = new PageStats(0);
- stats.addTiles(payload.getNumTiles(), tilesPerPage);
-
- modified = createNewPageStats(stats, page);
- }
- } catch (PessimisticLockingFailureException e) {
- if (log.isLoggable(Level.FINE)) {
- log.log(Level.FINE, "Deadlock while updating page stats, will retry", e);
- }
- }
- }
+ tt.executeWithoutResult(status -> {
+ getOrCreateTileSet(tileSet);
+ updateQuotas(tileSet, quotaDiff);
- if (modified == 0) {
- throw new ConcurrencyFailureException("Failed to create or update page stats for page "
- + payload.getPage()
- + " after "
- + count
- + " attempts");
+ if (tileCountDiffs != null) {
+ List<PageStatsPayload> sorted = sortPayloads(tileCountDiffs);
+ for (PageStatsPayload payload : sorted) {
+ upsertTilePageFillFactor(tileSet, payload);
}
}
});
@@ -535,6 +427,82 @@ protected List<PageStatsPayload> sortPayloads(Collection<PageStatsPayload> tileC
return result;
}
+ private void updateQuotas(final TileSet tileSet, final Quota quotaDiff) {
+ if (log.isLoggable(Level.FINE)) {
+ log.info("Applying quota diff " + quotaDiff.getBytes() + " on tileset " + tileSet);
+ }
+
+ String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
+ Map<String, Object> params = new HashMap<>();
+ params.put("tileSetId", tileSet.getId());
+ params.put("bytes", new BigDecimal(quotaDiff.getBytes()));
+ jt.update(updateQuota, params);
+ params.put("tileSetId", GLOBAL_QUOTA_NAME);
+ jt.update(updateQuota, params);
+ }
+
+ private void upsertTilePageFillFactor(final TileSet tileSet, PageStatsPayload payload) {
+ if (log.isLoggable(Level.FINE)) {
+ log.info("Applying page stats payload " + payload);
+ }
+
+ // see http://en.wikipedia.org/wiki/Merge_(SQL)
+ // Even the Merge command that some databases support is prone to race conditions
+ // under concurrent load, but we don't want to lose data and it's difficult to
+ // tell apart the race conditions from other failures, so we use tolerant commands
+ // and loop over them.
+ // Loop conditions: we find the page stats, but they are deleted before we can
+ // update them; or we don't find the page stats, but they are inserted before we
+ // can do so. In both cases we re-start from zero.
+ TilePage page = payload.getPage();
+ final byte level = page.getZoomLevel();
+ final BigInteger tilesPerPage = calculator.getTilesPerPage(tileSet, level);
+
+ int modified = 0;
+ int count = 0;
+ while (modified == 0 && count < maxLoops) {
+ try {
+ count++;
+ PageStats stats = getPageStats(page.getKey());
+ if (stats != null) {
+ float oldFillFactor = stats.getFillFactor();
+ stats.addTiles(payload.getNumTiles(), tilesPerPage);
+ // if no change, bail out early
+ if (oldFillFactor == stats.getFillFactor()) {
+ return;
+ }
+
+ // update the record in the db
+ modified = updatePageFillFactor(page, stats, oldFillFactor);
+ } else {
+ // create the stats and update the fill factor
+ stats = new PageStats(0);
+ stats.addTiles(payload.getNumTiles(), tilesPerPage);
+
+ modified = createNewPageStats(stats, page);
+ }
+ } catch (PessimisticLockingFailureException e) {
+ if (log.isLoggable(Level.FINE)) {
+ log.log(Level.FINE, "Deadlock while updating page stats, will retry", e);
+ }
+ }
+ }
+
+ if (modified == 0) {
+ throw new ConcurrencyFailureException("Failed to create or update page stats for page "
+ + payload.getPage()
+ + " after "
+ + count
+ + " attempts");
+ }
+ }
+
private int updatePageFillFactor(TilePage page, PageStats stats, float oldFillFactor) {
if (log.isLoggable(Level.FINE)) {
log.info("Updating page " + page + " fill factor from " + oldFillFactor + " to " + stats.getFillFactor());
@@ -755,27 +723,23 @@ public TilePage mapRow(ResultSet rs, int rowNum) throws SQLException {
@Override
public void deleteParameters(final String layerName, final String parametersId) {
- tt.execute(new TransactionCallbackWithoutResult() {
-
- @Override
- protected void doInTransactionWithoutResult(TransactionStatus status) {
- // first gather the disk quota used by the gridset, and update the global
- // quota
- Quota quota = getUsedQuotaByParametersId(parametersId);
- quota.setBytes(quota.getBytes().negate());
- String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
- Map<String, Object> params = new HashMap<>();
- params.put("tileSetId", GLOBAL_QUOTA_NAME);
- params.put("bytes", new BigDecimal(quota.getBytes()));
- jt.update(updateQuota, params);
-
- // then delete all the gridsets with the specified id
- String statement = dialect.getLayerParametersDeletionStatement(schema, "layerName", "parametersId");
- params = new HashMap<>();
- params.put("layerName", layerName);
- params.put("parametersId", parametersId);
- jt.update(statement, params);
- }
+ tt.executeWithoutResult(status -> {
+ // first gather the disk quota used by the gridset, and update the global
+ // quota
+ Quota quota = getUsedQuotaByParametersId(parametersId);
+ quota.setBytes(quota.getBytes().negate());
+ String updateQuota = dialect.getUpdateQuotaStatement(schema, "tileSetId", "bytes");
+ Map<String, Object> params = new HashMap<>();
+ params.put("tileSetId", GLOBAL_QUOTA_NAME);
+ params.put("bytes", new BigDecimal(quota.getBytes()));
+ jt.update(updateQuota, params);
+
+ // then delete all the gridsets with the specified id
+ String statement = dialect.getLayerParametersDeletionStatement(schema, "layerName", "parametersId");
+ params = new HashMap<>();
+ params.put("layerName", layerName);
+ params.put("parametersId", parametersId);
+ jt.update(statement, params);
});
}
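For reviewers unfamiliar with the lambda form used above, here is a minimal, self-contained sketch of the migration pattern applied throughout `JDBCQuotaStore`: the anonymous `TransactionCallbackWithoutResult` subclass is replaced by a lambda passed to Spring's `TransactionTemplate#executeWithoutResult` (available since Spring 5.2 and still present in Spring 7). The class name, field names, and SQL below are illustrative only, not taken from the patch.

```java
import java.util.Collections;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.transaction.support.TransactionTemplate;

// Hypothetical helper illustrating the refactor: the old anonymous
// TransactionCallbackWithoutResult subclass becomes a lambda argument.
class QuotaTxExample {
    private final TransactionTemplate tt;
    private final NamedParameterJdbcTemplate jt;

    QuotaTxExample(TransactionTemplate tt, NamedParameterJdbcTemplate jt) {
        this.tt = tt;
        this.jt = jt;
    }

    void deleteLayer(String layerName) {
        // Before: tt.execute(new TransactionCallbackWithoutResult() { ... });
        // After: the TransactionStatus is still available as the lambda parameter.
        tt.executeWithoutResult(status -> jt.update(
                "DELETE FROM TILESET WHERE LAYER_NAME = :layerName", // illustrative SQL
                Collections.singletonMap("layerName", layerName)));
    }
}
```

The `TransactionStatus` parameter remains available inside the lambda (for example for manual rollback), so the rewrite is behavior-preserving while removing the boilerplate subclass.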
diff --git a/geowebcache/mbtiles/src/main/java/org/geowebcache/mbtiles/layer/MBTilesInfo.java b/geowebcache/mbtiles/src/main/java/org/geowebcache/mbtiles/layer/MBTilesInfo.java
index daedec129..02aaf6298 100644
--- a/geowebcache/mbtiles/src/main/java/org/geowebcache/mbtiles/layer/MBTilesInfo.java
+++ b/geowebcache/mbtiles/src/main/java/org/geowebcache/mbtiles/layer/MBTilesInfo.java
@@ -16,9 +16,6 @@
import static org.geotools.mbtiles.MBTilesFile.SPHERICAL_MERCATOR;
import static org.geotools.mbtiles.MBTilesFile.WORLD_ENVELOPE;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.List;
import java.util.logging.Level;
@@ -35,6 +32,9 @@
import org.geowebcache.grid.BoundingBox;
import org.geowebcache.layer.meta.TileJSON;
import org.geowebcache.layer.meta.VectorLayerMetadata;
+import tools.jackson.core.JacksonException;
+import tools.jackson.core.type.TypeReference;
+import tools.jackson.databind.ObjectMapper;
/** Info Object storing basic MBTiles Cached info */
public class MBTilesInfo {
@@ -146,15 +146,20 @@ public void decorateTileJSON(TileJSON tileJSON) {
int index = -1;
if (json != null && ((index = json.indexOf("[")) > 0)) {
// skip the "vector_layers initial part and go straight to the array
- json = json.substring(index, json.length() - 1).trim();
- ObjectMapper mapper = new ObjectMapper();
- List<VectorLayerMetadata> layers = null;
- try {
- layers = mapper.readValue(json, new TypeReference<>() {});
- } catch (JsonProcessingException e) {
- throw new IllegalArgumentException("Exception occurred while parsing the layers metadata. " + e);
+ // Find the closing bracket for the array
+ int endIndex = json.indexOf("]", index);
+ if (endIndex > 0) {
+ json = json.substring(index, endIndex + 1).trim();
+ ObjectMapper mapper = new ObjectMapper();
+ List<VectorLayerMetadata> layers = null;
+ try {
+ layers = mapper.readValue(json, new TypeReference<>() {});
+ } catch (JacksonException e) {
+ throw new IllegalArgumentException(
+ "Exception occurred while parsing the layers metadata. " + e);
+ }
+ tileJSON.setLayers(layers);
}
- tileJSON.setLayers(layers);
}
}
}
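As a standalone illustration of the parsing change above: the code now locates the closing bracket of the `vector_layers` array instead of assuming the array ends at the last character of the metadata string, and maps it with the Jackson 3 (`tools.jackson`) `ObjectMapper`. The sketch below is not part of the patch; it uses a generic `Map` payload and a made-up metadata string, and, like the patch, it assumes the array contains no nested `]` characters.

```java
import java.util.List;
import java.util.Map;
import tools.jackson.core.JacksonException;
import tools.jackson.core.type.TypeReference;
import tools.jackson.databind.ObjectMapper;

// Sketch of the bracket-extraction + Jackson 3 parsing strategy.
class VectorLayersParsingExample {
    static List<Map<String, Object>> parseLayers(String json) {
        int start = json.indexOf("[");
        int end = json.indexOf("]", start); // closing bracket of the array
        if (start < 0 || end < 0) {
            return List.of(); // no array found, nothing to parse
        }
        String array = json.substring(start, end + 1).trim();
        try {
            return new ObjectMapper().readValue(array, new TypeReference<>() {});
        } catch (JacksonException e) {
            throw new IllegalArgumentException("Could not parse vector_layers metadata", e);
        }
    }

    public static void main(String[] args) {
        // Hypothetical MBTiles "json" metadata value
        String metadata = "{\"vector_layers\": [{\"id\": \"roads\", \"minzoom\": 0}]}";
        System.out.println(parseLayers(metadata));
    }
}
```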
diff --git a/geowebcache/pom.xml b/geowebcache/pom.xml
index a4239a8bf..9480778e0 100644
--- a/geowebcache/pom.xml
+++ b/geowebcache/pom.xml
@@ -53,8 +53,8 @@
35-SNAPSHOT
- 6.2.12
- 6.5.5
+ 7.0.2
+ 7.0.2
1.4.21
1.18.0
2.0.0-M4
@@ -254,7 +254,6 @@
<artifactId>joda-time</artifactId>
<version>${joda-time.version}</version>
-
@@ -264,13 +263,6 @@
test
-
-
- <groupId>org.apache.httpcomponents.client5</groupId>
- <artifactId>httpclient5</artifactId>
- <version>5.4.4</version>
-
-
@@ -336,7 +328,7 @@
<artifactId>maven-failsafe-plugin</artifactId>
- <version>3.5.3</version>
+ <version>3.5.4</version>
@@ -387,7 +379,7 @@
<artifactId>maven-surefire-plugin</artifactId>
- <version>3.5.3</version>
+ <version>3.5.4</version>
diff --git a/geowebcache/rest/pom.xml b/geowebcache/rest/pom.xml
index ebe42681c..59da37122 100644
--- a/geowebcache/rest/pom.xml
+++ b/geowebcache/rest/pom.xml
@@ -29,10 +29,6 @@
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
-
- <groupId>jakarta.servlet</groupId>
- <artifactId>jakarta.servlet-api</artifactId>
-
diff --git a/geowebcache/rest/src/main/java/org/geowebcache/rest/controller/SeedController.java b/geowebcache/rest/src/main/java/org/geowebcache/rest/controller/SeedController.java
index 9bd49c2dd..6ad36c0d9 100644
--- a/geowebcache/rest/src/main/java/org/geowebcache/rest/controller/SeedController.java
+++ b/geowebcache/rest/src/main/java/org/geowebcache/rest/controller/SeedController.java
@@ -62,13 +62,22 @@ public class SeedController {
/** GET method for querying running GWC tasks */
@RequestMapping(
- value = "/seed.json",
+ value = "/seed",
method = RequestMethod.GET,
produces = {MediaType.APPLICATION_JSON_VALUE})
public ResponseEntity<?> doGet(HttpServletRequest req) {
return seedService.getRunningTasks(req);
}
+ /** GET method for querying running GWC tasks with path extension */
+ @RequestMapping(
+ value = "/seed.json",
+ method = RequestMethod.GET,
+ produces = {MediaType.APPLICATION_JSON_VALUE})
+ public ResponseEntity<?> doGetJson(HttpServletRequest req) {
+ return seedService.getRunningTasks(req);
+ }
+
/** GET method for querying running tasks for the provided layer */
@RequestMapping(
value = "/seed/{layer:.+}.json",
@@ -106,7 +115,7 @@ public ResponseEntity doPost(HttpServletRequest request) {
}
/**
- * POST method for Seeding and Truncating
+ * POST method for Seeding and Truncating via form submission
*
* @param params Query parameters, including urlencoded form values
*/
@@ -133,6 +142,7 @@ public ResponseEntity<?> doPost(
}
}
+ /** POST method for JSON seeding/truncating with path extension. */
@RequestMapping(value = "/seed/{layer}.json", method = RequestMethod.POST)
public ResponseEntity<?> seedOrTruncateWithJsonPayload(
HttpServletRequest request, InputStream inputStream, @PathVariable(name = "layer") String layerName) {
@@ -142,6 +152,7 @@ public ResponseEntity<?> seedOrTruncateWithJsonPayload(
return seedService.doSeeding(request, layerName, extension, body);
}
+ /** POST method for XML seeding/truncating with path extension. */
@RequestMapping(value = "/seed/{layer}.xml", method = RequestMethod.POST)
public ResponseEntity<?> seedOrTruncateWithXmlPayload(
HttpServletRequest request, InputStream inputStream, @PathVariable(name = "layer") String layerName) {
@@ -151,6 +162,32 @@ public ResponseEntity<?> seedOrTruncateWithXmlPayload(
return seedService.doSeeding(request, layerName, extension, body);
}
+ /** POST method for JSON seeding/truncating without path extension. */
+ @RequestMapping(
+ value = "/seed/{layer:[^.]+}",
+ method = RequestMethod.POST,
+ consumes = {MediaType.APPLICATION_JSON_VALUE})
+ public ResponseEntity<?> seedOrTruncateJson(
+ HttpServletRequest request, InputStream inputStream, @PathVariable(name = "layer") String layerName) {
+
+ String body = readBody(inputStream);
+ String extension = "json";
+ return seedService.doSeeding(request, layerName, extension, body);
+ }
+
+ /** POST method for XML seeding/truncating without path extension. */
+ @RequestMapping(
+ value = "/seed/{layer:[^.]+}",
+ method = RequestMethod.POST,
+ consumes = {MediaType.APPLICATION_XML_VALUE, MediaType.TEXT_XML_VALUE})
+ public ResponseEntity<?> seedOrTruncateXml(
+ HttpServletRequest request, InputStream inputStream, @PathVariable(name = "layer") String layerName) {
+
+ String body = readBody(inputStream);
+ String extension = "xml";
+ return seedService.doSeeding(request, layerName, extension, body);
+ }
+
private String readBody(InputStream inputStream) {
return new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining("\n"));
}
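A hypothetical client call against one of the new extension-less endpoints, showing that the `Content-Type` header (rather than a `.json`/`.xml` suffix) now selects the handler method. Host, port, layer name, and the seed payload below are placeholders, not values from the patch.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustrative POST to /rest/seed/{layer} without a path extension.
public class SeedRequestExample {
    public static void main(String[] args) throws Exception {
        // Placeholder seed payload; see the GWC REST documentation for the full schema.
        String body = "{\"seedRequest\": {\"name\": \"topp:states\", \"zoomStart\": 0, \"zoomStop\": 4}}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/geowebcache/rest/seed/topp:states"))
                .header("Content-Type", "application/json") // drives handler selection
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
    }
}
```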
diff --git a/geowebcache/rest/src/main/java/org/geowebcache/rest/filter/SuffixStripFilter.java b/geowebcache/rest/src/main/java/org/geowebcache/rest/filter/SuffixStripFilter.java
new file mode 100644
index 000000000..b7699c67c
--- /dev/null
+++ b/geowebcache/rest/src/main/java/org/geowebcache/rest/filter/SuffixStripFilter.java
@@ -0,0 +1,87 @@
+/**
+ * This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+ * later version.
+ *
+ * <p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * <p>You should have received a copy of the GNU Lesser General Public License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * @author Cécile Vuilleumier, Camptocamp, Copyright 2026
+ */
+package org.geowebcache.rest.filter;
+
+import jakarta.servlet.Filter;
+import jakarta.servlet.FilterChain;
+import jakarta.servlet.ServletException;
+import jakarta.servlet.ServletRequest;
+import jakarta.servlet.ServletResponse;
+import jakarta.servlet.http.HttpServletRequest;
+import jakarta.servlet.http.HttpServletRequestWrapper;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Servlet filter for GeoWebCache
+ *
+ * <p>Extracts the path suffix (extension) and stores it for content negotiation. Removes the extension from the path
+ * for path mapping.
+ */
+public class SuffixStripFilter implements Filter {
+
+ private static final Pattern EXTENSION_PATTERN = Pattern.compile("^(.*?)\\.(json|xml)$");
+
+ public static final String FORMAT_ATTRIBUTE = "gwc.formatExtension";
+
+ @Override
+ public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+ throws IOException, ServletException {
+
+ if (request instanceof HttpServletRequest httpRequest) {
+ String requestURI = httpRequest.getRequestURI();
+ Matcher matcher = EXTENSION_PATTERN.matcher(requestURI);
+
+ if (matcher.matches()) {
+ String pathWithoutExtension = matcher.group(1);
+ String extension = matcher.group(2);
+
+ // Wrap the request to return modified paths
+ HttpServletRequestWrapper wrapper = new HttpServletRequestWrapper(httpRequest) {
+ @Override
+ public String getRequestURI() {
+ return pathWithoutExtension;
+ }
+
+ @Override
+ public StringBuffer getRequestURL() {
+ StringBuffer url =
+ new StringBuffer(super.getRequestURL().toString());
+ int extIndex = url.lastIndexOf("." + extension);
+ if (extIndex > 0) {
+ url.delete(extIndex, url.length());
+ }
+ return url;
+ }
+
+ @Override
+ public String getServletPath() {
+ String servletPath = super.getServletPath();
+ return servletPath.replaceFirst("\\." + extension + "$", "");
+ }
+ };
+
+ // Store extension for content negotiation
+ wrapper.setAttribute(FORMAT_ATTRIBUTE, extension);
+
+ chain.doFilter(wrapper, response);
+ return;
+ }
+ }
+
+ // No extension found, pass through unchanged
+ chain.doFilter(request, response);
+ }
+}
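A quick way to exercise the filter in isolation is with the `spring-test` mock servlet classes (assumed to be available on the test classpath); the request path below is illustrative and this snippet is not part of the patch.

```java
import jakarta.servlet.http.HttpServletRequest;
import org.geowebcache.rest.filter.SuffixStripFilter;
import org.springframework.mock.web.MockFilterChain;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;

// Small self-contained check: the request seen downstream of the filter has the
// suffix stripped and the extension stored under FORMAT_ATTRIBUTE.
public class SuffixStripFilterExample {
    public static void main(String[] args) throws Exception {
        MockHttpServletRequest request = new MockHttpServletRequest("GET", "/rest/seed/topp:states.json");
        MockHttpServletResponse response = new MockHttpServletResponse();
        MockFilterChain chain = new MockFilterChain();

        new SuffixStripFilter().doFilter(request, response, chain);

        HttpServletRequest forwarded = (HttpServletRequest) chain.getRequest();
        System.out.println(forwarded.getRequestURI());                                  // /rest/seed/topp:states
        System.out.println(forwarded.getAttribute(SuffixStripFilter.FORMAT_ATTRIBUTE)); // json
    }
}
```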
diff --git a/geowebcache/rest/src/main/java/org/geowebcache/rest/negotiation/SuffixContentNegotiationStrategy.java b/geowebcache/rest/src/main/java/org/geowebcache/rest/negotiation/SuffixContentNegotiationStrategy.java
new file mode 100644
index 000000000..1e41b5fa1
--- /dev/null
+++ b/geowebcache/rest/src/main/java/org/geowebcache/rest/negotiation/SuffixContentNegotiationStrategy.java
@@ -0,0 +1,52 @@
+/**
+ * This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
+ * later version.
+ *
+ * <p>This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * <p>You should have received a copy of the GNU Lesser General Public License along with this program. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * @author Cécile Vuilleumier, Camptocamp, Copyright 2026
+ */
+package org.geowebcache.rest.negotiation;
+
+import jakarta.servlet.http.HttpServletRequest;
+import java.util.Collections;
+import java.util.List;
+import org.springframework.http.MediaType;
+import org.springframework.web.accept.ContentNegotiationStrategy;
+import org.springframework.web.context.request.NativeWebRequest;
+
+/**
+ * Spring ContentNegotiationStrategy for GeoWebCache
+ *
+ *