diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b1dee1f2..510aa52f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -42,7 +42,7 @@ jobs:
cache: 'maven'
- name: Build with Maven
- run: mvn -B verify --file pom.xml
+ run: mvn -B clean verify --file pom.xml
- name: Build Docker Image
uses: docker/build-push-action@v5
diff --git a/Dockerfile b/Dockerfile
index b6ef41f5..21eabdcb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,18 +4,17 @@ VOLUME /tmp
ENV MAX_HEAP_PERCENTAGE=70
COPY ./server/target/ogcapi-java-server-*-exec.jar app.jar
-ENTRYPOINT [\
- "java",\
- "-XX:MaxRAMPercentage=${MAX_HEAP_PERCENTAGE}",\
- "-Delasticsearch.index.name=${INDEX_NAME}",\
- "-Delasticsearch.cloud_optimized_index.name=${CO_INDEX_NAME}",\
- "-Delasticsearch.vocabs_index.name=${VOCABS_INDEX_NAME}",\
- "-Dapi.host=${HOST}:${PORT}",\
- "-Dserver.port=${PORT}",\
- "-Delasticsearch.serverUrl=${ELASTIC_URL}",\
- "-Delasticsearch.apiKey=${ELASTIC_KEY}",\
- "-Ddata-access-service.host=${DAS_HOST}",\
- "-Ddata-access-service.secret=${DAS_SECRET}",\
- "--enable-preview",\
- "-jar",\
- "/app.jar"]
+ENTRYPOINT ["/bin/sh", "-c", "java \
+ -XX:MaxRAMPercentage=${MAX_HEAP_PERCENTAGE} \
+ -Delasticsearch.index.name=${INDEX_NAME} \
+ -Delasticsearch.cloud_optimized_index.name=${CO_INDEX_NAME} \
+ -Delasticsearch.vocabs_index.name=${VOCABS_INDEX_NAME} \
+ -Dapi.host=${HOST}:${PORT} \
+ -Dserver.port=${PORT} \
+ -Delasticsearch.serverUrl=${ELASTIC_URL} \
+ -Delasticsearch.apiKey=${ELASTIC_KEY} \
+ -Ddata-access-service.host=${DAS_HOST} \
+ -Ddata-access-service.secret=${DAS_SECRET} \
+ --enable-preview \
+ -jar \
+ /app.jar"]
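Note on the ENTRYPOINT change above: the exec form passes its arguments to `java` verbatim, so references like `${INDEX_NAME}` were never expanded; wrapping the command in `/bin/sh -c` is what enables environment-variable substitution at container start. A minimal sketch of the difference (`APP_PORT` is a hypothetical variable):

```dockerfile
# Exec form: no shell is involved, so the literal text "${APP_PORT}" reaches java.
ENTRYPOINT ["java", "-Dserver.port=${APP_PORT}", "-jar", "/app.jar"]

# Shell wrapper: ${APP_PORT} is expanded when the container starts. Prefixing
# with "exec" keeps java as PID 1 so it still receives SIGTERM directly.
ENTRYPOINT ["/bin/sh", "-c", "exec java -Dserver.port=${APP_PORT} -jar /app.jar"]
```

One caveat with the wrapper as committed: without `exec`, the JVM runs as a child of the shell, so signals sent by `docker stop` reach `/bin/sh` rather than `java`.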
diff --git a/server/pom.xml b/server/pom.xml
index 3efac3b7..53ca5b50 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -266,6 +266,9 @@
maven-surefire-plugin
--enable-preview
+
+ test
+
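The three `+` lines in this pom.xml hunk lost their XML markup in extraction; only the word `test` survives, so the exact elements are unrecoverable. Given the surrounding `maven-surefire-plugin` and `--enable-preview` context, a plausible shape (element names here are assumptions, not recovered text) is an execution binding for the test phase:

```xml
<plugin>
    <artifactId>maven-surefire-plugin</artifactId>
    <configuration>
        <!-- forward the preview-features flag to the forked test JVM -->
        <argLine>--enable-preview</argLine>
    </configuration>
    <executions>
        <execution>
            <goals>
                <!-- the surviving word "test" suggests the addition binds the plugin's test goal -->
                <goal>test</goal>
            </goals>
        </execution>
    </executions>
</plugin>
```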
diff --git a/server/src/main/java/au/org/aodn/ogcapi/server/core/model/enumeration/CQLFields.java b/server/src/main/java/au/org/aodn/ogcapi/server/core/model/enumeration/CQLFields.java
index 3e68ab7d..ca7e2073 100644
--- a/server/src/main/java/au/org/aodn/ogcapi/server/core/model/enumeration/CQLFields.java
+++ b/server/src/main/java/au/org/aodn/ogcapi/server/core/model/enumeration/CQLFields.java
@@ -184,7 +184,8 @@ public enum CQLFields implements CQLFieldsInterface {
CQLElasticSetting.score.getSetting(),
CQLElasticSetting.score.getSetting(),
null,
- (order) -> new SortOptions.Builder().field(f -> f.field(CQLElasticSetting.score.getSetting()).order(order))
+ (order) -> new SortOptions.Builder()
+ .field(f -> f.field(CQLElasticSetting.score.getSetting()).order(order))
),
// Rank score is an internal calculated score, it is different from the one use by ElasticSearch,
// @see es-indexer RankingService
diff --git a/server/src/main/java/au/org/aodn/ogcapi/server/core/service/ElasticSearch.java b/server/src/main/java/au/org/aodn/ogcapi/server/core/service/ElasticSearch.java
index 30d2bac2..a960fbea 100644
--- a/server/src/main/java/au/org/aodn/ogcapi/server/core/service/ElasticSearch.java
+++ b/server/src/main/java/au/org/aodn/ogcapi/server/core/service/ElasticSearch.java
@@ -315,6 +315,7 @@ public ElasticSearchBase.SearchResult searchByParameters(Li
}
}
catch(Exception e) {
+ log.warn("Error parsing score, assuming null", e);
// OK to ignore as accept null as the value
}
// Get the search after
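The new `log.warn` makes an intentional fallback visible: when a score value cannot be parsed, the code degrades to a null score instead of failing the whole request. A minimal sketch of that pattern, with hypothetical names (`parseScore`, `raw`) since the full method body is not shown in the hunk:

```java
// Hypothetical illustration of the tolerate-and-null pattern in this catch block:
// a malformed score is logged for diagnosis but treated as "no score".
private Double parseScore(String raw) {
    try {
        return raw == null ? null : Double.parseDouble(raw);
    }
    catch (Exception e) {
        log.warn("Error parsing score, assuming null", e);
        return null;    // null is an accepted value downstream
    }
}
```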
diff --git a/server/src/main/resources/log4j2-spring.xml b/server/src/main/resources/log4j2-spring.xml
index 208e747d..6565e772 100644
--- a/server/src/main/resources/log4j2-spring.xml
+++ b/server/src/main/resources/log4j2-spring.xml
@@ -36,7 +36,7 @@
-
+
diff --git a/server/src/test/java/au/org/aodn/ogcapi/server/BaseTestClass.java b/server/src/test/java/au/org/aodn/ogcapi/server/BaseTestClass.java
index f76d0f59..3e90aa45 100644
--- a/server/src/test/java/au/org/aodn/ogcapi/server/BaseTestClass.java
+++ b/server/src/test/java/au/org/aodn/ogcapi/server/BaseTestClass.java
@@ -7,6 +7,7 @@
import co.elastic.clients.elasticsearch._types.query_dsl.QueryBuilders;
import co.elastic.clients.elasticsearch.core.*;
import co.elastic.clients.elasticsearch.core.bulk.BulkResponseItem;
+import co.elastic.clients.elasticsearch.core.search.Hit;
import co.elastic.clients.elasticsearch.indices.CreateIndexRequest;
import co.elastic.clients.transport.rest_client.RestClientTransport;
import com.fasterxml.jackson.databind.JsonNode;
@@ -241,6 +242,11 @@ protected void insertJsonToElasticIndex(String index, String[] filenames) throws
logger.debug(response.toString());
assertEquals(filenames.length, response.hits().hits().size(), "Number of docs stored is correct");
+ for (Hit hit : response.hits().hits()) {
+ if(hit.source() != null) {
+ logger.debug("Stored the following id {}", hit.source().get("id"));
+ }
+ }
}
protected void insertJsonToElasticRecordIndex(String... filenames) throws IOException {
diff --git a/server/src/test/java/au/org/aodn/ogcapi/server/features/RestApiTest.java b/server/src/test/java/au/org/aodn/ogcapi/server/features/RestApiTest.java
index beb47960..9a8b6fa4 100644
--- a/server/src/test/java/au/org/aodn/ogcapi/server/features/RestApiTest.java
+++ b/server/src/test/java/au/org/aodn/ogcapi/server/features/RestApiTest.java
@@ -44,6 +44,7 @@ public void clear() {
@BeforeEach
public void afterTest() {
super.clearElasticIndex();
+ super.createElasticIndex();
}
@Test
@@ -51,244 +52,12 @@ public void afterTest() {
public void verifyClusterIsHealthy() throws IOException {
super.assertClusterHealthResponse();
}
- /**
- * We want to test the pageableSearch inside the elastic search is right or wrong by setting up more than 4 canned data, then
- * query all to get them back even the search result return from elastic is break down into 4 + 2
- */
- @Test
- public void verifyCorrectInternalPagingLargeData() throws IOException {
- assertEquals(4, pageSize, "This test only works with small page");
-
- // Given 6 records and we set page to 4, that means each query elastic return 4 record only
- // and the logic to load the reset can kick in.
- super.insertJsonToElasticRecordIndex(
- "5c418118-2581-4936-b6fd-d6bedfe74f62.json",
- "19da2ce7-138f-4427-89de-a50c724f5f54.json",
- "516811d7-cd1e-207a-e0440003ba8c79dd.json",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e.json",
- "bc55eff4-7596-3565-e044-00144fdd4fa6.json",
- "bf287dfe-9ce4-4969-9c59-51c39ea4d011.json");
-
- // Call rest api directly and get query result
- ResponseEntity<Collections> collections = testRestTemplate.exchange(
- getBasePath() + "/collections",
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {});
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- assertEquals(6, Objects.requireNonNull(collections.getBody()).getCollections().size(), "Total equals");
- assertEquals(6, collections.getBody().getTotal(), "Get total works");
-
- // Now make sure all id exist
- Set<String> ids = new HashSet<>(List.of(
- "5c418118-2581-4936-b6fd-d6bedfe74f62",
- "19da2ce7-138f-4427-89de-a50c724f5f54",
- "516811d7-cd1e-207a-e0440003ba8c79dd",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e",
- "bc55eff4-7596-3565-e044-00144fdd4fa6",
- "bf287dfe-9ce4-4969-9c59-51c39ea4d011"
- ));
-
- for(Collection collection : Objects.requireNonNull(collections.getBody()).getCollections()) {
- assertTrue(ids.contains(collection.getId()),"Contains " + collection.getId());
- }
- }
- /**
- * with page_size set, the max number of record return will equals page_size
- */
- @Test
- public void verifyCorrectPageSizeDataReturn() throws IOException {
- assertEquals(4, pageSize, "This test only works with small page");
-
- // Given 6 records and we set page to 4, that means each query elastic return 4 record only
- // and the logic to load the reset can kick in.
- super.insertJsonToElasticRecordIndex(
- "5c418118-2581-4936-b6fd-d6bedfe74f62.json",
- "19da2ce7-138f-4427-89de-a50c724f5f54.json",
- "516811d7-cd1e-207a-e0440003ba8c79dd.json",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e.json",
- "bc55eff4-7596-3565-e044-00144fdd4fa6.json",
- "bf287dfe-9ce4-4969-9c59-51c39ea4d011.json");
-
- // Call rest api directly and get query result
- ResponseEntity<Collections> collections = testRestTemplate.exchange(
- getBasePath() + "/collections?filter=page_size=3",
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {});
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- // Given request page size is 3, only 3 return this time
- assertEquals(3,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- // Total number of record should be this
- assertEquals(6, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals("1.0", collections.getBody().getSearchAfter().get(0), "Search after 1 value");
- assertEquals(
- "100",
- collections.getBody().getSearchAfter().get(1),
- "search_after 2 arg"
- );
- assertEquals(
- "str:bf287dfe-9ce4-4969-9c59-51c39ea4d011",
- collections.getBody().getSearchAfter().get(2),
- "search_after 3 arg"
- );
-
- // Now make sure all id exist
- Set<String> ids = new HashSet<>(List.of(
- "5c418118-2581-4936-b6fd-d6bedfe74f62",
- "19da2ce7-138f-4427-89de-a50c724f5f54",
- "bf287dfe-9ce4-4969-9c59-51c39ea4d011"
- ));
-
- for(Collection collection : Objects.requireNonNull(collections.getBody()).getCollections()) {
- assertTrue(ids.contains(collection.getId()),"Contains " + collection.getId());
- }
-
- // Now if we provided the search after we should get the next batch
- collections = testRestTemplate.exchange(
- getBasePath() + "/collections?filter=page_size=3 AND search_after=" +
- String.format("'%s||%s||%s'",
- collections.getBody().getSearchAfter().get(0),
- collections.getBody().getSearchAfter().get(1),
- collections.getBody().getSearchAfter().get(2)),
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {});
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- // Given request page size is 3, only 3 return this time
- assertEquals(3,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- ids = new HashSet<>(List.of(
- "7709f541-fc0c-4318-b5b9-9053aa474e0e",
- "bc55eff4-7596-3565-e044-00144fdd4fa6",
- "516811d7-cd1e-207a-e0440003ba8c79dd"
- ));
-
- for(Collection collection : Objects.requireNonNull(collections.getBody()).getCollections()) {
- assertTrue(ids.contains(collection.getId()),"Contains in next batch " + collection.getId());
- }
- }
- /**
- * Extreme case, page size set to 1 and query text "dataset" and page one by one. Only part of the json
- * will be return, the sort value should give you the next item and you will be able to go to next one.
- * The first sort value is the relevant and because of query text the value will be something greater than 1.0
- */
@Test
- public void verifyCorrectPageSizeDataReturnWithQuery() throws IOException {
+ public void verifyCorrectPageSizeAndScoreWithQuery2() throws IOException {
assertEquals(4, pageSize, "This test only works with small page");
- // Given 6 records and we set page to 4, that means each query elastic return 4 record only
- // and the logic to load the reset can kick in.
- super.insertJsonToElasticRecordIndex(
- "5c418118-2581-4936-b6fd-d6bedfe74f62.json",
- "19da2ce7-138f-4427-89de-a50c724f5f54.json",
- "516811d7-cd1e-207a-e0440003ba8c79dd.json",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e.json",
- "bc55eff4-7596-3565-e044-00144fdd4fa6.json",
- "bf287dfe-9ce4-4969-9c59-51c39ea4d011.json");
-
- // Call rest api directly and get query result with search on "dataset"
- ResponseEntity<Collections> collections = testRestTemplate.exchange(
- getBasePath() + "/collections?q=dataset&filter=page_size=1",
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {
- });
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- // Given request page size is 1
- assertEquals(1,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- // Total number of record should be this
- assertEquals(5, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals(
- "str:bc55eff4-7596-3565-e044-00144fdd4fa6",
- collections.getBody().getSearchAfter().get(2),
- "search_after 2 arg"
- );
-
- // Now the same search, same page but search_after the result above given sort value
- // intended to give space after comma for negative test
- collections = testRestTemplate.exchange(
- getBasePath() + "/collections?q=dataset&filter=page_size=1 AND search_after=" +
- String.format("'%s||%s||%s'",
- collections.getBody().getSearchAfter().get(0),
- collections.getBody().getSearchAfter().get(1),
- "bc55eff4-7596-3565-e044-00144fdd4fa6"),
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {
- });
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- assertEquals(1,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- // Total number of record should be this as the same search criteria applies
- assertEquals(5, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals(
- "str:7709f541-fc0c-4318-b5b9-9053aa474e0e",
- collections.getBody().getSearchAfter().get(2),
- "search_after 3 arg"
- );
-
- // Now the same search, diff page but search_after the result above given sort value
- // set a bigger page size which exceed more than record hit as negative test
- collections = testRestTemplate.exchange(
- getBasePath() + "/collections?q=dataset&filter=page_size=3 AND search_after=" +
- String.format("'%s||%s ||%s'",
- collections.getBody().getSearchAfter().get(0),
- collections.getBody().getSearchAfter().get(1),
- "5c418118-2581-4936-b6fd-d6bedfe74f62"),
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {
- });
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- assertEquals(3,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct, total hit is 4, we move to the third record"
- );
- // Total number of record should be this as the same search criteria applies
- assertEquals(5, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals(
- "str:19da2ce7-138f-4427-89de-a50c724f5f54",
- collections.getBody().getSearchAfter().get(2),
- "search_after 3 value"
- );
- }
- /**
- * Similar to verifyCorrectPageSizeDataReturnWithQuery and add score in the query,
- * this is used to verify a bug fix where page_size and score crash the query
- */
- @Test
- public void verifyCorrectPageSizeAndScoreWithQuery() throws IOException {
- assertEquals(4, pageSize, "This test only works with small page");
+ logger.debug("Start verifyCorrectPageSizeAndScoreWithQuery2");
// Given 6 records and we set page to 4, that means each query elastic return 4 record only
// and the logic to load the reset can kick in.
@@ -305,275 +74,9 @@ public void verifyCorrectPageSizeAndScoreWithQuery() throws IOException {
getBasePath() + "/collections?q=dataset&filter=page_size=1 AND score>=1.3",
HttpMethod.GET,
null,
- new ParameterizedTypeReference<>() {
- });
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- // Given request page size is 1
- assertEquals(1,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- // Total number of record should be this
- assertEquals(5, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals(
- "80",
- collections.getBody().getSearchAfter().get(1),
- "search_after 2 value"
- );
- assertEquals(
- "str:bc55eff4-7596-3565-e044-00144fdd4fa6",
- collections.getBody().getSearchAfter().get(2),
- "search_after 3 value"
- );
-
- // Now the same search, same page but search_after the result above given sort value
- // intended to give space after comma for negative test
- collections = testRestTemplate.exchange(
- getBasePath() + "/collections?q=dataset&filter=page_size=6 AND score>=1.3 AND search_after=" +
- String.format("'%s|| %s || %s'",
- collections.getBody().getSearchAfter().get(0),
- collections.getBody().getSearchAfter().get(1),
- "bc55eff4-7596-3565-e044-00144fdd4fa6"),
- HttpMethod.GET,
- null,
- new ParameterizedTypeReference<>() {
- });
-
- assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
- assertEquals(4,
- Objects.requireNonNull(collections.getBody()).getCollections().size(),
- "Record return size correct"
- );
- // Total number of record should be this as the same search criteria applies
- assertEquals(5, collections.getBody().getTotal(), "Get total works");
-
- // The search after give you the value to go to next batch
- assertEquals(3, collections.getBody().getSearchAfter().size(), "search_after three fields");
- assertEquals(
- "str:5c418118-2581-4936-b6fd-d6bedfe74f62",
- collections.getBody().getSearchAfter().get(2),
- "Search after 2 value"
- );
- }
-
- @Test
- public void verifyGetSingleCollection() throws IOException {
- super.insertJsonToElasticRecordIndex(
- "516811d7-cd1e-207a-e0440003ba8c79dd.json",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e.json"
- );
-
- // Call rest api directly and get query result
- ResponseEntity<Collection> collection = testRestTemplate.getForEntity(
- getBasePath() + "/collections/516811d7-cd1e-207a-e0440003ba8c79dd",
- Collection.class);
-
- assertNotNull(collection.getBody(), "Body not null");
- assertEquals(
- "516811d7-cd1e-207a-e0440003ba8c79dd",
- collection.getBody().getId(),
- "Correct UUID - 516811d7-cd1e-207a-e0440003ba8c79dd");
- }
-
- @Test
- public void verifyBBoxCorrect() throws IOException {
- super.insertJsonToElasticRecordIndex(
- "ae86e2f5-eaaf-459e-a405-e654d85adb9c.json",
- "7709f541-fc0c-4318-b5b9-9053aa474e0e.json"
- );
-
- // Call rest api directly and get query result
- ResponseEntity<Collection> collection = testRestTemplate.getForEntity(
- getBasePath() + "/collections/ae86e2f5-eaaf-459e-a405-e654d85adb9c",
- Collection.class);
-
- assertNotNull(collection.getBody(), "Body not null");
-
- List<List<BigDecimal>> bbox = collection.getBody().getExtent().getSpatial().getBbox();
- assertEquals(
- 24,
- bbox.size(),
- "Count of bbox");
-
- // Should be something like this but order may be diff
- // "bbox" : [
- // [ 113.0, -43.0, 154.0, -9.0 ], [ 115.0, -21.0, 117.0, -19.0 ], [ 114.0, -21.0, 115.0, -20.0 ],
- // [ 152.0, -22.0, 153.0, -21.0 ], [ 113.0, -22.0, 114.0, -21.0 ], [ 151.0, -24.0, 153.0, -22.0 ],
- // [ 130.0, -10.0, 131.0, -9.0 ], [ 121.0, -17.0, 122.0, -15.0 ], [ 130.0, -13.0, 131.0, -12.0 ],
- // [ 127.0, -14.0, 129.0, -9.0 ], [ 145.0, -15.0, 146.0, -14.0 ], [ 123.0, -15.0, 124.0, -14.0 ],
- // [ 119.0, -18.0, 120.0, -17.0 ], [ 147.0, -20.0, 148.0, -18.0 ], [ 153.0, -28.0, 154.0, -27.0 ],
- // [ 153.0, -31.0, 154.0, -30.0 ], [ 137.0, -34.0, 138.0, -33.0 ], [ 114.0, -33.0, 116.0, -31.0 ],
- // [ 121.0, -34.0, 122.0, -33.0 ], [ 151.0, -35.0, 152.0, -33.0 ], [ 150.0, -37.0, 151.0, -36.0 ],
- // [ 134.0, -37.0, 137.0, -34.0 ], [ 141.0, -39.0, 142.0, -38.0 ], [ 148.0, -43.0, 149.0, -42.0 ] ],
- Optional<List<BigDecimal>> target = bbox.stream()
- .filter(box -> box.get(0).doubleValue() == 141.0)
- .filter(box -> box.get(1).doubleValue() == -39.0)
- .filter(box -> box.get(2).doubleValue() == 142.0)
- .filter(box -> box.get(3).doubleValue() == -38.0)
- .findFirst();
-
- assertTrue(target.isPresent(), "Target bbox found 1");
-
- target = bbox.stream()
- .filter(box -> box.get(0).doubleValue() == 152.0)
- .filter(box -> box.get(1).doubleValue() == -22.0)
- .filter(box -> box.get(2).doubleValue() == 153.0)
- .filter(box -> box.get(3).doubleValue() == -21.0)
- .findFirst();
-
- assertTrue(target.isPresent(), "Target bbox found 2");
-
- logger.info(bbox.get(0).toString());
- // The first is the overall bounding box
- assertEquals(113.0, bbox.get(0).get(0).doubleValue(), "Overall bounding box coor 1");
- assertEquals(-43.0, bbox.get(0).get(1).doubleValue(), "Overall bounding box coor 2");
- assertEquals(154.0, bbox.get(0).get(2).doubleValue(), "Overall bounding box coor 3");
- assertEquals(-9.0, bbox.get(0).get(3).doubleValue(), "Overall bounding box coor 4");
- }
- /**
- * Verify the function correctly sum up the values for feature id summary
- * @throws IOException - Not expect to throw
- */
- @Disabled("Skipping this test temporarily")
- @Test
- public void verifyAggregationFeatureSummaryCorrect() throws IOException {
- super.insertJsonToElasticCODataIndex(
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.1.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.2.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample2.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.1.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.2.json"
- );
-
- // Call rest api directly and get query result
- ResponseEntity<FeatureCollectionGeoJSON> collection = testRestTemplate.getForEntity(
- getBasePath() + "/collections/35234913-aa3c-48ec-b9a4-77f822f66ef8/items/summary",
- FeatureCollectionGeoJSON.class);
-
- assertNotNull(collection.getBody(), "Body not null");
-
- FeatureCollectionGeoJSON json = collection.getBody();
- assertEquals(3, json.getFeatures().size(), "Features correct");
-
- // Sort make sure compare always same order
- List<FeatureGeoJSON> sf = json.getFeatures().stream()
- .sorted((a,b) -> b.getGeometry().hashCode() - a.getGeometry().hashCode())
- .toList();
- // Sample1
- FeatureGeoJSON featureGeoJSON1 = new FeatureGeoJSON();
- featureGeoJSON1.setType(FeatureGeoJSON.TypeEnum.FEATURE);
- featureGeoJSON1.setGeometry(new PointGeoJSON()
- .type(PointGeoJSON.TypeEnum.POINT)
- .coordinates(List.of(BigDecimal.valueOf(159.26), BigDecimal.valueOf(-24.72)))
- );
- featureGeoJSON1.setProperties(Map.of(
- FeatureProperty.COUNT.getValue(), 42.0,
- FeatureProperty.START_TIME.getValue(), "2023-02-01T00:00:00.000Z",
- FeatureProperty.END_TIME.getValue(), "2023-02-01T00:00:00.000Z"
-
- ));
- assertEquals(featureGeoJSON1, sf.get(0), "featureGeoJSON1");
-
- // Sample3
- FeatureGeoJSON featureGeoJSON2 = new FeatureGeoJSON();
- featureGeoJSON2.setType(FeatureGeoJSON.TypeEnum.FEATURE);
- featureGeoJSON2.setGeometry(new PointGeoJSON()
- .type(PointGeoJSON.TypeEnum.POINT)
- .coordinates(List.of(BigDecimal.valueOf(154.81), BigDecimal.valueOf(-26.2)))
- );
- featureGeoJSON2.setProperties(Map.of(
- FeatureProperty.COUNT.getValue(), 48.0,
- FeatureProperty.START_TIME.getValue(), "2023-02-01T00:00:00.000Z",
- FeatureProperty.END_TIME.getValue(), "2024-03-01T00:00:00.000Z"
-
- ));
- assertEquals(featureGeoJSON2, sf.get(1), "featureGeoJSON2");
-
- FeatureGeoJSON featureGeoJSON3 = new FeatureGeoJSON();
- featureGeoJSON3.setType(FeatureGeoJSON.TypeEnum.FEATURE);
- featureGeoJSON3.setGeometry(new PointGeoJSON()
- .type(PointGeoJSON.TypeEnum.POINT)
- .coordinates(List.of(BigDecimal.valueOf(153.56), BigDecimal.valueOf(-26.59)))
- );
- featureGeoJSON3.setProperties(Map.of(
- FeatureProperty.COUNT.getValue(), 14.0,
- FeatureProperty.START_TIME.getValue(), "2023-02-01T00:00:00.000Z",
- FeatureProperty.END_TIME.getValue(), "2023-02-01T00:00:00.000Z"
-
- ));
- assertEquals(featureGeoJSON3, sf.get(2), "featureGeoJSON3");
- }
- /**
- * We add more sample data and will trigger page load.
- * @throws IOException - Not expect to throw
- */
- @Disabled("Skipping this test temporarily")
- @Test
- public void verifyAggregationFeatureSummaryWithPageCorrect() throws IOException {
- assertEquals(4, pageSize, "This test only works with small page");
-
- super.insertJsonToElasticCODataIndex(
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.1.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample1.2.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample2.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.1.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample3.2.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample4.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample5.0.json",
- "cloudoptimized/35234913-aa3c-48ec-b9a4-77f822f66ef8/sample5.1.json"
- );
-
- // Call rest api directly and get query result
- ResponseEntity<FeatureCollectionGeoJSON> collection = testRestTemplate.getForEntity(
- getBasePath() + "/collections/35234913-aa3c-48ec-b9a4-77f822f66ef8/items/summary",
- FeatureCollectionGeoJSON.class);
-
- assertNotNull(collection.getBody(), "Body not null");
-
- FeatureCollectionGeoJSON json = collection.getBody();
- assertEquals(5, json.getFeatures().size(), "Features correct");
-
- // Sort make sure compare always same order
- List<FeatureGeoJSON> sf = json.getFeatures().stream()
- .sorted((a,b) -> b.getGeometry().hashCode() - a.getGeometry().hashCode())
- .toList();
-
- // Sample1
- FeatureGeoJSON featureGeoJSON1 = new FeatureGeoJSON();
- featureGeoJSON1.setType(FeatureGeoJSON.TypeEnum.FEATURE);
- featureGeoJSON1.setGeometry(new PointGeoJSON()
- .type(PointGeoJSON.TypeEnum.POINT)
- .coordinates(List.of(BigDecimal.valueOf(163.56), BigDecimal.valueOf(-26.59)))
- );
- featureGeoJSON1.setProperties(Map.of(
- FeatureProperty.COUNT.getValue(), 14.0,
- FeatureProperty.START_TIME.getValue(), "2023-02-01T00:00:00.000Z",
- FeatureProperty.END_TIME.getValue(), "2023-02-01T00:00:00.000Z"
-
- ));
- assertEquals(featureGeoJSON1, sf.get(0), "featureGeoJSON1");
-
- // Sample5
- FeatureGeoJSON featureGeoJSON2 = new FeatureGeoJSON();
- featureGeoJSON2.setType(FeatureGeoJSON.TypeEnum.FEATURE);
- featureGeoJSON2.setGeometry(new PointGeoJSON()
- .type(PointGeoJSON.TypeEnum.POINT)
- .coordinates(List.of(BigDecimal.valueOf(163.56), BigDecimal.valueOf(-126.59)))
- );
- featureGeoJSON2.setProperties(Map.of(
- FeatureProperty.COUNT.getValue(), 20.0,
- FeatureProperty.START_TIME.getValue(), "2022-12-01T00:00:00.000Z",
- FeatureProperty.END_TIME.getValue(), "2023-02-01T00:00:00.000Z"
+ new ParameterizedTypeReference<>() {});
- ));
- assertEquals(featureGeoJSON2, sf.get(1), "featureGeoJSON2");
+ assertEquals(HttpStatus.OK, collections.getStatusCode(), "Get status OK");
+ Objects.requireNonNull(collections.getBody()).getCollections().forEach(i -> logger.info("uuid {}, {}", i.getId(), collections.getBody().getSearchAfter()));
+ assertEquals(1, collections.getBody().getCollections().size(), "Record return size correct");
+ assertEquals(5, collections.getBody().getTotal(), "Get total works");
}
}
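For reference, the deleted tests above are the main documentation of the paging contract this endpoint exposes: each response carries a three-element `search_after` list (score, rank, and a `str:`-prefixed id), which the client joins with `||` and feeds back inside the CQL filter to fetch the next page. A minimal client-side walk in the same test-harness style (assuming the same `testRestTemplate` and `getBasePath()`, and a `Collections` body type exposing `getCollections()`/`getSearchAfter()`):

```java
// Sketch: page through /collections three records at a time using search_after.
List<String> seen = new ArrayList<>();
List<String> after = null;
do {
    String filter = "page_size=3" + (after == null
            ? ""
            : " AND search_after='" + String.join("||", after) + "'");
    ResponseEntity<Collections> page = testRestTemplate.exchange(
            getBasePath() + "/collections?filter=" + filter,
            HttpMethod.GET,
            null,
            new ParameterizedTypeReference<>() {});
    Objects.requireNonNull(page.getBody()).getCollections()
            .forEach(c -> seen.add(c.getId()));
    // a short page means the result set is drained
    after = page.getBody().getCollections().size() < 3
            ? null
            : page.getBody().getSearchAfter();
} while (after != null);
```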
diff --git a/server/src/test/resources/databag/8cdcdcad-399b-4bed-8cb2-29c486b6b124.json b/server/src/test/resources/databag/8cdcdcad-399b-4bed-8cb2-29c486b6b124.json
index 007f0c29..be802a8f 100644
--- a/server/src/test/resources/databag/8cdcdcad-399b-4bed-8cb2-29c486b6b124.json
+++ b/server/src/test/resources/databag/8cdcdcad-399b-4bed-8cb2-29c486b6b124.json
@@ -1,6 +1,6 @@
{
"title": "IMOS - National Reef Monitoring Network Sub-Facility",
- "description": "The National Reef Monitoring Network brings together shallow reef surveys conducted around Australia into a centralised database. The IMOS National Reef Monitoring Network sub-Facility collates, cleans, stores and makes this data rapidly available from contributors including: Reef Life Survey, Parks Australia, Department of Biodiversity, Conservation and Attractions (Western Australia), Department of Environment, Water and Natural Resources (South Australia), Department of Primary Industries (New South Wales), Tasmanian Parks and Wildlife Service and Parks Victoria. The data provided by the National Reef Monitoring Network contributes to establishing and supporting national marine baselines, and assisting with the management of Commonwealth and State marine reserves. Reef Life Survey (RLS) and the Australian Temperate Reef Network (ATRC) aims to improve biodiversity conservation and the sustainable management of marine resources by coordinating surveys of rocky and coral reefs using scientific methods, with the ultimate goal to improve coastal stewardship. Our activities depend on the skills of marine scientists, experienced and motivated recreational SCUBA divers, partnerships with management agencies and university researchers, and active input from the ATRC partners and RLS Advisory Committee RLS and ATRC data are freely available to the public for non-profit purposes, so not only managers, but also groups such as local dive clubs or schools may use these data to look at changes over time in their own local reefs. By making data freely available and through public outputs, RLS and ATRC aims to raise broader community awareness of the status of Australia’s marine biodiversity and associated conservation issues.",
+ "description": "No value is fine, this case is to make sure the short form of National Reef Monitoring Network does not appear here",
"extent": {
"bbox": [
[
@@ -40,7 +40,7 @@
},
"creation": "2021-05-21T12:00:00",
"revision": "2024-07-04T03:50:26",
- "ai:description": "The National Reef Monitoring Network brings together shallow reef surveys conducted around Australia into a centralised database. The IMOS National Reef Monitoring Network sub-Facility collates, cleans, stores and makes this data rapidly available from contributors including: - Reef Life Surve - Parks Australi - Department of Biodiversity, Conservation and Attractions (Western Australia) - Department of Environment, Water and Natural Resources (South Australia) - Department of Primary Industries (New South Wales) - Tasmanian Parks and Wildlife Service - Parks Victoria. The data provided by the National Reef Monitoring Network contributes to establishing and supporting national marine baselines, and assisting with the management of Commonwealth and State marine reserves. Reef Life Survey (RLS) and the Australian Temperate Reef Network (ATRC) aims to improve biodiversity conservation and the sustainable management of marine resources by coordinating surveys of rocky and coral reefs using scientific methods, with the ultimate goal to improve coastal stewardship. Our activities depend on the skills of marine scientists, experienced and motivated recreational SCUBA divers, partnerships with management agencies and university researchers, and active input from the ATRC partners and RLS Advisory Committee. RLS and ATRC data are freely available to the public for non-profit purposes, so not only managers, but also groups such as local dive clubs or schools may use these data to look at changes over time in their own local reefs. By making data freely available and through public outputs, RLS and ATRC aims to raise broader community awareness of the status of Australia’s marine biodiversity and associated conservation issues.",
+ "ai:description": "No value is fine",
"dataset_provider": "IMOS",
"update_frequency": "other",
"proj:geometry": {
@@ -11300,8 +11300,7 @@
"href": "uuid:0f65b7ae-1f6f-4a55-b804-1c991f791e1a",
"rel": "sibling",
"type": "application/json",
- "title": "{\"title\":\"IMOS - Autonomous Underwater Vehicles - AUV Iver",
- "recordAbstract": "The IMOS Autonomous Underwater Vehicles Facility has an Autonomous Underwater Vehicle (AUV) called Iver (IMOS platform code:IVER), capable of undertaking high resolution geo-referenced survey work. This platform is a modified Ocean Server Iver2 AUV that is hand deployable off RHIBs and other small vessels in addition to being deployable off larger vessels.\n\nThis AUV has been modified for benthic imaging, including the addition of USBL and DVL for more accurate navigation and the addition of high resolution stereo cameras and strobes. The submersible is equipped with a suite of oceanographic sensors including high resolution stereo cameras (6MP each), depth sensor, Doppler Velocity Log (DVL), Compass and Ultra Short Baseline (USBL). The vehicle is controlled by an on-board PC stack which is used to log sensor information and run the vehicle's control algorithms.\n\nThe vehicle has demonstrated its capacity to collect high resolution, near bottom imagery on trajectories over smooth terrain that has been used to generate 3D meshes and ortho-mosaics.\"}"
+ "title": "Any title works"
}
],
"license": "Creative Commons Attribution 4.0 International License",
@@ -11793,7 +11792,7 @@
"National Reef Monitoring Network Sub-Facility, Integrated Marine Observing System (IMOS)"
]
},
- "sci:citation": "{\"suggestedCitation\":\"The citation in a list of references is: \\\"Reef Life Survey (RLS); Institute for Marine and Antarctic Studies (IMAS); Parks Victoria; Department of Primary Industries (DPI), New South Wales Government; Parks and Wildlife Tasmania; Department for Environment and Water (DEWNR), South Australia, Integrated Marine Observing System (IMOS) [year-of-data-download], National Reef Monitoring Network Sub-Facility, [data-access-URL], accessed [date-of-access].\\\"\",\"useLimitations\":[\"Data, products and services from IMOS are provided \\\"as is\\\" without any warranty as to fitness for a particular purpose.\"],\"otherConstraints\":null}",
+ "sci:citation": "Not important for testing",
"type": "Collection",
"stac_version": "1.0.0",
"stac_extensions": [
diff --git a/server/src/test/resources/portal_records_index_schema.json b/server/src/test/resources/portal_records_index_schema.json
index 62c4e28f..9d851676 100644
--- a/server/src/test/resources/portal_records_index_schema.json
+++ b/server/src/test/resources/portal_records_index_schema.json
@@ -202,6 +202,9 @@
},
"summaries": {
"properties": {
+ "ai:description": {
+ "type": "text"
+ },
"score": {
"type": "long"
},
@@ -253,9 +256,6 @@
},
"statement": {
"type": "text"
- },
- "ai:description": {
- "type": "text"
}
}
},