
Commit 4e2addf

Use stdout for datapoint exports
1 parent 6e9bad2 commit 4e2addf

3 files changed

Lines changed: 111 additions & 109 deletions


manager/src/main/java/org/openremote/manager/datapoint/AssetDatapointResourceImpl.java

Lines changed: 16 additions & 19 deletions
@@ -46,11 +46,10 @@
 import org.openremote.model.http.RequestParams;
 import org.openremote.model.security.ClientRole;
 import org.openremote.model.syslog.SyslogCategory;
+import org.openremote.model.util.UniqueIdentifierGenerator;
 import org.openremote.model.value.MetaItemType;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.util.concurrent.ScheduledFuture;
+import java.io.*;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import java.util.zip.ZipEntry;
@@ -206,19 +205,23 @@ public void getDatapointExport(AsyncResponse asyncResponse, String attributeRefs
 
         DATA_EXPORT_LOG.info("User '" + getUsername() + "' started data export for " + attributeRefsString + " from " + fromTimestamp + " to " + toTimestamp + " in format " + format);
 
-        ScheduledFuture<File> exportFuture = assetDatapointService.exportDatapoints(attributeRefs, fromTimestamp, toTimestamp, format);
+        PipedInputStream pipedInputStream = assetDatapointService.exportDatapoints(attributeRefs, fromTimestamp, toTimestamp, format);
 
-        asyncResponse.register((ConnectionCallback) disconnected -> exportFuture.cancel(true));
-
-        File exportFile = null;
+        asyncResponse.register((ConnectionCallback) disconnected -> {
+            try {
+                pipedInputStream.close();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        });
 
         try {
-            exportFile = exportFuture.get();
 
-            try (FileInputStream fin = new FileInputStream(exportFile);
+            try (InputStream fin = pipedInputStream;
                  ZipOutputStream zipOut = new ZipOutputStream(response.getOutputStream())) {
 
-                ZipEntry zipEntry = new ZipEntry(exportFile.getName());
+                String fileName = UniqueIdentifierGenerator.generateId() + ".csv";
+                ZipEntry zipEntry = new ZipEntry(fileName);
                 zipOut.putNextEntry(zipEntry);
                 IOUtils.copy(fin, zipOut);
                 zipOut.closeEntry();
@@ -231,22 +234,16 @@ public void getDatapointExport(AsyncResponse asyncResponse, String attributeRefs
                     response
                 );
             } catch (Exception ex) {
-                exportFuture.cancel(true);
+                pipedInputStream.close();
                 asyncResponse.resume(new WebApplicationException(Response.Status.INTERNAL_SERVER_ERROR));
                 DATA_EXPORT_LOG.log(Level.SEVERE, "Exception in ScheduledFuture: ", ex);
-            } finally {
-                if (exportFile != null && exportFile.exists()) {
-                    try {
-                        exportFile.delete();
-                    } catch (Exception e) {
-                        DATA_EXPORT_LOG.log(Level.SEVERE, "Failed to delete temporary export file: " + exportFile.getPath(), e);
-                    }
-                }
             }
         } catch (JsonProcessingException ex) {
            asyncResponse.resume(new BadRequestException(ex));
         } catch (DatapointQueryTooLargeException dqex) {
            asyncResponse.resume(new WebApplicationException(dqex, Response.Status.REQUEST_ENTITY_TOO_LARGE));
+        } catch (IOException e) {
+            asyncResponse.resume(new WebApplicationException(Response.Status.INTERNAL_SERVER_ERROR));
         }
     }
 }
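
For context, a minimal self-contained sketch (not repository code) of the piped-stream hand-off this resource method now relies on: a background task writes CSV bytes into a PipedOutputStream while the caller reads the connected PipedInputStream and wraps it in a single ZIP entry, so no temporary export file is ever created. The class name, the single-thread executor and the inline CSV sample are placeholders; in the manager the producer side is the PostgreSQL COPY call shown in AssetDatapointService below and the consumer side is the JAX-RS response stream.

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class PipedCsvZipExample {

    public static void main(String[] args) throws IOException {
        // Larger buffer than the 1 KB default, mirroring the 4 MB buffer used in the commit
        PipedInputStream in = new PipedInputStream(1024 * 1024 * 4);
        PipedOutputStream out = new PipedOutputStream(in);

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> {
            // Producer side: in the real service this is CopyManager.copyOut(...)
            try (out) {
                out.write("timestamp,value\n2024-01-01T00:00:00Z,42\n".getBytes(StandardCharsets.UTF_8));
            } catch (IOException e) {
                e.printStackTrace();
            }
        });

        // Consumer side: stream the CSV straight into a ZIP entry while it is being produced
        try (InputStream fin = in;
             ZipOutputStream zipOut = new ZipOutputStream(new FileOutputStream("export.zip"))) {
            zipOut.putNextEntry(new ZipEntry("export.csv"));
            fin.transferTo(zipOut);
            zipOut.closeEntry();
        }
        executor.shutdown();
    }
}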

manager/src/main/java/org/openremote/manager/datapoint/AssetDatapointService.java

Lines changed: 88 additions & 74 deletions
@@ -1,11 +1,11 @@
 package org.openremote.manager.datapoint;
 
+import org.hibernate.Session;
 import org.openremote.agent.protocol.ProtocolDatapointService;
 import org.openremote.container.timer.TimerService;
 import org.openremote.manager.asset.OutdatedAttributeEvent;
 import org.openremote.model.datapoint.DatapointExportFormat;
 import org.openremote.model.datapoint.DatapointQueryTooLargeException;
-import org.openremote.model.util.UniqueIdentifierGenerator;
 import org.openremote.manager.asset.AssetProcessingException;
 import org.openremote.manager.asset.AssetStorageService;
 import org.openremote.manager.event.ClientEventService;
@@ -24,16 +24,17 @@
 import org.openremote.model.util.Pair;
 import org.openremote.model.value.MetaHolder;
 import org.openremote.model.value.MetaItemType;
+import org.postgresql.PGConnection;
+import org.postgresql.copy.CopyManager;
 
-import java.io.File;
+import java.io.*;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.sql.Date;
 import java.time.*;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -253,25 +254,25 @@ protected String buildWhereClause(List<Pair<String, Attribute<?>>> attributes, b
      * container so it can be accessed by this process.
      * Backwards compatible overload with default format.
      */
-    public ScheduledFuture<File> exportDatapoints(AttributeRef[] attributeRefs,
+    public PipedInputStream exportDatapoints(AttributeRef[] attributeRefs,
                                                   long fromTimestamp,
-                                                  long toTimestamp) {
+                                                  long toTimestamp) throws IOException {
         return exportDatapoints(attributeRefs, fromTimestamp, toTimestamp, DatapointExportFormat.CSV);
     }
 
     /**
      * Exports datapoints as CSV using SQL; the export path used in the SQL query must also be mapped into the manager
      * container so it can be accessed by this process.
      */
-    public ScheduledFuture<File> exportDatapoints(AttributeRef[] attributeRefs,
+    public PipedInputStream exportDatapoints(AttributeRef[] attributeRefs,
                                                   long fromTimestamp,
                                                   long toTimestamp,
-                                                  DatapointExportFormat format) {
+                                                  DatapointExportFormat format) throws IOException {
         try {
             String query = getSelectExportQuery(attributeRefs, fromTimestamp, toTimestamp);
 
             // Verify the query is 'legal' and can be executed
-            if(canQueryDatapoints(query, null, datapointExportLimit)) {
+            if (canQueryDatapoints(query, null, datapointExportLimit)) {
                 return doExportDatapoints(attributeRefs, fromTimestamp, toTimestamp, format);
             }
             throw new RuntimeException("Could not export datapoints.");
@@ -283,76 +284,89 @@ public ScheduledFuture<File> exportDatapoints(AttributeRef[] attributeRefs,
         }
     }
 
-    protected ScheduledFuture<File> doExportDatapoints(AttributeRef[] attributeRefs,
+    protected PipedInputStream doExportDatapoints(AttributeRef[] attributeRefs,
                                                        long fromTimestamp,
                                                        long toTimestamp,
-                                                       DatapointExportFormat format) {
-
-        return scheduledExecutorService.schedule(() -> {
-            String fileName = UniqueIdentifierGenerator.generateId() + ".csv";
-            if (format == DatapointExportFormat.CSV_CROSSTAB) {
-                String attributeFilter = getAttributeFilter(attributeRefs);
-                StringBuilder sb = new StringBuilder(String.format(
-                    "copy (select * from crosstab( " +
-                        "'select ad.timestamp, a.name || '' \\: '' || ad.attribute_name as header, ad.value " +
-                        "from asset_datapoint ad " +
-                        "join asset a on ad.entity_id = a.id " +
-                        "where ad.timestamp >= to_timestamp(%d) and ad.timestamp <= to_timestamp(%d) and (%s) " +
-                        "order by ad.timestamp, header', " +
-                        "'select distinct a.name || '' \\: '' || ad.attribute_name as header " +
-                        "from asset_datapoint ad " +
-                        "join asset a on ad.entity_id = a.id " +
-                        "where %s " +
-                        "order by header') " +
-                        "as ct(timestamp timestamp, %s) " +
-                        ") to '/storage/" + EXPORT_STORAGE_DIR_NAME + "/" + fileName + "' delimiter ',' CSV HEADER;",
-                    fromTimestamp / 1000, toTimestamp / 1000, attributeFilter, attributeFilter, getAttributeColumns(attributeRefs)
-                ));
-                persistenceService.doTransaction(em -> em.createNativeQuery(sb.toString()).executeUpdate());
-            } else if (format == DatapointExportFormat.CSV_CROSSTAB_MINUTE) {
-                String attributeFilter = getAttributeFilter(attributeRefs);
-                StringBuilder sb = new StringBuilder(String.format(
-                    "copy (select * from crosstab( " +
-                        "'select public.time_bucket(''%s'', ad.timestamp) as bucket_timestamp, " +
-                        "a.name || '' \\: '' || ad.attribute_name as header, " +
-                        "CASE " +
-                        " WHEN jsonb_typeof((array_agg(ad.value))[1]) = ''number'' THEN " +
-                        " round(avg((ad.value#>>''{}'')::numeric) FILTER (WHERE jsonb_typeof(ad.value) = ''number''), 3)::text " +
-                        " ELSE (array_agg(ad.value ORDER BY ad.timestamp DESC) FILTER (WHERE jsonb_typeof(ad.value) != ''number''))[1]#>>''{}''" +
-                        "END as value " +
-                        "from asset_datapoint ad " +
-                        "join asset a on ad.entity_id = a.id " +
-                        "where ad.timestamp >= to_timestamp(%d) and ad.timestamp <= to_timestamp(%d) and (%s) " +
-                        "group by bucket_timestamp, header " +
-                        "order by bucket_timestamp, header', " +
-                        "'select distinct a.name || '' \\: '' || ad.attribute_name as header " +
-                        "from asset_datapoint ad " +
-                        "join asset a on ad.entity_id = a.id " +
-                        "where %s " +
-                        "order by header') " +
-                        "as ct(timestamp timestamp, %s) " +
-                        ") to '/storage/" + EXPORT_STORAGE_DIR_NAME + "/" + fileName + "' delimiter ',' CSV HEADER;",
-                    "1 minute", fromTimestamp / 1000, toTimestamp / 1000, attributeFilter, attributeFilter, getAttributeColumns(attributeRefs)
-                ));
-
-                persistenceService.doTransaction(em -> em.createNativeQuery(sb.toString()).executeUpdate());
-            }
-            else {
-                StringBuilder sb = new StringBuilder("copy (")
-                    .append(getSelectExportQuery(attributeRefs, fromTimestamp, toTimestamp))
-                    .append(") to '/storage/")
-                    .append(EXPORT_STORAGE_DIR_NAME)
-                    .append("/")
-                    .append(fileName)
-                    .append("' delimiter ',' CSV HEADER;");
-                persistenceService.doTransaction(em -> em.createNativeQuery(sb.toString()).executeUpdate());
-            }
-
+                                                       DatapointExportFormat format) throws IOException {
+        // Increase buffer size (default is only 1 KB)
+        PipedInputStream in = new PipedInputStream(1024 * 1024 * 4); // 4 MB
+        PipedOutputStream out = new PipedOutputStream(in);
+
+        StringBuilder sb;
+
+        final String TO_STDOUT_WITH_CSV_FORMAT = ") TO STDOUT WITH (FORMAT CSV, HEADER, DELIMITER ',');";
+
+        if (format == DatapointExportFormat.CSV_CROSSTAB) {
+            String attributeFilter = getAttributeFilter(attributeRefs);
+            sb = new StringBuilder(String.format(
+                "copy (select * from crosstab( " +
+                    "'select ad.timestamp, a.name || '' \\: '' || ad.attribute_name as header, ad.value " +
+                    "from asset_datapoint ad " +
+                    "join asset a on ad.entity_id = a.id " +
+                    "where ad.timestamp >= to_timestamp(%d) and ad.timestamp <= to_timestamp(%d) and (%s) " +
+                    "order by ad.timestamp, header', " +
+                    "'select distinct a.name || '' \\: '' || ad.attribute_name as header " +
+                    "from asset_datapoint ad " +
+                    "join asset a on ad.entity_id = a.id " +
+                    "where %s " +
+                    "order by header') " +
+                    "as ct(timestamp timestamp, %s) " +
+                    TO_STDOUT_WITH_CSV_FORMAT,
+                fromTimestamp / 1000, toTimestamp / 1000, attributeFilter, attributeFilter, getAttributeColumns(attributeRefs)
+            ));
+        } else if (format == DatapointExportFormat.CSV_CROSSTAB_MINUTE) {
+            String attributeFilter = getAttributeFilter(attributeRefs);
+            sb = new StringBuilder(String.format(
+                "copy (select * from crosstab( " +
+                    "'select public.time_bucket(''%s'', ad.timestamp) as bucket_timestamp, " +
+                    "a.name || '' \\: '' || ad.attribute_name as header, " +
+                    "CASE " +
+                    " WHEN jsonb_typeof((array_agg(ad.value))[1]) = ''number'' THEN " +
+                    " round(avg((ad.value#>>''{}'')::numeric) FILTER (WHERE jsonb_typeof(ad.value) = ''number''), 3)::text " +
+                    " ELSE (array_agg(ad.value ORDER BY ad.timestamp DESC) FILTER (WHERE jsonb_typeof(ad.value) != ''number''))[1]#>>''{}''" +
+                    "END as value " +
+                    "from asset_datapoint ad " +
+                    "join asset a on ad.entity_id = a.id " +
+                    "where ad.timestamp >= to_timestamp(%d) and ad.timestamp <= to_timestamp(%d) and (%s) " +
+                    "group by bucket_timestamp, header " +
+                    "order by bucket_timestamp, header', " +
+                    "'select distinct a.name || '' \\: '' || ad.attribute_name as header " +
+                    "from asset_datapoint ad " +
+                    "join asset a on ad.entity_id = a.id " +
+                    "where %s " +
+                    "order by header') " +
+                    "as ct(timestamp timestamp, %s) " +
+                    TO_STDOUT_WITH_CSV_FORMAT,
+                "1 minute", fromTimestamp / 1000, toTimestamp / 1000, attributeFilter, attributeFilter, getAttributeColumns(attributeRefs)
+            ));
+        } else {
+            sb = new StringBuilder("copy (")
+                .append(getSelectExportQuery(attributeRefs, fromTimestamp, toTimestamp))
+                .append(TO_STDOUT_WITH_CSV_FORMAT);
+        }
 
+        scheduledExecutorService.schedule(() -> persistenceService.doTransaction(em -> {
+            Session session = em.unwrap(Session.class);
+            session.doWork(connection -> {
+                PGConnection pgConnection = connection.unwrap(PGConnection.class);
+                CopyManager copyManager = pgConnection.getCopyAPI();
+                try {
+                    copyManager.copyOut(sb.toString(), out);
+                    out.flush();
+                    out.close();
+                } catch (IOException e) {
+                    // Either database connection or output stream failure
+                    getLogger().log(Level.WARNING, "Datapoint export failed", e);
+                    try {
+                        in.close();
+                    } catch (IOException ignored) {
+                        getLogger().log(Level.SEVERE, "Failed to close piped input stream", e);
+                    }
+                }
+            });
+        }), 0, TimeUnit.MILLISECONDS);
 
-            // The same path must resolve in both the postgresql container and the manager container
-            return exportPath.resolve(fileName).toFile();
-        }, 0, TimeUnit.MILLISECONDS);
+        return in;
     }
 
     /**
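
For reference, a hedged sketch (not repository code) of the pgjdbc CopyManager API that the new doExportDatapoints() builds on: COPY (...) TO STDOUT streams CSV straight from the database connection into any OutputStream, so nothing has to be written to a shared /storage path inside the postgresql container. The JDBC URL and credentials are placeholders; the table and column names mirror the asset_datapoint queries in the diff above.

import java.io.IOException;
import java.io.OutputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.postgresql.PGConnection;
import org.postgresql.copy.CopyManager;

public class CopyOutExample {

    // Streams the result of an arbitrary SELECT as CSV directly into the supplied output stream
    static void exportCsv(Connection connection, String selectSql, OutputStream out)
            throws SQLException, IOException {
        PGConnection pgConnection = connection.unwrap(PGConnection.class);
        CopyManager copyManager = pgConnection.getCopyAPI();
        String copySql = "COPY (" + selectSql + ") TO STDOUT WITH (FORMAT CSV, HEADER, DELIMITER ',')";
        copyManager.copyOut(copySql, out);
    }

    public static void main(String[] args) throws Exception {
        // Placeholder connection details for a local PostgreSQL instance
        try (Connection connection = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/openremote", "postgres", "postgres")) {
            exportCsv(connection,
                "select timestamp, entity_id, attribute_name, value from asset_datapoint",
                System.out);
        }
    }
}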
