GtfsPlusController.java
package com.conveyal.datatools.manager.controllers.api;

import com.conveyal.datatools.common.utils.SparkUtils;
import com.conveyal.datatools.manager.DataManager;
import com.conveyal.datatools.manager.auth.Auth0UserProfile;
import com.conveyal.datatools.manager.gtfsplus.GtfsPlusValidation;
import com.conveyal.datatools.manager.jobs.ProcessSingleFeedJob;
import com.conveyal.datatools.manager.models.FeedVersion;
import com.conveyal.datatools.manager.persistence.FeedStore;
import com.conveyal.datatools.manager.persistence.Persistence;
import com.conveyal.datatools.manager.utils.JobUtils;
import com.conveyal.datatools.manager.utils.json.JsonUtil;
import com.fasterxml.jackson.databind.JsonNode;
import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import spark.Request;
import spark.Response;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import java.util.zip.ZipOutputStream;
import static com.conveyal.datatools.common.utils.SparkUtils.formatJobMessage;
import static com.conveyal.datatools.common.utils.SparkUtils.copyRequestStreamIntoFile;
import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.models.FeedRetrievalMethod.PRODUCED_IN_HOUSE_GTFS_PLUS;
import static spark.Spark.delete;
import static spark.Spark.get;
import static spark.Spark.post;

/**
 * This class handles the GTFS+ specific HTTP endpoints, which allow for validating GTFS+ tables,
 * downloading GTFS+ files to a client (for editing, for example), and uploading/publishing a
 * GTFS+ zip (for example, one that has been edited) as a new feed version. Here is the workflow
 * in sequence:
 *
 * 1. User uploads a feed version (with or without GTFS+ tables).
 * 2. User views validation to determine if errors need amending.
 * 3. User makes edits (in client) and uploads the modified GTFS+.
 * 4. Once the user is satisfied with the edits, they publish the modified GTFS+ as a new feed version.
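 *
 * For illustration only (the actual prefix is whatever is passed to {@link #register}), with an
 * apiPrefix of "/api/manager/" the endpoints registered below map to requests such as:
 *
 *   GET  /api/manager/secure/gtfsplus/:versionid/validation  (view GTFS+ validation issues)
 *   POST /api/manager/secure/gtfsplus/:versionid             (upload edited GTFS+ tables)
 *   GET  /api/manager/secure/gtfsplus/:versionid             (download GTFS+ tables for editing)
 *   POST /api/manager/secure/gtfsplus/:versionid/publish     (publish edits as a new feed version)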
 *
 * Created by demory on 4/13/16.
 */
public class GtfsPlusController {

    public static final Logger LOG = LoggerFactory.getLogger(GtfsPlusController.class);

    private static final FeedStore gtfsPlusStore = new FeedStore(DataManager.GTFS_PLUS_SUBDIR);

    /**
     * Upload a GTFS+ file based on a specific feed version and replace (or create)
     * the file in the GTFS+ specific feed store.
     */
    private static Boolean uploadGtfsPlusFile(Request req, Response res) {
        String feedVersionId = req.params("versionid");
        File newGtfsFile = gtfsPlusStore.getFeedFile(feedVersionId);
        copyRequestStreamIntoFile(req, newGtfsFile);
        return true;
    }

    /**
     * Download a GTFS+ file for a specific feed version. If no edited GTFS+ file has been
     * uploaded for the feed version, the GTFS+ tables extracted from the original feed
     * version are returned instead.
     */
    private static HttpServletResponse getGtfsPlusFile(Request req, Response res) {
        String feedVersionId = req.params("versionid");
        LOG.info("Downloading GTFS+ file for FeedVersion {}", feedVersionId);
        // Check for a saved/edited GTFS+ file first.
        File file = gtfsPlusStore.getFeed(feedVersionId);
        if (file == null) {
            return getGtfsPlusFromGtfs(feedVersionId, req, res);
        }
        LOG.info("Returning updated GTFS+ data");
        return SparkUtils.downloadFile(file, file.getName() + ".zip", req, res);
    }

    /**
     * Download only the GTFS+ tables in a zip for a specific feed version.
     */
    private static HttpServletResponse getGtfsPlusFromGtfs(String feedVersionId, Request req, Response res) {
        LOG.info("Extracting GTFS+ data from main GTFS feed");
        FeedVersion version = Persistence.feedVersions.getById(feedVersionId);
        File gtfsPlusFile = null;
        // Create a set of valid GTFS+ table names.
        Set<String> gtfsPlusTables = new HashSet<>();
        for (int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
            JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
            gtfsPlusTables.add(tableNode.get("name").asText());
        }
        try {
            // Create a new zip file to contain only the GTFS+ tables.
            gtfsPlusFile = File.createTempFile(version.id + "_gtfsplus", ".zip");
            try (
                ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(gtfsPlusFile));
                ZipFile gtfsFile = new ZipFile(version.retrieveGtfsFile())
            ) {
                // Iterate through the existing GTFS file, copying any GTFS+ tables.
                final Enumeration<? extends ZipEntry> entries = gtfsFile.entries();
                byte[] buffer = new byte[512];
                while (entries.hasMoreElements()) {
                    final ZipEntry entry = entries.nextElement();
                    if (!gtfsPlusTables.contains(entry.getName())) continue;
                    // Create a new empty ZipEntry and copy the contents.
                    ZipEntry newEntry = new ZipEntry(entry.getName());
                    zos.putNextEntry(newEntry);
                    try (InputStream in = gtfsFile.getInputStream(entry)) {
                        while (0 < in.available()) {
                            int read = in.read(buffer);
                            zos.write(buffer, 0, read);
                        }
                    }
                    zos.closeEntry();
                }
            }
        } catch (IOException e) {
            logMessageAndHalt(req, 500, "An error occurred while trying to create a GTFS+ file", e);
        }
        return SparkUtils.downloadFile(gtfsPlusFile, gtfsPlusFile.getName() + ".zip", req, res);
    }

    /**
     * HTTP endpoint used to return the last modified timestamp for a GTFS+ feed. Essentially this
     * is used as a way to determine whether any GTFS+ edits have been made to the feed version.
     */
    private static Long getGtfsPlusFileTimestamp(Request req, Response res) {
        String feedVersionId = req.params("versionid");
        // Check for saved GTFS+ data.
        File file = gtfsPlusStore.getFeed(feedVersionId);
        if (file == null) {
            FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
            if (feedVersion == null) {
                logMessageAndHalt(req, 400, "Feed version ID is not valid");
                return null;
            }
            return feedVersion.fileTimestamp;
        } else {
            return file.lastModified();
        }
    }

    /**
     * Publishes the edited/saved GTFS+ file as a new feed version for the feed source.
     * This is the final stage in the GTFS+ validation/editing workflow described in the
     * class's javadoc.
     */
    private static String publishGtfsPlusFile(Request req, Response res) {
        Auth0UserProfile profile = req.attribute("user");
        String feedVersionId = req.params("versionid");
        LOG.info("Publishing GTFS+ for {}", feedVersionId);
        File plusFile = gtfsPlusStore.getFeed(feedVersionId);
        if (plusFile == null || !plusFile.exists()) {
            logMessageAndHalt(req, 400, "No saved GTFS+ data for version");
        }
        FeedVersion feedVersion = Persistence.feedVersions.getById(feedVersionId);
        // Create a set of valid GTFS+ table names.
        Set<String> gtfsPlusTables = new HashSet<>();
        for (int i = 0; i < DataManager.gtfsPlusConfig.size(); i++) {
            JsonNode tableNode = DataManager.gtfsPlusConfig.get(i);
            gtfsPlusTables.add(tableNode.get("name").asText());
        }
        File newFeed = null;
        try {
            // First, create a new zip file to contain the combined GTFS and GTFS+ tables.
            newFeed = File.createTempFile(feedVersionId + "_new", ".zip");
            try (
                ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(newFeed));
                ZipFile gtfsFile = new ZipFile(feedVersion.retrieveGtfsFile())
            ) {
                // Next, iterate through the existing GTFS file, copying all non-GTFS+ tables.
                final Enumeration<? extends ZipEntry> entries = gtfsFile.entries();
                byte[] buffer = new byte[512];
                while (entries.hasMoreElements()) {
                    final ZipEntry entry = entries.nextElement();
                    // Skip GTFS+ and non-standard tables.
                    if (gtfsPlusTables.contains(entry.getName()) || entry.getName().startsWith("_")) continue;
                    // Create a new empty ZipEntry and copy the contents.
                    ZipEntry newEntry = new ZipEntry(entry.getName());
                    zos.putNextEntry(newEntry);
                    try (InputStream in = gtfsFile.getInputStream(entry)) {
                        while (0 < in.available()) {
                            int read = in.read(buffer);
                            zos.write(buffer, 0, read);
                        }
                    }
                    zos.closeEntry();
                }
                // Iterate through the GTFS+ file, copying all entries.
                try (ZipFile plusZipFile = new ZipFile(plusFile)) {
                    final Enumeration<? extends ZipEntry> plusEntries = plusZipFile.entries();
                    while (plusEntries.hasMoreElements()) {
                        final ZipEntry entry = plusEntries.nextElement();
                        ZipEntry newEntry = new ZipEntry(entry.getName());
                        zos.putNextEntry(newEntry);
                        try (InputStream in = plusZipFile.getInputStream(entry)) {
                            while (0 < in.available()) {
                                int read = in.read(buffer);
                                zos.write(buffer, 0, read);
                            }
                        }
                        zos.closeEntry();
                    }
                }
            }
        } catch (IOException e) {
            logMessageAndHalt(req, 500, "Error creating combined GTFS/GTFS+ file", e);
        }
        // Create a new feed version to represent the published GTFS+.
        FeedVersion newFeedVersion = new FeedVersion(feedVersion.parentFeedSource(), PRODUCED_IN_HOUSE_GTFS_PLUS);
        File newGtfsFile = null;
        try (FileInputStream fis = new FileInputStream(newFeed)) {
            newGtfsFile = newFeedVersion.newGtfsFile(fis);
        } catch (IOException e) {
            logMessageAndHalt(req, 500, "Error reading GTFS file input stream", e);
        }
        if (newGtfsFile == null) {
            logMessageAndHalt(req, 500, "GTFS input file must not be null");
            return null;
        }
        newFeedVersion.originNamespace = feedVersion.namespace;
        // Must be handled by executor because it takes a long time.
        ProcessSingleFeedJob processSingleFeedJob = new ProcessSingleFeedJob(newFeedVersion, profile, true);
        JobUtils.heavyExecutor.execute(processSingleFeedJob);
        return formatJobMessage(processSingleFeedJob.jobId, "Feed version is processing.");
    }

    /**
     * HTTP endpoint that validates GTFS+ tables for a specific feed version (or its saved/edited
     * GTFS+). If the feed version already has GTFS+ validation results, those will be returned
     * instead of re-validating.
     */
    private static GtfsPlusValidation getGtfsPlusValidation(Request req, Response res) {
        try {
            String feedVersionId = req.params("versionid");
            return GtfsPlusValidation.validate(feedVersionId);
        } catch (Exception e) {
            logMessageAndHalt(req, 500, "Could not read GTFS+ zip file", e);
        }
        return null;
    }

    /**
     * HTTP endpoint to delete the GTFS+ specific edits made for a feed version. In other words,
     * this will revert to referencing the original GTFS+ files for a feed version. Note: this
     * will not delete the feed version itself.
     */
    private static String deleteGtfsPlusFile(Request req, Response res) {
        String feedVersionId = req.params("versionid");
        File file = gtfsPlusStore.getFeed(feedVersionId);
        if (file == null) {
            logMessageAndHalt(req, HttpStatus.NOT_FOUND_404, "No GTFS+ file found for feed version");
            return null;
        }
        file.delete();
        return SparkUtils.formatJSON("message", "GTFS+ edits deleted successfully.");
    }

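    /**
     * Register the GTFS+ HTTP endpoints with the Spark router under the provided API prefix.
     */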
    public static void register(String apiPrefix) {
        post(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::uploadGtfsPlusFile, JsonUtil.objectMapper::writeValueAsString);
        get(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::getGtfsPlusFile);
        delete(apiPrefix + "secure/gtfsplus/:versionid", GtfsPlusController::deleteGtfsPlusFile);
        get(apiPrefix + "secure/gtfsplus/:versionid/timestamp", GtfsPlusController::getGtfsPlusFileTimestamp, JsonUtil.objectMapper::writeValueAsString);
        get(apiPrefix + "secure/gtfsplus/:versionid/validation", GtfsPlusController::getGtfsPlusValidation, JsonUtil.objectMapper::writeValueAsString);
        post(apiPrefix + "secure/gtfsplus/:versionid/publish", GtfsPlusController::publishGtfsPlusFile, JsonUtil.objectMapper::writeValueAsString);
    }
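
    /**
     * Illustrative sketch only, not part of the original controller: the three entry-copying
     * loops above share the same logic and could be consolidated into a helper like this one,
     * which copies a single entry from a source zip into the output zip.
     */
    private static void copyZipEntry(ZipFile sourceZip, ZipEntry entry, ZipOutputStream zos) throws IOException {
        zos.putNextEntry(new ZipEntry(entry.getName()));
        byte[] buffer = new byte[512];
        try (InputStream in = sourceZip.getInputStream(entry)) {
            int read;
            // Loop until read() signals end-of-stream (-1). This is a more defensive idiom than
            // polling in.available(), which for generic streams is only an estimate (ZipFile
            // input streams happen to report exact remaining bytes, which is why the loops
            // above work as written).
            while ((read = in.read(buffer)) > 0) {
                zos.write(buffer, 0, read);
            }
        }
        zos.closeEntry();
    }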
}