diff --git a/afs-cassandra/src/test/java/com/powsybl/afs/cassandra/CassandraAppStorageTest.java b/afs-cassandra/src/test/java/com/powsybl/afs/cassandra/CassandraAppStorageTest.java index e2452d98..748c7ce0 100644 --- a/afs-cassandra/src/test/java/com/powsybl/afs/cassandra/CassandraAppStorageTest.java +++ b/afs-cassandra/src/test/java/com/powsybl/afs/cassandra/CassandraAppStorageTest.java @@ -22,7 +22,7 @@ public class CassandraAppStorageTest extends AbstractAppStorageTest { public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("afs.cql", CassandraConstants.AFS_KEYSPACE), null, 20000L); @Override - protected AppStorage createStorage() { + public AppStorage createStorage() { return new CassandraAppStorage("test", () -> new CassandraTestContext(cassandraCQLUnit), new CassandraAppStorageConfig(), new InMemoryEventsBus()); } diff --git a/afs-mapdb/src/main/java/com/powsybl/afs/mapdb/MapDbAppFileSystemConfig.java b/afs-mapdb/src/main/java/com/powsybl/afs/mapdb/MapDbAppFileSystemConfig.java index 8df06574..4df922c4 100644 --- a/afs-mapdb/src/main/java/com/powsybl/afs/mapdb/MapDbAppFileSystemConfig.java +++ b/afs-mapdb/src/main/java/com/powsybl/afs/mapdb/MapDbAppFileSystemConfig.java @@ -8,6 +8,7 @@ import com.powsybl.afs.AfsException; import com.powsybl.afs.storage.AbstractAppFileSystemConfig; +import com.powsybl.commons.config.ModuleConfig; import com.powsybl.commons.config.PlatformConfig; import java.nio.file.Files; @@ -31,30 +32,32 @@ public static List load() { public static List load(PlatformConfig platformConfig) { return platformConfig.getOptionalModuleConfig("mapdb-app-file-system") - .map(moduleConfig -> { - List configs = new ArrayList<>(); - if (moduleConfig.hasProperty("drive-name") - && moduleConfig.hasProperty("db-file")) { - String driveName = moduleConfig.getStringProperty("drive-name"); - boolean remotelyAccessible = moduleConfig.getBooleanProperty("remotely-accessible", DEFAULT_REMOTELY_ACCESSIBLE); - Path rootDir = 
moduleConfig.getPathProperty("db-file"); - configs.add(new MapDbAppFileSystemConfig(driveName, remotelyAccessible, rootDir)); - } - int maxAdditionalDriveCount = moduleConfig.getIntProperty("max-additional-drive-count", 0); - for (int i = 0; i < maxAdditionalDriveCount; i++) { - if (moduleConfig.hasProperty("drive-name-" + i) - && moduleConfig.hasProperty("db-file-" + i)) { - String driveName = moduleConfig.getStringProperty("drive-name-" + i); - boolean remotelyAccessible = moduleConfig.getBooleanProperty("remotely-accessible-" + i, DEFAULT_REMOTELY_ACCESSIBLE); - Path rootDir = moduleConfig.getPathProperty("db-file-" + i); - configs.add(new MapDbAppFileSystemConfig(driveName, remotelyAccessible, rootDir)); - } - } - return configs; - }) + .map(MapDbAppFileSystemConfig::load) .orElse(Collections.emptyList()); } + public static List load(ModuleConfig moduleConfig) { + List configs = new ArrayList<>(); + if (moduleConfig.hasProperty("drive-name") + && moduleConfig.hasProperty("db-file")) { + String driveName = moduleConfig.getStringProperty("drive-name"); + boolean remotelyAccessible = moduleConfig.getBooleanProperty("remotely-accessible", DEFAULT_REMOTELY_ACCESSIBLE); + Path rootDir = moduleConfig.getPathProperty("db-file"); + configs.add(new MapDbAppFileSystemConfig(driveName, remotelyAccessible, rootDir)); + } + int maxAdditionalDriveCount = moduleConfig.getIntProperty("max-additional-drive-count", 0); + for (int i = 0; i < maxAdditionalDriveCount; i++) { + if (moduleConfig.hasProperty("drive-name-" + i) + && moduleConfig.hasProperty("db-file-" + i)) { + String driveName = moduleConfig.getStringProperty("drive-name-" + i); + boolean remotelyAccessible = moduleConfig.getBooleanProperty("remotely-accessible-" + i, DEFAULT_REMOTELY_ACCESSIBLE); + Path rootDir = moduleConfig.getPathProperty("db-file-" + i); + configs.add(new MapDbAppFileSystemConfig(driveName, remotelyAccessible, rootDir)); + } + } + return configs; + } + private static Path checkDbFile(Path dbFile) 
{ Objects.requireNonNull(dbFile); if (Files.isDirectory(dbFile)) { diff --git a/afs-spring-server/src/test/java/com/powsybl/afs/server/StorageServerTest.java b/afs-spring-server/src/test/java/com/powsybl/afs/server/StorageServerTest.java index 5552b84b..f03c6de0 100644 --- a/afs-spring-server/src/test/java/com/powsybl/afs/server/StorageServerTest.java +++ b/afs-spring-server/src/test/java/com/powsybl/afs/server/StorageServerTest.java @@ -79,7 +79,7 @@ private URI getRestUri() { } @Override - protected AppStorage createStorage() { + public AppStorage createStorage() { URI restUri = getRestUri(); return new RemoteAppStorage(FS_TEST_NAME, restUri, ""); } diff --git a/afs-storage-api/src/test/java/com/powsybl/afs/storage/AbstractAppStorageTest.java b/afs-storage-api/src/test/java/com/powsybl/afs/storage/AbstractAppStorageTest.java index bab2dc05..facbc3cd 100644 --- a/afs-storage-api/src/test/java/com/powsybl/afs/storage/AbstractAppStorageTest.java +++ b/afs-storage-api/src/test/java/com/powsybl/afs/storage/AbstractAppStorageTest.java @@ -43,10 +43,23 @@ public abstract class AbstractAppStorageTest { protected AppStorage storage; + protected boolean conservesChunks; + + protected boolean handlesStringTimeSeries; + protected BlockingQueue eventStack; protected AppStorageListener l = eventList -> eventStack.addAll(eventList.getEvents()); + public AbstractAppStorageTest() { + this(true, true); + } + + public AbstractAppStorageTest(boolean conservesChunks, boolean handlesStringTimeSeries) { + this.conservesChunks = conservesChunks; + this.handlesStringTimeSeries = handlesStringTimeSeries; + } + protected abstract AppStorage createStorage(); @Before @@ -385,8 +398,13 @@ public void test() throws IOException, InterruptedException { assertTrue(storage.getTimeSeriesMetadata(testData3Info.getId(), Sets.newHashSet("ts1")).isEmpty()); // 14) add data to double time series - storage.addDoubleTimeSeriesData(testData2Info.getId(), 0, "ts1", Arrays.asList(new 
UncompressedDoubleDataChunk(2, new double[] {1d, 2d}), - new UncompressedDoubleDataChunk(5, new double[] {3d}))); + if (conservesChunks) { + storage.addDoubleTimeSeriesData(testData2Info.getId(), 0, "ts1", Arrays.asList(new UncompressedDoubleDataChunk(2, new double[] {1d, 2d}), + new UncompressedDoubleDataChunk(5, new double[] {3d}))); + } else { + storage.addDoubleTimeSeriesData(testData2Info.getId(), 0, "ts1", Arrays.asList(new UncompressedDoubleDataChunk(0, new double[] {1d, 2d, 3d}))); + } + storage.flush(); // check event @@ -399,52 +417,58 @@ public void test() throws IOException, InterruptedException { // check double time series data query Map> doubleTimeSeriesData = storage.getDoubleTimeSeriesData(testData2Info.getId(), Sets.newHashSet("ts1"), 0); assertEquals(1, doubleTimeSeriesData.size()); - assertEquals(Arrays.asList(new UncompressedDoubleDataChunk(2, new double[] {1d, 2d}), - new UncompressedDoubleDataChunk(5, new double[] {3d})), - doubleTimeSeriesData.get("ts1")); + if (conservesChunks) { + assertEquals(Arrays.asList(new UncompressedDoubleDataChunk(2, new double[] {1d, 2d}), + new UncompressedDoubleDataChunk(5, new double[] {3d})), + doubleTimeSeriesData.get("ts1")); + } else { + assertEquals(Arrays.asList(new UncompressedDoubleDataChunk(0, new double[]{1d, 2d, 3d})), doubleTimeSeriesData.get("ts1")); + } assertTrue(storage.getDoubleTimeSeriesData(testData3Info.getId(), Sets.newHashSet("ts1"), 0).isEmpty()); // 15) create a second string time series - TimeSeriesMetadata metadata2 = new TimeSeriesMetadata("ts2", - TimeSeriesDataType.STRING, - ImmutableMap.of(), - RegularTimeSeriesIndex.create(Interval.parse("2015-01-01T00:00:00Z/2015-01-01T01:15:00Z"), - Duration.ofMinutes(15))); - storage.createTimeSeries(testData2Info.getId(), metadata2); - storage.flush(); - - // check event - assertEventStack(new TimeSeriesCreated(testData2Info.getId(), "ts2")); - - // check string time series query - assertEquals(Sets.newHashSet("ts1", "ts2"), 
storage.getTimeSeriesNames(testData2Info.getId())); - metadataList = storage.getTimeSeriesMetadata(testData2Info.getId(), Sets.newHashSet("ts1")); - assertEquals(1, metadataList.size()); - - // 16) add data to double time series - storage.addStringTimeSeriesData(testData2Info.getId(), 0, "ts2", Arrays.asList(new UncompressedStringDataChunk(2, new String[] {"a", "b"}), - new UncompressedStringDataChunk(5, new String[] {"c"}))); - storage.flush(); - - // check event - assertEventStack(new TimeSeriesDataUpdated(testData2Info.getId(), "ts2")); - - // check string time series data query - Map> stringTimeSeriesData = storage.getStringTimeSeriesData(testData2Info.getId(), Sets.newHashSet("ts2"), 0); - assertEquals(1, stringTimeSeriesData.size()); - assertEquals(Arrays.asList(new UncompressedStringDataChunk(2, new String[] {"a", "b"}), - new UncompressedStringDataChunk(5, new String[] {"c"})), - stringTimeSeriesData.get("ts2")); - - // 17) clear time series - storage.clearTimeSeries(testData2Info.getId()); - storage.flush(); - - // check event - assertEventStack(new TimeSeriesCleared(testData2Info.getId())); - - // check there is no more time series - assertTrue(storage.getTimeSeriesNames(testData2Info.getId()).isEmpty()); + if (handlesStringTimeSeries) { + TimeSeriesMetadata metadata2 = new TimeSeriesMetadata("ts2", + TimeSeriesDataType.STRING, + ImmutableMap.of(), + RegularTimeSeriesIndex.create(Interval.parse("2015-01-01T00:00:00Z/2015-01-01T01:15:00Z"), + Duration.ofMinutes(15))); + storage.createTimeSeries(testData2Info.getId(), metadata2); + storage.flush(); + + // check event + assertEventStack(new TimeSeriesCreated(testData2Info.getId(), "ts2")); + + // check string time series query + assertEquals(Sets.newHashSet("ts1", "ts2"), storage.getTimeSeriesNames(testData2Info.getId())); + metadataList = storage.getTimeSeriesMetadata(testData2Info.getId(), Sets.newHashSet("ts1")); + assertEquals(1, metadataList.size()); + + // 16) add data to double time series + 
storage.addStringTimeSeriesData(testData2Info.getId(), 0, "ts2", Arrays.asList(new UncompressedStringDataChunk(2, new String[] {"a", "b"}), + new UncompressedStringDataChunk(5, new String[] {"c"}))); + storage.flush(); + + // check event + assertEventStack(new TimeSeriesDataUpdated(testData2Info.getId(), "ts2")); + + // check string time series data query + Map> stringTimeSeriesData = storage.getStringTimeSeriesData(testData2Info.getId(), Sets.newHashSet("ts2"), 0); + assertEquals(1, stringTimeSeriesData.size()); + assertEquals(Arrays.asList(new UncompressedStringDataChunk(2, new String[] {"a", "b"}), + new UncompressedStringDataChunk(5, new String[] {"c"})), + stringTimeSeriesData.get("ts2")); + + // 17) clear time series + storage.clearTimeSeries(testData2Info.getId()); + storage.flush(); + + // check event + assertEventStack(new TimeSeriesCleared(testData2Info.getId())); + + // check there is no more time series + assertTrue(storage.getTimeSeriesNames(testData2Info.getId()).isEmpty()); + } // 18) change parent test NodeInfo folder1Info = storage.createNode(rootFolderInfo.getId(), "test1", FOLDER_PSEUDO_CLASS, "", 0, new NodeGenericMetadata()); diff --git a/afs-timeseries-server-storage/pom.xml b/afs-timeseries-server-storage/pom.xml new file mode 100644 index 00000000..685e8299 --- /dev/null +++ b/afs-timeseries-server-storage/pom.xml @@ -0,0 +1,97 @@ + + + + 4.0.0 + + 3.6.0-SNAPSHOT + 2.12.1 + 0.0.1-SNAPSHOT + + + + powsybl-afs + com.powsybl + 3.6.0-SNAPSHOT + + + powsybl-afs-timeseries-server-storage + AFS time series server filesystem storage implementations + An AFS time series provider based on time series server + + + + com.powsybl + powsybl-afs-storage-api + ${project.version} + + + com.powsybl + powsybl-time-series-server-interfaces + ${time-series-server.version} + + + + org.slf4j + slf4j-simple + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + + + org.jboss.resteasy + resteasy-jackson-provider + ${resteasy.version} + + + 
org.apache.commons + commons-lang3 + 3.11 + + + org.jboss.resteasy + resteasy-client + provided + + + + + com.powsybl + powsybl-afs-mapdb-storage + ${project.version} + test + + + junit + junit + test + + + com.powsybl + powsybl-afs-storage-api + 3.6.0-SNAPSHOT + test-jar + test + + + org.mock-server + mockserver-netty + 5.11.2 + test + + + org.assertj + assertj-core + + + \ No newline at end of file diff --git a/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesServerAppStorage.java b/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesServerAppStorage.java new file mode 100644 index 00000000..208ab4e5 --- /dev/null +++ b/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesServerAppStorage.java @@ -0,0 +1,268 @@ +package com.powsybl.afs.timeseriesserver.storage; + +import com.powsybl.afs.storage.*; +import com.powsybl.afs.storage.events.AppStorageListener; +import com.powsybl.afs.storage.events.TimeSeriesCreated; +import com.powsybl.afs.storage.events.TimeSeriesDataUpdated; +import com.powsybl.timeseries.DoubleDataChunk; +import com.powsybl.timeseries.StringDataChunk; +import com.powsybl.timeseries.TimeSeriesMetadata; + +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +public class TimeSeriesServerAppStorage extends AbstractAppStorage { + + @FunctionalInterface + public interface TimeSeriesServerAppStorageProvider { + R apply(F first, S second, T third); + } + + /** + * This storage is used for all non-timeseries-related operations + */ + private AbstractAppStorage generalDelegate; + + /** + * This storage handles all the timeseries-related operations + */ + private TimeSeriesStorageDelegate timeSeriesDelegate; + + /** + * A listener that copies all event from the general delegate event bus to this class 
event bus. + * This has to be a field because the listeners of an event bus are stored in a WeakReferenceList + */ + private AppStorageListener notifyGeneralDelegateEventListener; + + public TimeSeriesServerAppStorage(final URI targetURI, final String app, final AbstractAppStorage generalDelegate) { + this(generalDelegate, targetURI, app); + } + + public TimeSeriesServerAppStorage(final AbstractAppStorage generalDelegate, final URI timeSeriesServerURI, final String app) { + this.generalDelegate = generalDelegate; + eventsBus = new InMemoryEventsBus(); + notifyGeneralDelegateEventListener = t -> t.getEvents().forEach(e -> pushEvent(e, t.getTopic())); + generalDelegate.getEventsBus().addListener(notifyGeneralDelegateEventListener); + timeSeriesDelegate = new TimeSeriesStorageDelegate(timeSeriesServerURI, app); + timeSeriesDelegate.createAFSAppIfNotExists(); + } + + @Override + public String getFileSystemName() { + return generalDelegate.getFileSystemName(); + } + + @Override + public boolean isRemote() { + return generalDelegate.isRemote(); + } + + @Override + public NodeInfo createRootNodeIfNotExists(String name, String nodePseudoClass) { + return generalDelegate.createRootNodeIfNotExists(name, nodePseudoClass); + } + + @Override + public NodeInfo createNode(String parentNodeId, String name, String nodePseudoClass, String description, int version, NodeGenericMetadata genericMetadata) { + return generalDelegate.createNode(parentNodeId, name, nodePseudoClass, description, version, genericMetadata); + } + + @Override + public boolean isWritable(String nodeId) { + return generalDelegate.isWritable(nodeId); + } + + @Override + public NodeInfo getNodeInfo(String nodeId) { + return generalDelegate.getNodeInfo(nodeId); + } + + @Override + public void setDescription(String nodeId, String description) { + generalDelegate.setDescription(nodeId, description); + } + + @Override + public void updateModificationTime(String nodeId) { + 
generalDelegate.updateModificationTime(nodeId); + } + + @Override + public List getChildNodes(String nodeId) { + return generalDelegate.getChildNodes(nodeId); + } + + @Override + public Optional getChildNode(String nodeId, String name) { + return generalDelegate.getChildNode(nodeId, name); + } + + @Override + public Optional getParentNode(String nodeId) { + return generalDelegate.getParentNode(nodeId); + } + + @Override + public void setParentNode(String nodeId, String newParentNodeId) { + generalDelegate.setParentNode(nodeId, newParentNodeId); + } + + @Override + public String deleteNode(String nodeId) { + return generalDelegate.deleteNode(nodeId); + } + + @Override + public Optional readBinaryData(String nodeId, String name) { + return generalDelegate.readBinaryData(nodeId, name); + } + + @Override + public OutputStream writeBinaryData(String nodeId, String name) { + return generalDelegate.writeBinaryData(nodeId, name); + } + + @Override + public boolean dataExists(String nodeId, String name) { + return generalDelegate.dataExists(nodeId, name); + } + + @Override + public Set getDataNames(String nodeId) { + return generalDelegate.getDataNames(nodeId); + } + + @Override + public boolean removeData(String nodeId, String name) { + return generalDelegate.removeData(nodeId, name); + } + + @Override + public void createTimeSeries(String nodeId, TimeSeriesMetadata metadata) { + timeSeriesDelegate.createTimeSeries(nodeId, metadata); + pushEvent(new TimeSeriesCreated(nodeId, metadata.getName()), AbstractAppStorage.APPSTORAGE_TIMESERIES_TOPIC); + } + + @Override + public Set getTimeSeriesNames(String nodeId) { + return timeSeriesDelegate.getTimeSeriesNames(nodeId); + } + + @Override + public boolean timeSeriesExists(String nodeId, String timeSeriesName) { + return timeSeriesDelegate.timeSeriesExists(nodeId, timeSeriesName); + } + + @Override + public List getTimeSeriesMetadata(String nodeId, Set timeSeriesNames) { + return timeSeriesDelegate.getTimeSeriesMetadata(nodeId, 
timeSeriesNames); + } + + @Override + public Set getTimeSeriesDataVersions(String nodeId) { + return timeSeriesDelegate.getTimeSeriesDataVersions(nodeId, null); + } + + @Override + public Set getTimeSeriesDataVersions(String nodeId, String timeSeriesName) { + return timeSeriesDelegate.getTimeSeriesDataVersions(nodeId, timeSeriesName); + } + + @Override + public Map> getDoubleTimeSeriesData(String nodeId, Set timeSeriesNames, int version) { + return timeSeriesDelegate.getDoubleTimeSeriesData(nodeId, timeSeriesNames, version); + } + + @Override + public void addDoubleTimeSeriesData(String nodeId, int version, String timeSeriesName, List chunks) { + timeSeriesDelegate.addDoubleTimeSeriesData(nodeId, version, timeSeriesName, chunks); + pushEvent(new TimeSeriesDataUpdated(nodeId, timeSeriesName), AbstractAppStorage.APPSTORAGE_TIMESERIES_TOPIC); + } + + @Override + public Map> getStringTimeSeriesData(String nodeId, Set timeSeriesNames, int version) { + return timeSeriesDelegate.getStringTimeSeriesData(nodeId, timeSeriesNames, version); + } + + @Override + public void addStringTimeSeriesData(String nodeId, int version, String timeSeriesName, List chunks) { + timeSeriesDelegate.addStringTimeSeriesData(nodeId, version, timeSeriesName, chunks); + pushEvent(new TimeSeriesDataUpdated(nodeId, timeSeriesName), AbstractAppStorage.APPSTORAGE_TIMESERIES_TOPIC); + } + + @Override + public void clearTimeSeries(String nodeId) { + //TODO + } + + @Override + public void addDependency(String nodeId, String name, String toNodeId) { + generalDelegate.addDependency(nodeId, name, toNodeId); + } + + @Override + public Set getDependencies(String nodeId, String name) { + return generalDelegate.getDependencies(nodeId, name); + } + + @Override + public Set getDependencies(String nodeId) { + return generalDelegate.getDependencies(nodeId); + } + + @Override + public Set getBackwardDependencies(String nodeId) { + return generalDelegate.getBackwardDependencies(nodeId); + } + + @Override + public void 
removeDependency(String nodeId, String name, String toNodeId) { + generalDelegate.removeDependency(nodeId, name, toNodeId); + } + + @Override + public void flush() { + generalDelegate.flush(); + eventsBus.flush(); + } + + @Override + public boolean isClosed() { + return generalDelegate.isClosed(); + } + + @Override + public void close() { + generalDelegate.close(); + } + + @Override + public boolean isConsistent(String nodeId) { + return generalDelegate.isConsistent(nodeId); + } + + @Override + public void setMetadata(String nodeId, NodeGenericMetadata genericMetadata) { + generalDelegate.setMetadata(nodeId, genericMetadata); + } + + @Override + public void setConsistent(String nodeId) { + generalDelegate.setConsistent(nodeId); + } + + @Override + public List getInconsistentNodes() { + return generalDelegate.getInconsistentNodes(); + } + + @Override + public void renameNode(String nodeId, String name) { + generalDelegate.renameNode(nodeId, name); + } +} diff --git a/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesStorageDelegate.java b/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesStorageDelegate.java new file mode 100644 index 00000000..d499ec95 --- /dev/null +++ b/afs-timeseries-server-storage/src/main/java/com/powsybl/afs/timeseriesserver/storage/TimeSeriesStorageDelegate.java @@ -0,0 +1,366 @@ +package com.powsybl.afs.timeseriesserver.storage; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.powsybl.afs.storage.AfsStorageException; +import com.powsybl.timeseries.*; +import com.powsybl.timeseries.storer.query.create.CreateQuery; +import com.powsybl.timeseries.storer.query.fetch.FetchQuery; +import com.powsybl.timeseries.storer.query.fetch.result.AbstractFetchQueryResult; +import com.powsybl.timeseries.storer.query.fetch.result.FetchQueryMixedResult; +import 
com.powsybl.timeseries.storer.query.publish.AbstractPublishQuery; +import com.powsybl.timeseries.storer.query.publish.PublishDoubleQuery; +import com.powsybl.timeseries.storer.query.publish.PublishStringQuery; +import com.powsybl.timeseries.storer.query.search.SearchQuery; +import com.powsybl.timeseries.storer.query.search.SearchQueryResults; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.NotImplementedException; +import org.jboss.resteasy.client.jaxrs.ResteasyClientBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.client.Client; +import javax.ws.rs.client.Entity; +import javax.ws.rs.client.WebTarget; +import javax.ws.rs.core.Response; +import java.io.IOException; +import java.net.URI; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.*; +import java.util.stream.Collectors; + +public class TimeSeriesStorageDelegate { + + private static final Logger LOGGER = LoggerFactory.getLogger(TimeSeriesStorageDelegate.class); + + /** + * The name of the app in TimeSeriesServer, in which AFS will store time series + */ + private final String app; + + /** + * The address of the TimeSeriesServer + */ + private URI timeSeriesServerURI; + + public TimeSeriesStorageDelegate(URI timeSeriesServerURI, String app) { + this.timeSeriesServerURI = timeSeriesServerURI; + this.app = app; + } + + public static Client createClient() { + return new ResteasyClientBuilder() + .connectionPoolSize(50) + .build(); + } + + private WebTarget buildBaseRequest(Client client) { + return client.target(timeSeriesServerURI) + .path("v1") + .path("timeseries") + .path("apps"); + } + + public void createAFSAppIfNotExists() { + Client client = createClient(); + try { + Response response = buildBaseRequest(client).request().get(); + + Collection apps = response.readEntity(Collection.class); + if (apps.contains(app)) { + return; + } + + response = 
buildBaseRequest(client).path(app).request().post(Entity.json("")); + if (response.getStatus() != 200) { + throw new AfsStorageException("Error while initializing AFS timeseries app storage"); + } + + } finally { + client.close(); + } + + } + + public void createTimeSeries(String nodeId, TimeSeriesMetadata metadata) { + if (!(metadata.getIndex() instanceof RegularTimeSeriesIndex)) { + throw new NotImplementedException("TimeSeriesServer only handles regular time series for now."); + } + RegularTimeSeriesIndex index = (RegularTimeSeriesIndex) metadata.getIndex(); + + CreateQuery createQuery = new CreateQuery(); + createQuery.setMatrix(nodeId); + createQuery.setName(metadata.getName()); + createQuery.setTags(metadata.getTags()); + createQuery.setTimeStepCount(index.getPointCount()); + createQuery.setTimeStepDuration(index.getSpacing()); + LocalDateTime startDate = Instant.ofEpochMilli(index.getStartTime()).atZone(ZoneId.systemDefault()).toLocalDateTime(); + createQuery.setStartDate(startDate); + + Client client = createClient(); + try { + buildBaseRequest(client) + .path(app) + .path("series") + .request().post(Entity.json(new ObjectMapper().writeValueAsString(createQuery))); + } catch (JsonProcessingException e) { + LOGGER.error(e.getMessage(), e); + } finally { + client.close(); + } + } + + public SearchQueryResults performSearch(SearchQuery query) { + + SearchQueryResults results = null; + + Client client = createClient(); + try { + Response response = buildBaseRequest(client) + .path(app) + .path("series") + .path("_search") + .request().post(Entity.json(new ObjectMapper().writeValueAsString(query))); + String json = response.readEntity(String.class); + results = new ObjectMapper().readValue(json, SearchQueryResults.class); + } catch (IOException e) { + LOGGER.error(e.getMessage(), e); + } finally { + client.close(); + } + + return results; + } + + public Set getTimeSeriesNames(String nodeId) { + + SearchQuery searchQuery = new SearchQuery(); + 
searchQuery.setMatrix(nodeId); + + SearchQueryResults results = performSearch(searchQuery); + if (results != null) { + return results.getTimeSeriesInformations() + .stream().map(t -> t.getName()).collect(Collectors.toSet()); + } + return null; + } + + public boolean timeSeriesExists(String nodeId, String name) { + + SearchQueryResults results = doSearch(nodeId, Collections.singleton(name)); + if (results != null) { + return results.getTimeSeriesInformations().size() > 0; + } + return false; + } + + public List getTimeSeriesMetadata(String nodeId, Set timeSeriesNames) { + + SearchQueryResults results = doSearch(nodeId, timeSeriesNames); + if (results != null) { + return results.getTimeSeriesInformations() + .stream().map(t -> { + long startTime = t.getStartDate().atZone(ZoneId.systemDefault()).toInstant().toEpochMilli(); + long spacing = t.getTimeStepDuration(); + long endTime = startTime + spacing * (t.getTimeStepCount() - 1); + TimeSeriesIndex index = new RegularTimeSeriesIndex(startTime, endTime, spacing); + return new TimeSeriesMetadata(t.getName(), TimeSeriesDataType.DOUBLE, t.getTags(), index); + }).collect(Collectors.toList()); + } + return null; + } + + public Set getTimeSeriesDataVersions(String nodeId, String timeSeriesName) { + SearchQuery searchQuery = new SearchQuery(); + searchQuery.setMatrix(nodeId); + if (timeSeriesName != null) { + searchQuery.setNames(Collections.singleton(timeSeriesName)); + } + SearchQueryResults results = performSearch(searchQuery); + if (results != null) { + return results.getTimeSeriesInformations() + .stream().flatMap(t -> t.getVersions().keySet().stream()) + .map(t -> Integer.parseInt(t)) + .collect(Collectors.toSet()); + } + return null; + } + + /** + * Add a new string time series + * + * @param nodeId identifier for the node + * @param version version to publish to + * @param timeSeriesName name of the time series + * @param chunks actual (string) data to publish + */ + public void addStringTimeSeriesData(String nodeId, 
int version, String timeSeriesName, List chunks) { + // Retrieve metadata and build time series + TimeSeriesMetadata metadata = getTimeSeriesMetadata(nodeId, Collections.singleton(timeSeriesName)).get(0); + StringTimeSeries ts = new StringTimeSeries(metadata, chunks); + + // Prepare publish request + AbstractPublishQuery publishQuery = new PublishStringQuery(); + publishQuery.setMatrix(nodeId); + publishQuery.setTimeSeriesName(timeSeriesName); + publishQuery.setVersionName(String.valueOf(version)); + publishQuery.setData(ts.toArray()); + + // Do run query + doPublish(publishQuery); + } + + /** + * Add a new double time series + * + * @param nodeId identifier for the node + * @param version version to publish to + * @param timeSeriesName name of the time series + * @param chunks actual (double) data to publish + */ + public void addDoubleTimeSeriesData(String nodeId, int version, String timeSeriesName, List chunks) { + + //First step : retrieve metadata and reconstitute the time series + TimeSeriesMetadata metadata = getTimeSeriesMetadata(nodeId, Collections.singleton(timeSeriesName)).get(0); + StoredDoubleTimeSeries ts = new StoredDoubleTimeSeries(metadata, chunks); + + //Second step : perform a publish request + AbstractPublishQuery publishQuery = new PublishDoubleQuery(); + publishQuery.setMatrix(nodeId); + publishQuery.setTimeSeriesName(timeSeriesName); + publishQuery.setVersionName(String.valueOf(version)); + publishQuery.setData(ArrayUtils.toObject(ts.toArray())); + + // Do run query + doPublish(publishQuery); + } + + /** + * Retrieve time series String data from server + * Perform a search query with provided criteria, followed by a fetch query (using search results) + * @param nodeId identifier for the node to retrieve data for + * @param timeSeriesNames names of time series to retrieve + * @param version version of the data to fetch + * @return a map containing all queried time series, indexed by their name + */ + public Map> getStringTimeSeriesData(String
nodeId, Set timeSeriesNames, int version) { + String versionString = String.valueOf(version); + // First perform a search query + final SearchQueryResults searchResults = doSearch(nodeId, timeSeriesNames); + List versionIDs = searchResults.getVersionIds(versionString); + Map versionToName = searchResults.getVersionToNameMap(versionString); + // Then perform the fetch query + FetchQueryMixedResult fetchResults = (FetchQueryMixedResult) doFetch(versionToName); + Map> result = new HashMap<>(); + for (int i = 0; i < versionToName.keySet().size(); i++) { + String[] values = fetchResults.getData().get(i) + .stream() + .map(v -> (String) v) + .toArray(String[]::new); + StringDataChunk chunk = new UncompressedStringDataChunk(0, values); + result.put(versionToName.get(versionIDs.get(i)), Collections.singletonList(chunk)); + } + return result; + } + + /** + * Perform search then fetch queries to retrieve double data against time series server + * + * @param nodeId identifier for the node to retrieve time series for + * @param timeSeriesNames names of time series to search for + * @param version version of the data to fetch + * @return time series names, mapped to their respective results (as chunks) + */ + public Map> getDoubleTimeSeriesData(String nodeId, Set timeSeriesNames, int version) { + String versionString = String.valueOf(version); + // First perform a search query + final SearchQueryResults searchResults = doSearch(nodeId, timeSeriesNames); + List versionIDs = searchResults.getVersionIds(versionString); + Map versionToName = searchResults.getVersionToNameMap(versionString); + // Then perform the fetch query + FetchQueryMixedResult fetchResults = (FetchQueryMixedResult) doFetch(versionToName); + // Extract data from results + Map> toReturn = new HashMap<>(); + for (int i = 0; i < versionIDs.size(); i++) { + double[] values = fetchResults.getData() + .get(i) + .stream() + .mapToDouble(val -> ((Number)val).doubleValue()) + .toArray(); + UncompressedDoubleDataChunk 
chunk = new UncompressedDoubleDataChunk(0, values); + toReturn.put(versionToName.get(versionIDs.get(i)), Collections.singletonList(chunk)); + } + return toReturn; + } + + /** + * Perform search request against time series server + * + * @param nodeId identifier of the node to search for + * @param timeSeriesNames names of time series to search for + * @return a SearchQueryResults object representing the TS server response + */ + private SearchQueryResults doSearch(final String nodeId, final Set timeSeriesNames) { + // Prepare search query + SearchQuery searchQuery = new SearchQuery(); + searchQuery.setMatrix(nodeId); + searchQuery.setNames(timeSeriesNames); + // Run search query + return performSearch(searchQuery); + } + + /** + * Perform a fetch query against distant API + * + * @param versionToName a map of TS version -> TS name + * @return fetch HTTP response + */ + private AbstractFetchQueryResult doFetch(final Map versionToName) { + // Prepare fetch query + FetchQuery query = new FetchQuery(versionToName.keySet(), null, null); + Client client = createClient(); + try { + Response response = buildBaseRequest(client) + .path(app) + .path("series") + .path("_fetch") + .request().post(Entity.json(new ObjectMapper().writeValueAsString(query))); + if (response.getStatus() != 200) { + throw new AfsStorageException("Error while fetching data from time series server"); + } + + String json = response.readEntity(String.class); + return new ObjectMapper().readValue(json, AbstractFetchQueryResult.class); + } catch (JsonProcessingException e) { + throw new AfsStorageException("Error while fetching data from time series server"); + } finally { + client.close(); + } + } + + /** + * Perform a publish query + * + * @param publishQuery query to issue + */ + private void doPublish(final AbstractPublishQuery publishQuery) { + // Run request + Client client = createClient(); + try { + Response response = buildBaseRequest(client) + .path(app) + .path("series") + .request() + 
.put(Entity.json(new ObjectMapper().writeValueAsString(publishQuery))); + if (response.getStatus() != 200) { + throw new AfsStorageException("Error while publishing data to time series server"); + } + } catch (IOException ioe) { + LOGGER.error("Could not publish String time series", ioe); + } finally { + client.close(); + } + } +} diff --git a/afs-timeseries-server-storage/src/test/java/com/powsybl/afs/timeseriesserver/TimeSeriesServerAppStorageTest.java b/afs-timeseries-server-storage/src/test/java/com/powsybl/afs/timeseriesserver/TimeSeriesServerAppStorageTest.java new file mode 100644 index 00000000..6ff39091 --- /dev/null +++ b/afs-timeseries-server-storage/src/test/java/com/powsybl/afs/timeseriesserver/TimeSeriesServerAppStorageTest.java @@ -0,0 +1,35 @@ +package com.powsybl.afs.timeseriesserver; + +import com.powsybl.afs.mapdb.storage.MapDbAppStorage; +import com.powsybl.afs.storage.AbstractAppStorageTest; +import com.powsybl.afs.storage.AppStorage; +import com.powsybl.afs.storage.InMemoryEventsBus; +import com.powsybl.afs.timeseriesserver.storage.TimeSeriesServerAppStorage; +import org.junit.Before; + +import java.net.URI; + +public class TimeSeriesServerAppStorageTest extends AbstractAppStorageTest { + + private URI timeSeriesServerURI; + + private static final String AFS_APP = "AFS"; + + public TimeSeriesServerAppStorageTest() { + super(false, false); + } + + @Override + @Before + public void setUp() throws Exception { + timeSeriesServerURI = new URI("http://localhost:9000/"); + super.setUp(); + } + + @Override + protected AppStorage createStorage() { + final MapDbAppStorage storage = MapDbAppStorage.createMem("mem", new InMemoryEventsBus()); + return new TimeSeriesServerAppStorage(storage, timeSeriesServerURI, AFS_APP); + } + +} diff --git a/afs-timeseries-server/pom.xml b/afs-timeseries-server/pom.xml new file mode 100644 index 00000000..cbceaae3 --- /dev/null +++ b/afs-timeseries-server/pom.xml @@ -0,0 +1,126 @@ + + + + 4.0.0 + + + com.powsybl + 
powsybl-afs + 3.6.0-SNAPSHOT + + + powsybl-afs-timeseries-server + AFS TimeSeries Server impl + AFS TimeSeries Server implementation + + + + + ${project.groupId} + powsybl-afs-core + ${project.version} + + + com.powsybl + powsybl-afs-timeseries-server-storage + ${project.version} + + + ${project.groupId} + powsybl-time-series-server-interfaces + 0.0.1-SNAPSHOT + + + org.jboss.spec.javax.ws.rs + jboss-jaxrs-api_2.0_spec + 1.0.1.Beta1 + + + org.jboss.resteasy + resteasy-client + + + org.jboss.resteasy + resteasy-jackson-provider + 3.1.4.Final + + + + + junit + junit + test + + + org.assertj + assertj-core + test + + + org.slf4j + slf4j-simple + test + + + org.cassandraunit + cassandra-unit + test + + + ${project.groupId} + powsybl-afs-cassandra + ${project.version} + test + + + ${project.groupId} + powsybl-afs-local + ${project.version} + test + + + org.mockito + mockito-core + test + + + org.mock-server + mockserver-netty + 5.11.2 + + + com.google.jimfs + jimfs + test + + + + ${project.groupId} + powsybl-afs-storage-api + ${project.version} + test-jar + test + + + org.projectlombok + lombok + 1.18.20 + + + com.powsybl + powsybl-afs-mapdb + ${project.version} + compile + + + + diff --git a/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystem.java b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystem.java new file mode 100644 index 00000000..b5ef4f22 --- /dev/null +++ b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystem.java @@ -0,0 +1,14 @@ +package com.powsybl.afs.timeseriesserver; + +import com.powsybl.afs.AppFileSystem; +import com.powsybl.afs.storage.AppStorage; + +/** + * @author amichaut@artelys.com + */ +public class TSServerAppFileSystem extends AppFileSystem { + + public TSServerAppFileSystem(final String name, final boolean remotelyAccessible, final AppStorage storage) { + super(name, remotelyAccessible, storage); + } +} diff --git 
a/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfig.java b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfig.java new file mode 100644 index 00000000..67c9c8a6 --- /dev/null +++ b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfig.java @@ -0,0 +1,96 @@ +package com.powsybl.afs.timeseriesserver; + +import com.powsybl.afs.mapdb.MapDbAppFileSystemConfig; +import com.powsybl.afs.storage.AbstractAppFileSystemConfig; +import com.powsybl.commons.config.PlatformConfig; +import lombok.Getter; +import lombok.Setter; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; + +/** + * App file system configuration for time series server app storage + * + * @author amichaut@artelys.com + */ +public class TSServerAppFileSystemConfig extends AbstractAppFileSystemConfig { + + private static final String TSSERVER_APP_FILE_SYSTEM = "tsserver-app-file-system"; + private static final String DRIVE_NAME = "drive-name"; + private static final String REMOTELY_ACCESSIBLE = "remotely-accessible"; + private static final boolean DEFAULT_REMOTELY_ACCESSIBLE = false; + private static final String HOST = "host"; + private static final String PORT = "port"; + private static final String SCHEME = "scheme"; + private static final String APP = "app"; + private static final String DEFAULT_APP = "AFS"; + public static final String DELEGATE_MAPDB_APP_FILE_SYSTEM = "delegate-mapdb-app-file-system"; + + @Setter + private String host; + + @Setter + private int port; + + @Setter + private String scheme; + + @Getter + @Setter + private String app = DEFAULT_APP; + + @Getter + @Setter + private MapDbAppFileSystemConfig delegateConfig; + + public TSServerAppFileSystemConfig(final String driveName, final boolean remotelyAccessible) { + super(driveName, remotelyAccessible); + } + + public static TSServerAppFileSystemConfig load() { + return 
load(PlatformConfig.defaultConfig()); + } + + public static TSServerAppFileSystemConfig load(final PlatformConfig platformConfig) { + return platformConfig.getOptionalModuleConfig(TSSERVER_APP_FILE_SYSTEM) + .map(moduleConfig -> { + final String driveName; + if (moduleConfig.hasProperty(DRIVE_NAME)) { + driveName = moduleConfig.getStringProperty(DRIVE_NAME); + } else { + throw new IllegalArgumentException("Please provide a drive name for timeseries server app file system configuration"); + } + final boolean remotelyAccessible = moduleConfig.getBooleanProperty(REMOTELY_ACCESSIBLE, DEFAULT_REMOTELY_ACCESSIBLE); + final TSServerAppFileSystemConfig config = new TSServerAppFileSystemConfig(driveName, remotelyAccessible); + if (moduleConfig.hasProperty(HOST)) { + config.setHost(moduleConfig.getStringProperty(HOST)); + } + if (moduleConfig.hasProperty(PORT)) { + config.setPort(moduleConfig.getIntProperty(PORT)); + } + if (moduleConfig.hasProperty(SCHEME)) { + config.setScheme(moduleConfig.getStringProperty(SCHEME)); + } + config.setApp(moduleConfig.getStringProperty(APP, DEFAULT_APP)); + // Load delegate config + final List delegateConfig = MapDbAppFileSystemConfig.load(platformConfig.getModuleConfig(DELEGATE_MAPDB_APP_FILE_SYSTEM)); + if (delegateConfig.size() != 1) { + throw new IllegalArgumentException("A single mapdb delegate configuration is necessary"); + } + config.setDelegateConfig(delegateConfig.get(0)); + return config; + }) + .orElse(null); + } + + /** + * @return target URI for timeseries service + * @throws URISyntaxException if information does not allow to build a proper URI + */ + public URI getURI() throws URISyntaxException { + return new URI(scheme, null, host, port, null, null, null); + } + +} diff --git a/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProvider.java b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProvider.java new file mode 100644 index 
00000000..d0c1d706 --- /dev/null +++ b/afs-timeseries-server/src/main/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProvider.java @@ -0,0 +1,69 @@ +package com.powsybl.afs.timeseriesserver; + +import com.google.auto.service.AutoService; +import com.powsybl.afs.AppFileSystem; +import com.powsybl.afs.AppFileSystemProvider; +import com.powsybl.afs.AppFileSystemProviderContext; +import com.powsybl.afs.mapdb.storage.MapDbAppStorage; +import com.powsybl.afs.storage.AbstractAppStorage; +import com.powsybl.afs.storage.EventsBus; +import com.powsybl.afs.timeseriesserver.storage.TimeSeriesServerAppStorage; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; + +/** + * @author amichaut@artelys.com + */ +@AutoService(AppFileSystemProvider.class) +public class TSServerAppFileSystemProvider implements AppFileSystemProvider { + + private final TSServerAppFileSystemConfig configuration; + + private final TimeSeriesServerAppStorage.TimeSeriesServerAppStorageProvider storageProvider; + + private final MapDbAppStorage.MapDbAppStorageProvider delegateStorageProvider; + + public TSServerAppFileSystemProvider() { + this( + TSServerAppFileSystemConfig.load(), + TimeSeriesServerAppStorage::new, (name, path, eventsStore) -> MapDbAppStorage.createMmapFile(name, path.toFile(), eventsStore) + ); + } + + public TSServerAppFileSystemProvider(final TSServerAppFileSystemConfig configuration, + final TimeSeriesServerAppStorage.TimeSeriesServerAppStorageProvider provider, + final MapDbAppStorage.MapDbAppStorageProvider delegateStorageProvider) { + this.configuration = configuration; + this.storageProvider = provider; + this.delegateStorageProvider = delegateStorageProvider; + } + + @Override + public List getFileSystems(final AppFileSystemProviderContext context) { + return Collections.singletonList(getFileSystem(context)); + } + + /** + * @param context a context holding necessary event bus + * 
@return a single time series server app file system, built using internal configuration and provided context + */ + private TSServerAppFileSystem getFileSystem(final AppFileSystemProviderContext context) { + // Make an URI form configuration + URI uri; + try { + uri = configuration.getURI(); + } catch (URISyntaxException e) { + throw new IllegalStateException("Could not build a proper target URI for time series server"); + } + MapDbAppStorage delegateAppStorage = delegateStorageProvider.apply(configuration.getDriveName(), configuration.getDelegateConfig().getDbFile(), context.getEventsBus()); + return new TSServerAppFileSystem( + configuration.getDriveName(), + configuration.isRemotelyAccessible(), + storageProvider.apply(uri, configuration.getApp(), delegateAppStorage) + ); + } +} diff --git a/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfigTest.java b/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfigTest.java new file mode 100644 index 00000000..fbc18f69 --- /dev/null +++ b/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemConfigTest.java @@ -0,0 +1,84 @@ +package com.powsybl.afs.timeseriesserver; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import com.powsybl.afs.mapdb.MapDbAppFileSystemConfig; +import com.powsybl.commons.config.InMemoryPlatformConfig; +import com.powsybl.commons.config.MapModuleConfig; +import com.powsybl.commons.config.PlatformConfig; +import org.junit.Before; +import org.junit.Test; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.FileSystem; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class TSServerAppFileSystemConfigTest { + + private static final String APP = "test-app"; + private static final String PORT = "8765"; + private static final String HOST = "localhost"; + 
private static final String SCHEME = "http"; + private static final String REMOTELY_ACCESSIBLE = "false"; + private static final String DRIVE_NAME = "test-drive"; + private static final String DB_FILE = "/db/test.db"; + + private FileSystem fileSystem; + private PlatformConfig platformConfig; + + @Before + public void setup() { + fileSystem = Jimfs.newFileSystem(Configuration.unix()); + InMemoryPlatformConfig conf = new InMemoryPlatformConfig(fileSystem); + final MapModuleConfig tsServerConfig = conf.createModuleConfig("tsserver-app-file-system"); + tsServerConfig.setStringProperty("drive-name", DRIVE_NAME); + tsServerConfig.setStringProperty("remotely-accessible", REMOTELY_ACCESSIBLE); + tsServerConfig.setStringProperty("scheme", SCHEME); + tsServerConfig.setStringProperty("host", HOST); + tsServerConfig.setStringProperty("port", PORT); + tsServerConfig.setStringProperty("app", APP); + final MapModuleConfig mapdbConfig = conf.createModuleConfig("delegate-mapdb-app-file-system"); + mapdbConfig.setStringProperty("drive-name", DRIVE_NAME); + mapdbConfig.setPathProperty("db-file", fileSystem.getPath(DB_FILE)); + mapdbConfig.setStringProperty("remotely-accessible", REMOTELY_ACCESSIBLE); + platformConfig = conf; + } + + @Test + public void loadTest() throws URISyntaxException { + final TSServerAppFileSystemConfig conf = TSServerAppFileSystemConfig.load(platformConfig); + assertEquals(conf.getApp(), APP); + assertEquals(conf.getDriveName(), DRIVE_NAME); + final URI uri = conf.getURI(); + assertEquals(uri.getScheme(), SCHEME); + assertEquals(uri.getPort(), Integer.parseInt(PORT)); + assertEquals(uri.getHost(), HOST); + assertEquals(conf.isRemotelyAccessible(), Boolean.valueOf(REMOTELY_ACCESSIBLE)); + + final MapDbAppFileSystemConfig delegateConf = conf.getDelegateConfig(); + assertEquals(delegateConf.getDriveName(), DRIVE_NAME); + assertEquals(delegateConf.isRemotelyAccessible(), Boolean.valueOf(REMOTELY_ACCESSIBLE)); + assertEquals(fileSystem.getPath(DB_FILE), 
delegateConf.getDbFile()); + } + + /** + * Refer to conf.yaml file in test resources + */ + @Test + public void emptyLoadTest() throws URISyntaxException { + final TSServerAppFileSystemConfig load = TSServerAppFileSystemConfig.load(); + assertEquals(load.getDriveName(), "test-fs"); + assertFalse(load.isRemotelyAccessible()); + assertEquals(load.getApp(), "AFS"); + final URI uri = load.getURI(); + assertEquals(uri.getScheme(), "http"); + assertEquals(uri.getHost(), "localhost"); + assertEquals(uri.getPort(), 8080); + final MapDbAppFileSystemConfig delegateConfig = load.getDelegateConfig(); + assertEquals(delegateConfig.getDriveName(), "test-fs"); + assertFalse(delegateConfig.isRemotelyAccessible()); + } +} diff --git a/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProviderTest.java b/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProviderTest.java new file mode 100644 index 00000000..402fef5f --- /dev/null +++ b/afs-timeseries-server/src/test/java/com/powsybl/afs/timeseriesserver/TSServerAppFileSystemProviderTest.java @@ -0,0 +1,117 @@ +package com.powsybl.afs.timeseriesserver; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import com.powsybl.afs.AppFileSystem; +import com.powsybl.afs.AppFileSystemProviderContext; +import com.powsybl.afs.mapdb.MapDbAppFileSystemConfig; +import com.powsybl.afs.mapdb.storage.MapDbAppStorage; +import com.powsybl.afs.storage.AbstractAppStorage; +import com.powsybl.afs.storage.EventsBus; +import com.powsybl.afs.storage.InMemoryEventsBus; +import com.powsybl.afs.timeseriesserver.storage.TimeSeriesServerAppStorage; +import com.powsybl.computation.ComputationManager; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockserver.integration.ClientAndServer; +import org.mockserver.model.MediaType; + +import java.io.IOException; +import java.net.URI;
+import java.nio.file.FileSystem; +import java.nio.file.Path; +import java.util.List; + +import static org.junit.Assert.assertTrue; +import static org.mockserver.model.HttpRequest.request; +import static org.mockserver.model.HttpResponse.response; +import static org.junit.Assert.assertEquals; + +public class TSServerAppFileSystemProviderTest { + + private static final int SRV_PORT = 9876; + public static final String DRIVE = "drive"; + + private TSServerAppFileSystemConfig config; + private MapDbAppFileSystemConfig delegateConfig; + + /** + * Mock server to simulate time series provider + */ + private ClientAndServer mockServer; + + /** + * In-memory fs for mapDB + */ + private FileSystem fileSystem; + + /** + * mapDB file + */ + private Path dbFile; + + @Before + public void setUp() { + mockServer = ClientAndServer.startClientAndServer(SRV_PORT); + setupMockServer(); + + // Setup connection parameters + config = new TSServerAppFileSystemConfig(DRIVE, true); + config.setScheme("http"); + config.setHost("localhost"); + config.setPort(mockServer.getPort()); + + // Setup delegate config + fileSystem = Jimfs.newFileSystem(Configuration.unix()); + dbFile = fileSystem.getPath("/db"); + delegateConfig = new MapDbAppFileSystemConfig(DRIVE, true, dbFile); + config.setDelegateConfig(delegateConfig); + } + + /** + * Setup mock server for tests + */ + private void setupMockServer() { + mockServer.when(request() + .withMethod("GET") + .withPath("/v1/timeseries/apps") + ).respond(response() + .withStatusCode(200) + .withContentType(MediaType.APPLICATION_JSON) + .withBody("[]") + ); + mockServer.when(request() + .withMethod("POST") + .withPath("/v1/timeseries/apps/.*") + ).respond(response() + .withStatusCode(200) + .withContentType(MediaType.APPLICATION_JSON) + ); + } + + @After + public void tearDown() throws IOException { + mockServer.stop(); + fileSystem.close(); + } + + @Test + public void provideTest() { + // Build a mock computation context + ComputationManager 
computationManager = Mockito.mock(ComputationManager.class); + final AppFileSystemProviderContext context = new AppFileSystemProviderContext(computationManager, null, new InMemoryEventsBus()); + // Build a new provider + final MapDbAppStorage.MapDbAppStorageProvider delegateAppStorageProvider = (name, path, eventsStore) -> MapDbAppStorage.createMem(name, eventsStore); + final TimeSeriesServerAppStorage.TimeSeriesServerAppStorageProvider appStorageProvider = TimeSeriesServerAppStorage::new; + TSServerAppFileSystemProvider provider = new TSServerAppFileSystemProvider(config, appStorageProvider, delegateAppStorageProvider); + // Check that FS is correct + final List fileSystems = provider.getFileSystems(context); + assertEquals(fileSystems.size(), 1); + assertTrue(fileSystems.get(0) instanceof TSServerAppFileSystem); + final TSServerAppFileSystem fs = (TSServerAppFileSystem) fileSystems.get(0); + assertEquals(fs.getName(), DRIVE); + assertTrue(fs.isRemotelyAccessible()); + } +} diff --git a/afs-timeseries-server/src/test/resources/conf.yaml b/afs-timeseries-server/src/test/resources/conf.yaml new file mode 100644 index 00000000..5b85e3bb --- /dev/null +++ b/afs-timeseries-server/src/test/resources/conf.yaml @@ -0,0 +1,12 @@ +tsserver-app-file-system: + drive-name: "test-fs" + remotely-accessible: false + app: AFS + scheme: http + host: localhost + port: 8080 +delegate-mapdb-app-file-system: + drive-name: "test-fs" + remotely-accessible: false + db-file: "/db/test.db" + diff --git a/pom.xml b/pom.xml index 68ac3230..63b88996 100644 --- a/pom.xml +++ b/pom.xml @@ -64,6 +64,8 @@ afs-storage-api afs-ws afs-spring-server + afs-timeseries-server + afs-timeseries-server-storage