Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,23 +8,24 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.extern.slf4j.Slf4j;

/**
* A pool of pre-warmed {@link ContainerEntry} instances.
*
* <p>All containers are started in parallel during {@link #initialize()}, paying the ~45s Docker
* Compose startup cost only once. Scenarios borrow a container via {@link #acquire()} and return
* it via {@link #release(ContainerEntry)} after teardown, allowing the next scenario to reuse it
* immediately without any cold-start overhead.
* <p>All containers are started in parallel on the first {@link #acquire()} call, paying the
* Docker Compose startup cost only once per JVM. Scenarios borrow a container via
* {@link #acquire()} and return it via {@link #release(ContainerEntry)} after teardown.
*
* <p>Pool size is controlled by the system property {@code flagd.e2e.pool.size} (default: 2).
* <p>Cleanup is handled automatically via a JVM shutdown hook — no explicit lifecycle calls are
* needed from test classes. This means multiple test classes (e.g. several {@code @Suite} runners
* or {@code @TestFactory} methods) share the same pool across the entire JVM lifetime without
* redundant container startups.
*
* <p>Multiple test classes may share the same JVM fork (Surefire {@code reuseForks=true}). Each
* class calls {@link #initialize()} and {@link #shutdown()} once. A reference counter ensures
* that containers are only started on the first {@code initialize()} call and only stopped when
* the last {@code shutdown()} call is made, preventing one class from destroying containers that
* are still in use by another class running concurrently in the same JVM.
* <p>Pool size is controlled by the system property {@code flagd.e2e.pool.size}
* (default: min(availableProcessors, 4)).
*/
@Slf4j
public class ContainerPool {
Expand All @@ -34,52 +35,92 @@ public class ContainerPool {

private static final BlockingQueue<ContainerEntry> pool = new LinkedBlockingQueue<>();
private static final List<ContainerEntry> all = new ArrayList<>();
private static final java.util.concurrent.atomic.AtomicInteger refCount =
new java.util.concurrent.atomic.AtomicInteger(0);
private static final AtomicBoolean initialized = new AtomicBoolean(false);

public static void initialize() throws Exception {
if (refCount.getAndIncrement() > 0) {
log.info("Container pool already initialized (refCount={}), reusing existing pool.", refCount.get());
/**
* JVM-wide semaphore that serializes disruptive container operations (stop/restart) across all
* parallel Cucumber engines. Only one scenario at a time may bring a container down, preventing
* cascading initialization timeouts in sibling scenarios that are waiting for a container slot.
*/
private static final Semaphore restartSlot = new Semaphore(1);

// Register a JVM-wide shutdown hook so containers are stopped exactly once, when the JVM exits.
// This replaces explicit per-test-class lifecycle calls; stopAll() is the single teardown path.
static {
Runtime.getRuntime().addShutdownHook(new Thread(ContainerPool::stopAll, "container-pool-shutdown"));
}

/**
 * Borrows a pre-warmed container, blocking until one becomes free.
 *
 * <p>The pool is lazily started on the first invocation. Callers are responsible for handing the
 * container back via {@link #release(ContainerEntry)} once the scenario has finished.
 *
 * @return a started {@link ContainerEntry} ready for use
 * @throws Exception if pool initialization fails or the wait is interrupted
 */
public static ContainerEntry acquire() throws Exception {
    ensureInitialized();
    ContainerEntry borrowed = pool.take();
    return borrowed;
}

/**
 * Hands a container back to the pool, making it immediately available to the next waiting
 * scenario.
 *
 * @param entry the container previously obtained from {@link #acquire()}
 */
public static void release(ContainerEntry entry) {
    pool.offer(entry);
}

/**
 * Claims the JVM-wide restart slot before a container is stopped or restarted, blocking until
 * no other scenario holds it. Must be paired with {@link #releaseRestartSlot()} in the scenario
 * {@code @After} hook.
 *
 * @throws InterruptedException if the current thread is interrupted while waiting for the slot
 */
public static void acquireRestartSlot() throws InterruptedException {
    log.debug("Acquiring restart slot...");
    restartSlot.acquire();
    log.debug("Restart slot acquired.");
}

/**
 * Returns the JVM-wide restart slot claimed by {@link #acquireRestartSlot()}, allowing the next
 * waiting scenario to perform a disruptive container operation.
 */
public static void releaseRestartSlot() {
    restartSlot.release();
    log.debug("Restart slot released.");
}

private static void ensureInitialized() throws Exception {
if (initialized.get()) {
return;
}
log.info("Starting container pool of size {}...", POOL_SIZE);
ExecutorService executor = Executors.newFixedThreadPool(POOL_SIZE);
try {
List<Future<ContainerEntry>> futures = new ArrayList<>();
for (int i = 0; i < POOL_SIZE; i++) {
futures.add(executor.submit(ContainerEntry::start));
}
for (Future<ContainerEntry> future : futures) {
ContainerEntry entry = future.get();
pool.add(entry);
all.add(entry);
synchronized (ContainerPool.class) {
if (!initialized.compareAndSet(false, true)) {
return;
}
} catch (Exception e) {
// Stop any containers that started successfully before the failure
all.forEach(entry -> {
try {
entry.stop();
} catch (IOException suppressed) {
e.addSuppressed(suppressed);
log.info("Starting container pool of size {}...", POOL_SIZE);
ExecutorService executor = Executors.newFixedThreadPool(POOL_SIZE);
try {
List<Future<ContainerEntry>> futures = new ArrayList<>();
for (int i = 0; i < POOL_SIZE; i++) {
futures.add(executor.submit(ContainerEntry::start));
}
for (Future<ContainerEntry> future : futures) {
ContainerEntry entry = future.get();
pool.add(entry);
all.add(entry);
}
});
pool.clear();
all.clear();
refCount.decrementAndGet();
throw e;
} finally {
executor.shutdown();
} catch (Exception e) {
all.forEach(entry -> {
try {
entry.stop();
} catch (IOException suppressed) {
e.addSuppressed(suppressed);
}
});
pool.clear();
all.clear();
initialized.set(false);
throw e;
} finally {
executor.shutdown();
}
log.info("Container pool ready ({} containers).", POOL_SIZE);
}
log.info("Container pool ready ({} containers).", POOL_SIZE);
}

public static void shutdown() {
int remaining = refCount.decrementAndGet();
if (remaining > 0) {
log.info("Container pool still in use by {} class(es), deferring shutdown.", remaining);
return;
}
log.info("Last shutdown call — stopping all containers.");
private static void stopAll() {
if (all.isEmpty()) return;
log.info("Shutdown hook — stopping all containers.");
all.forEach(entry -> {
try {
entry.stop();
Expand All @@ -90,17 +131,4 @@ public static void shutdown() {
pool.clear();
all.clear();
}

/**
* Borrow a container from the pool, blocking until one becomes available.
* The caller MUST call {@link #release(ContainerEntry)} when done.
*/
public static ContainerEntry acquire() throws InterruptedException {
return pool.take();
}

/** Return a container to the pool so the next scenario can use it. */
public static void release(ContainerEntry entry) {
pool.add(entry);
}
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import dev.openfeature.contrib.providers.flagd.Config;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Optional;
import org.testcontainers.containers.ComposeContainer;
import org.testcontainers.containers.ContainerState;
Expand Down Expand Up @@ -29,4 +32,39 @@ public static String getLaunchpadUrl(ComposeContainer container) {
})
.orElseThrow(() -> new RuntimeException("Could not find launchpad url"));
}

/**
 * Blocks until the given flagd service port accepts TCP connections, or the timeout elapses.
 *
 * <p>The launchpad's {@code /start} endpoint polls flagd's HTTP {@code /readyz} before returning,
 * but the gRPC ports (8013, 8015) may become available slightly later. Waiting here prevents
 * {@code setProviderAndWait} from timing out under parallel load.
 *
 * <p>Returns silently (without throwing) if the deadline elapses; callers then fail later with a
 * clearer provider-level timeout.
 *
 * @param container the running compose stack that hosts the {@code flagd} service
 * @param resolver  the resolver mode; modes other than RPC / IN_PROCESS have no gRPC port and
 *                  cause an immediate return
 * @param timeoutMs maximum total time to wait, in milliseconds
 * @throws InterruptedException if interrupted while sleeping between connection attempts
 */
public static void waitForGrpcPort(ComposeContainer container, Config.Resolver resolver, long timeoutMs)
        throws InterruptedException {
    final int internalPort;
    switch (resolver) {
        case RPC:
            internalPort = 8013;
            break;
        case IN_PROCESS:
            internalPort = 8015;
            break;
        default:
            // Other resolver modes do not expose a dedicated gRPC port; nothing to wait for.
            return;
    }
    ContainerState state = container
            .getContainerByServiceName("flagd")
            .orElseThrow(() -> new RuntimeException("Could not find flagd container"));
    String host = state.getHost();
    int mappedPort = state.getMappedPort(internalPort);
    // Use the monotonic clock for the deadline: System.currentTimeMillis() can jump with NTP
    // adjustments, which would silently stretch or truncate the timeout under load.
    long deadlineNanos = System.nanoTime() + timeoutMs * 1_000_000L;
    // Overflow-safe comparison per the System.nanoTime() contract: compare differences, not values.
    while (System.nanoTime() - deadlineNanos < 0) {
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, mappedPort), 100);
            return; // Port is accepting connections.
        } catch (IOException ignored) {
            // Not ready yet (connection refused or connect timeout) — back off briefly and retry.
            Thread.sleep(50);
        }
    }
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
package dev.openfeature.contrib.providers.flagd.e2e;

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import org.junit.platform.engine.TestExecutionResult;
import org.junit.platform.engine.reporting.ReportEntry;
import org.junit.platform.launcher.TestExecutionListener;
import org.junit.platform.launcher.TestIdentifier;
import org.junit.platform.launcher.TestPlan;

/**
 * Records the full lifecycle of a JUnit Platform test execution — start, finish, and skip events
 * for every node in the test plan (containers and tests alike). The captured results are later
 * replayed as JUnit Jupiter {@link org.junit.jupiter.api.DynamicTest} instances so IDEs can render
 * the Cucumber scenario tree.
 */
@Slf4j
class CucumberResultListener implements TestExecutionListener {

    // Unique IDs of nodes that received executionStarted.
    private final Set<String> startedIds = ConcurrentHashMap.newKeySet();
    // Finished nodes, keyed by unique ID, with their terminal result.
    private final Map<String, TestExecutionResult> finishedResults = new ConcurrentHashMap<>();
    // Nodes skipped before starting, keyed by unique ID, mapped to the skip reason.
    private final Map<String, String> skipReasons = new ConcurrentHashMap<>();

    @Override
    public void testPlanExecutionStarted(TestPlan testPlan) {
        log.debug("Cucumber execution started");
    }

    @Override
    public void testPlanExecutionFinished(TestPlan testPlan) {
        log.debug(
                "Cucumber execution finished — started={}, finished={}, skipped={}",
                startedIds.size(),
                finishedResults.size(),
                skipReasons.size());
    }

    @Override
    public void executionStarted(TestIdentifier id) {
        log.debug(" START {}", id.getDisplayName());
        startedIds.add(id.getUniqueId());
    }

    @Override
    public void executionFinished(TestIdentifier id, TestExecutionResult result) {
        finishedResults.put(id.getUniqueId(), result);
        if (result.getStatus() == TestExecutionResult.Status.FAILED) {
            log.debug(
                    " FAIL {} — {}",
                    id.getDisplayName(),
                    result.getThrowable().map(Throwable::getMessage).orElse("(no message)"));
        } else {
            log.debug(" {} {}", result.getStatus(), id.getDisplayName());
        }
    }

    @Override
    public void executionSkipped(TestIdentifier id, String reason) {
        skipReasons.put(id.getUniqueId(), reason);
        log.debug(" SKIP {} — {}", id.getDisplayName(), reason);
    }

    @Override
    public void dynamicTestRegistered(TestIdentifier id) {
        log.debug(" DYN {}", id.getDisplayName());
    }

    @Override
    public void reportingEntryPublished(TestIdentifier id, ReportEntry entry) {
        log.debug(" REPORT {} — {}", id.getDisplayName(), entry);
    }

    /** Whether the node with the given unique ID had {@code executionStarted} called. */
    boolean wasStarted(String uniqueId) {
        return startedIds.contains(uniqueId);
    }

    /** Whether the node was skipped before starting. */
    boolean wasSkipped(String uniqueId) {
        return skipReasons.containsKey(uniqueId);
    }

    /** The skip reason for a skipped node, or {@code null} if not skipped. */
    String getSkipReason(String uniqueId) {
        return skipReasons.get(uniqueId);
    }

    /** Whether a finished result was recorded for the given node. */
    boolean hasResult(String uniqueId) {
        return finishedResults.containsKey(uniqueId);
    }

    /**
     * The recorded {@link TestExecutionResult}, or {@code null} if the node never finished.
     * Use {@link #hasResult} to distinguish "finished with success" from "never finished".
     */
    TestExecutionResult getResult(String uniqueId) {
        return finishedResults.get(uniqueId);
    }
}
Loading
Loading