Commit ccc63c8

feat(validation): Add MicroK8s firewall validation tests for egress and pod-to-pod communication (#94)
* feat(validation): Add MicroK8s firewall validation tests for egress and pod-to-pod communication
  - Implemented `ValidateMicroK8sFirewallAllowsEgress` to test egress connectivity from MicroK8s pods.
  - Implemented `ValidateMicroK8sFirewallAllowsPodToPodCommunication` to verify communication between MicroK8s pods.
  - Updated the validation suite to include the new tests for MicroK8s firewall rules.

* refactor(validation): Simplify MicroK8s firewall validation by extracting tests into dedicated functions
  - Removed inline tests for MicroK8s firewall egress and pod-to-pod communication from the validation suite.
  - Introduced `runMicroK8sFirewallValidation` to encapsulate the MicroK8s firewall tests.
  - Enhanced readability and maintainability of the validation code.

* refactor: modify tests to fit the rest of the format

* fix: remove iptables fix for testing

* fix: focus the iptables changes on Shadeform

* fix(validation): Remove unnecessary blank line in MicroK8s firewall validation tests

* refactor(validation): Extract firewall validation tests into a dedicated function
  - Introduced `runFirewallSubtests` to encapsulate firewall-related validation tests for improved readability and maintainability.
  - Removed inline firewall tests from `RunInstanceLifecycleValidation` and `RunFirewallValidation`.
  - Ensured a consistent testing structure across validation scenarios.

* refactor(validation): Update parameter order in runFirewallSubtests for consistency
  - Changed the parameter order in `runFirewallSubtests` to place the context first, aligning with common Go practice.
  - Updated all calls to `runFirewallSubtests` to reflect the new parameter order.

* feat(validation): Enhance MicroK8s service readiness checks and installation retries
  - Added a command to wait for the MicroK8s nginx service endpoints to be ready before proceeding with tests, preventing race conditions.
  - Implemented a retry mechanism for the MicroK8s installation process, allowing up to three attempts with delays between retries for improved reliability.
  - Included a check to wait for CoreDNS pods to be ready, ensuring subsequent commands relying on DNS resolution do not fail.

* fix: revert the retries. Didn't help Nebius.

* fix: enable the iptables changes to fix egress
1 parent ec58e36 commit ccc63c8

4 files changed

Lines changed: 264 additions & 39 deletions

internal/validation/suite.go

Lines changed: 36 additions & 39 deletions
@@ -50,6 +50,40 @@ func RunValidationSuite(t *testing.T, config ProviderConfig) {
 	})
 }
 
+func runFirewallSubtests(ctx context.Context, t *testing.T, client v1.CloudInstanceReader, instance *v1.Instance, privateKey string, testPort int) {
+	t.Helper()
+
+	t.Run("ValidateFirewallBlocksPort", func(t *testing.T) {
+		err := v1.ValidateFirewallBlocksPort(ctx, client, instance, privateKey, testPort)
+		require.NoError(t, err, "ValidateFirewallBlocksPort should pass - port should be blocked")
+	})
+
+	t.Run("ValidateDockerFirewallBlocksPort", func(t *testing.T) {
+		err := v1.ValidateDockerFirewallBlocksPort(ctx, client, instance, privateKey, testPort)
+		require.NoError(t, err, "ValidateDockerFirewallBlocksPort should pass - docker port should be blocked")
+	})
+
+	t.Run("ValidateDockerFirewallAllowsEgress", func(t *testing.T) {
+		err := v1.ValidateDockerFirewallAllowsEgress(ctx, client, instance, privateKey)
+		require.NoError(t, err, "ValidateDockerFirewallAllowsEgress should pass - egress should be allowed")
+	})
+
+	t.Run("ValidateDockerFirewallAllowsContainerToContainerCommunication", func(t *testing.T) {
+		err := v1.ValidateDockerFirewallAllowsContainerToContainerCommunication(ctx, client, instance, privateKey)
+		require.NoError(t, err, "ValidateDockerFirewallAllowsContainerToContainerCommunication should pass - container to container communication should be allowed")
+	})
+
+	t.Run("ValidateMicroK8sFirewallAllowsEgress", func(t *testing.T) {
+		err := v1.ValidateMicroK8sFirewallAllowsEgress(ctx, client, instance, privateKey)
+		require.NoError(t, err, "ValidateMicroK8sFirewallAllowsEgress should pass - microk8s pod egress should be allowed")
+	})
+
+	t.Run("ValidateMicroK8sFirewallAllowsPodToPodCommunication", func(t *testing.T) {
+		err := v1.ValidateMicroK8sFirewallAllowsPodToPodCommunication(ctx, client, instance, privateKey)
+		require.NoError(t, err, "ValidateMicroK8sFirewallAllowsPodToPodCommunication should pass - microk8s pod to pod communication should be allowed")
+	})
+}
+
 func RunInstanceLifecycleValidation(t *testing.T, config ProviderConfig) {
 	if testing.Short() {
 		t.Skip("Skipping validation tests in short mode")
@@ -119,25 +153,7 @@ func RunInstanceLifecycleValidation(t *testing.T, config ProviderConfig) {
 		require.NoError(t, err, "ValidateInstanceImage should pass")
 	})
 
-	t.Run("ValidateFirewallBlocksPort", func(t *testing.T) {
-		err := v1.ValidateFirewallBlocksPort(ctx, client, instance, ssh.GetTestPrivateKey(), v1.DefaultFirewallTestPort)
-		require.NoError(t, err, "ValidateFirewallBlocksPort should pass - non-allowed port should be blocked")
-	})
-
-	t.Run("ValidateDockerFirewallBlocksPort", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallBlocksPort(ctx, client, instance, ssh.GetTestPrivateKey(), v1.DefaultFirewallTestPort)
-		require.NoError(t, err, "ValidateDockerFirewallBlocksPort should pass - docker port should be blocked by iptables")
-	})
-
-	t.Run("ValidateDockerFirewallAllowsEgress", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallAllowsEgress(ctx, client, instance, ssh.GetTestPrivateKey())
-		require.NoError(t, err, "ValidateDockerFirewallAllowsEgress should pass - egress should be allowed")
-	})
-
-	t.Run("ValidateDockerFirewallAllowsContainerToContainerCommunication", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallAllowsContainerToContainerCommunication(ctx, client, instance, ssh.GetTestPrivateKey())
-		require.NoError(t, err, "ValidateDockerFirewallAllowsContainerToContainerCommunication should pass - container to container communication should be allowed")
-	})
+	runFirewallSubtests(ctx, t, client, instance, ssh.GetTestPrivateKey(), v1.DefaultFirewallTestPort)
 
 	if capabilities.IsCapable(v1.CapabilityStopStartInstance) && instance.Stoppable {
 		t.Run("ValidateStopStartInstance", func(t *testing.T) {
@@ -321,26 +337,7 @@ func RunFirewallValidation(t *testing.T, config ProviderConfig, opts FirewallVal
 		testPort = v1.DefaultFirewallTestPort
 	}
 
-	// Test that regular server on 0.0.0.0 is blocked
-	t.Run("ValidateFirewallBlocksPort", func(t *testing.T) {
-		err := v1.ValidateFirewallBlocksPort(ctx, client, instance, ssh.GetTestPrivateKey(), testPort)
-		require.NoError(t, err, "ValidateFirewallBlocksPort should pass - port should be blocked")
-	})
-
-	t.Run("ValidateDockerFirewallBlocksPort", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallBlocksPort(ctx, client, instance, ssh.GetTestPrivateKey(), testPort)
-		require.NoError(t, err, "ValidateDockerFirewallBlocksPort should pass - docker port should be blocked")
-	})
-
-	t.Run("ValidateDockerFirewallAllowsEgress", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallAllowsEgress(ctx, client, instance, ssh.GetTestPrivateKey())
-		require.NoError(t, err, "ValidateDockerFirewallAllowsEgress should pass - egress should be allowed")
-	})
-
-	t.Run("ValidateDockerFirewallAllowsContainerToContainerCommunication", func(t *testing.T) {
-		err := v1.ValidateDockerFirewallAllowsContainerToContainerCommunication(ctx, client, instance, ssh.GetTestPrivateKey())
-		require.NoError(t, err, "ValidateDockerFirewallAllowsContainerToContainerCommunication should pass - container to container communication should be allowed")
-	})
+	runFirewallSubtests(ctx, t, client, instance, ssh.GetTestPrivateKey(), testPort)
 
 	// Test that SSH port is accessible (sanity check)
 	t.Run("ValidateSSHPortAccessible", func(t *testing.T) {

v1/networking_validation.go

Lines changed: 216 additions & 0 deletions
@@ -280,6 +280,190 @@ func ValidateDockerFirewallAllowsContainerToContainerCommunication(ctx context.C
 	return nil
 }
 
+func ValidateMicroK8sFirewallAllowsEgress(ctx context.Context, client CloudInstanceReader, instance *Instance, privateKey string) error {
+	var err error
+	instance, err = WaitForInstanceLifecycleStatus(ctx, client, instance, LifecycleStatusRunning, PendingToRunningTimeout)
+	if err != nil {
+		return fmt.Errorf("failed to wait for instance running: %w", err)
+	}
+
+	publicIP := instance.PublicIP
+	if publicIP == "" {
+		return fmt.Errorf("public IP is not available for instance %s", instance.CloudID)
+	}
+
+	sshClient, err := ssh.ConnectToHost(ctx, ssh.ConnectionConfig{
+		User:     instance.SSHUser,
+		HostPort: fmt.Sprintf("%s:%d", publicIP, instance.SSHPort),
+		PrivKey:  privateKey,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to SSH into instance: %w", err)
+	}
+	defer func() { _ = sshClient.Close() }()
+
+	microK8sCmd, err := setupMicroK8sCommand(ctx, sshClient, instance.CloudID)
+	if err != nil {
+		return err
+	}
+
+	// Ensure prior run artifacts do not interfere.
+	_, _, _ = sshClient.RunCommand(ctx, fmt.Sprintf("%s kubectl delete pod mk8s-egress-test --ignore-not-found=true", microK8sCmd))
+
+	cmd := fmt.Sprintf(
+		"%s kubectl run mk8s-egress-test --image=alpine:3.20 --restart=Never --command -- sh -c 'ping -c 3 8.8.8.8'",
+		microK8sCmd,
+	)
+	_, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to create microk8s egress test pod: %w, stderr: %s", err, stderr)
+	}
+
+	defer func() {
+		_, _, _ = sshClient.RunCommand(ctx, fmt.Sprintf("%s kubectl delete pod mk8s-egress-test --ignore-not-found=true", microK8sCmd))
+	}()
+
+	cmd = fmt.Sprintf("%s kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/mk8s-egress-test --timeout=180s", microK8sCmd)
+	_, stderr, err = sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		logsCmd := fmt.Sprintf("%s kubectl logs mk8s-egress-test 2>/dev/null || true", microK8sCmd)
+		logs, _, _ := sshClient.RunCommand(ctx, logsCmd)
+		return fmt.Errorf("microk8s egress test pod did not succeed: %w, stderr: %s, logs: %s", err, stderr, logs)
+	}
+
+	cmd = fmt.Sprintf("%s kubectl logs mk8s-egress-test", microK8sCmd)
+	stdout, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to get microk8s egress test pod logs: %w, stderr: %s", err, stderr)
+	}
+	if !strings.Contains(stdout, "3 packets transmitted, 3 packets received") {
+		return fmt.Errorf("expected successful pod egress ping, got logs: %s", stdout)
+	}
+
+	return nil
+}
+
+func ValidateMicroK8sFirewallAllowsPodToPodCommunication(ctx context.Context, client CloudInstanceReader, instance *Instance, privateKey string) error {
+	var err error
+	instance, err = WaitForInstanceLifecycleStatus(ctx, client, instance, LifecycleStatusRunning, PendingToRunningTimeout)
+	if err != nil {
+		return fmt.Errorf("failed to wait for instance running: %w", err)
+	}
+
+	publicIP := instance.PublicIP
+	if publicIP == "" {
+		return fmt.Errorf("public IP is not available for instance %s", instance.CloudID)
+	}
+
+	sshClient, err := ssh.ConnectToHost(ctx, ssh.ConnectionConfig{
+		User:     instance.SSHUser,
+		HostPort: fmt.Sprintf("%s:%d", publicIP, instance.SSHPort),
+		PrivKey:  privateKey,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to SSH into instance: %w", err)
+	}
+	defer func() { _ = sshClient.Close() }()
+
+	microK8sCmd, err := setupMicroK8sCommand(ctx, sshClient, instance.CloudID)
+	if err != nil {
+		return err
+	}
+
+	cleanupMicroK8sPodToPodArtifacts(ctx, sshClient, microK8sCmd)
+	defer cleanupMicroK8sPodToPodArtifacts(ctx, sshClient, microK8sCmd)
+
+	if err := createMicroK8sNginxPod(ctx, sshClient, microK8sCmd); err != nil {
+		return err
+	}
+	if err := waitForMicroK8sNginxReady(ctx, sshClient, microK8sCmd); err != nil {
+		return err
+	}
+	if err := exposeMicroK8sNginxService(ctx, sshClient, microK8sCmd); err != nil {
+		return err
+	}
+	return runMicroK8sPodToPodTest(ctx, sshClient, microK8sCmd)
+}
+
+func cleanupMicroK8sPodToPodArtifacts(ctx context.Context, sshClient *ssh.Client, microK8sCmd string) {
+	_, _, _ = sshClient.RunCommand(ctx, fmt.Sprintf("%s kubectl delete pod mk8s-nginx mk8s-c2c-test --ignore-not-found=true", microK8sCmd))
+	_, _, _ = sshClient.RunCommand(ctx, fmt.Sprintf("%s kubectl delete service mk8s-nginx-svc --ignore-not-found=true", microK8sCmd))
+}
+
+func createMicroK8sNginxPod(ctx context.Context, sshClient *ssh.Client, microK8sCmd string) error {
+	cmd := fmt.Sprintf("%s kubectl run mk8s-nginx --image=nginx:alpine --restart=Never --port=80", microK8sCmd)
+	_, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to create microk8s nginx pod: %w, stderr: %s", err, stderr)
+	}
+
+	return nil
+}
+
+func waitForMicroK8sNginxReady(ctx context.Context, sshClient *ssh.Client, microK8sCmd string) error {
+	cmd := fmt.Sprintf("%s kubectl wait --for=condition=Ready pod/mk8s-nginx --timeout=180s", microK8sCmd)
+	_, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("microk8s nginx pod did not become ready: %w, stderr: %s", err, stderr)
+	}
+
+	return nil
+}
+
+func exposeMicroK8sNginxService(ctx context.Context, sshClient *ssh.Client, microK8sCmd string) error {
+	cmd := fmt.Sprintf("%s kubectl expose pod mk8s-nginx --name=mk8s-nginx-svc --port=80 --target-port=80", microK8sCmd)
+	_, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to create microk8s nginx service: %w, stderr: %s", err, stderr)
+	}
+
+	// Wait for the service to have endpoints before the test pod tries to connect.
+	// This avoids the race where DNS resolves but the service has no backing endpoints yet.
+	epCmd := fmt.Sprintf(
+		"%s kubectl wait --for=jsonpath='{.subsets[0].addresses[0].ip}' endpoints/mk8s-nginx-svc --timeout=60s",
+		microK8sCmd,
+	)
+	_, stderr, err = sshClient.RunCommand(ctx, epCmd)
+	if err != nil {
+		return fmt.Errorf("service endpoints did not become ready: %w, stderr: %s", err, stderr)
+	}
+
+	return nil
+}
+
+func runMicroK8sPodToPodTest(ctx context.Context, sshClient *ssh.Client, microK8sCmd string) error {
+	// Retry wget up to 10 times with 3-second sleeps so transient DNS propagation
+	// delays don't cause an immediate hard failure.
+	wgetScript := `for i in $(seq 1 10); do wget -q -O- http://mk8s-nginx-svc && exit 0; sleep 3; done; exit 1`
+	cmd := fmt.Sprintf(
+		"%s kubectl run mk8s-c2c-test --image=alpine:3.20 --restart=Never --command -- sh -c '%s'",
+		microK8sCmd, wgetScript,
+	)
+	_, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to create microk8s pod-to-pod test pod: %w, stderr: %s", err, stderr)
+	}
+
+	cmd = fmt.Sprintf("%s kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/mk8s-c2c-test --timeout=180s", microK8sCmd)
+	_, stderr, err = sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		logsCmd := fmt.Sprintf("%s kubectl logs mk8s-c2c-test 2>/dev/null || true", microK8sCmd)
+		logs, _, _ := sshClient.RunCommand(ctx, logsCmd)
+		return fmt.Errorf("microk8s pod-to-pod test pod did not succeed: %w, stderr: %s, logs: %s", err, stderr, logs)
+	}
+
+	cmd = fmt.Sprintf("%s kubectl logs mk8s-c2c-test", microK8sCmd)
+	stdout, stderr, err := sshClient.RunCommand(ctx, cmd)
+	if err != nil {
+		return fmt.Errorf("failed to get microk8s pod-to-pod test pod logs: %w, stderr: %s", err, stderr)
+	}
+	if !strings.Contains(stdout, "Welcome to nginx") {
+		return fmt.Errorf("expected successful pod-to-pod communication, got logs: %s", stdout)
+	}
+
+	return nil
+}
+
 // setupDockerCommand ensures Docker is available and returns the command to use (always with sudo)
 func setupDockerCommand(ctx context.Context, sshClient *ssh.Client, instanceID CloudProviderInstanceID) (string, error) {
 	// Check if Docker is available
@@ -301,6 +485,38 @@ func setupDockerCommand(ctx context.Context, sshClient *ssh.Client, instanceID C
 	return "sudo docker", nil
 }
 
+// setupMicroK8sCommand ensures MicroK8s is available and returns the command to use (always with sudo).
+func setupMicroK8sCommand(ctx context.Context, sshClient *ssh.Client, instanceID CloudProviderInstanceID) (string, error) {
+	checkCmd := "sudo microk8s status --wait-ready --timeout 120"
+	_, _, err := sshClient.RunCommand(ctx, checkCmd)
+	if err != nil {
+		fmt.Printf("MicroK8s not found or not ready, attempting to install on instance %s\n", instanceID)
+		_, stderr, installErr := sshClient.RunCommand(ctx, "sudo snap install microk8s --classic")
+		if installErr != nil {
+			return "", fmt.Errorf("microk8s not available and failed to install: %w, stderr: %s", installErr, stderr)
+		}
+		_, stderr, readyErr := sshClient.RunCommand(ctx, checkCmd)
+		if readyErr != nil {
+			return "", fmt.Errorf("microk8s installed but not ready: %w, stderr: %s", readyErr, stderr)
+		}
+	}
+
+	_, stderr, err := sshClient.RunCommand(ctx, "sudo microk8s enable dns")
+	if err != nil && !strings.Contains(stderr, "Nothing to do for dns") && !strings.Contains(stderr, "is already enabled") {
+		return "", fmt.Errorf("failed to enable microk8s dns addon: %w, stderr: %s", err, stderr)
+	}
+
+	// Wait for CoreDNS pods to be ready before returning. Without this,
+	// subsequent kubectl commands that rely on DNS resolution may fail.
+	dnsWaitCmd := "sudo microk8s kubectl wait --for=condition=Ready pod -l k8s-app=kube-dns -n kube-system --timeout=120s"
+	_, stderr, err = sshClient.RunCommand(ctx, dnsWaitCmd)
+	if err != nil {
+		return "", fmt.Errorf("CoreDNS pods did not become ready: %w, stderr: %s", err, stderr)
+	}
+
+	return "sudo microk8s", nil
+}
+
 // waitForDockerService waits for a Docker container's service to be ready and responding
 func waitForDockerService(ctx context.Context, sshClient *ssh.Client, dockerCmd, containerName string, port int) error {
 	for i := 0; i < 30; i++ { // Try for up to 30 seconds
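The two new validators can also be called directly rather than through the suite. A minimal sketch, assuming a hypothetical provisionTestInstance helper that returns a CloudInstanceReader and a running *Instance; everything else mirrors the suite code above:

func TestMicroK8sFirewallDirect(t *testing.T) {
	ctx := context.Background()
	client, instance := provisionTestInstance(t) // hypothetical helper

	// Egress: an alpine pod pings 8.8.8.8 from inside MicroK8s.
	if err := v1.ValidateMicroK8sFirewallAllowsEgress(ctx, client, instance, ssh.GetTestPrivateKey()); err != nil {
		t.Fatalf("egress validation failed: %v", err)
	}

	// Pod-to-pod: an alpine pod fetches the nginx pod through the ClusterIP service.
	if err := v1.ValidateMicroK8sFirewallAllowsPodToPodCommunication(ctx, client, instance, ssh.GetTestPrivateKey()); err != nil {
		t.Fatalf("pod-to-pod validation failed: %v", err)
	}
}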

v1/providers/nebius/instance.go

Lines changed: 4 additions & 0 deletions
@@ -1815,8 +1815,12 @@ func generateIPTablesCommands() []string {
 		"iptables -A DOCKER-USER -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT",
 		"iptables -A DOCKER-USER -i docker0 ! -o docker0 -j ACCEPT",
 		"iptables -A DOCKER-USER -i br+ ! -o br+ -j ACCEPT",
+		"iptables -A DOCKER-USER -i cni+ ! -o cni+ -j ACCEPT", // TODO: add these back in when we have a way to test it
+		"iptables -A DOCKER-USER -i cali+ ! -o cali+ -j ACCEPT",
 		"iptables -A DOCKER-USER -i docker0 -o docker0 -j ACCEPT",
 		"iptables -A DOCKER-USER -i br+ -o br+ -j ACCEPT",
+		"iptables -A DOCKER-USER -i cni+ -o cni+ -j ACCEPT",
+		"iptables -A DOCKER-USER -i cali+ -o cali+ -j ACCEPT",
 		"iptables -A DOCKER-USER -i lo -j ACCEPT",
 		"iptables -A DOCKER-USER -j DROP",
 		"iptables -A DOCKER-USER -j RETURN", // Expected by Docker

v1/providers/shadeform/firewall.go

Lines changed: 8 additions & 0 deletions
@@ -24,10 +24,14 @@ const (
 	// Allow containers to initiate outbound traffic (default bridge + user-defined bridges).
 	ipTablesAllowDockerUserOutboundInit0 = "iptables -A DOCKER-USER -i docker0 ! -o docker0 -j ACCEPT"
 	ipTablesAllowDockerUserOutboundInit1 = "iptables -A DOCKER-USER -i br+ ! -o br+ -j ACCEPT"
+	ipTablesAllowDockerUserOutboundInit2 = "iptables -A DOCKER-USER -i cni+ ! -o cni+ -j ACCEPT"
+	ipTablesAllowDockerUserOutboundInit3 = "iptables -A DOCKER-USER -i cali+ ! -o cali+ -j ACCEPT"
 
 	// Allow container-to-container on the same bridge.
 	ipTablesAllowDockerUserDockerToDocker0 = "iptables -A DOCKER-USER -i docker0 -o docker0 -j ACCEPT"
 	ipTablesAllowDockerUserDockerToDocker1 = "iptables -A DOCKER-USER -i br+ -o br+ -j ACCEPT"
+	ipTablesAllowDockerUserDockerToDocker2 = "iptables -A DOCKER-USER -i cni+ -o cni+ -j ACCEPT"
+	ipTablesAllowDockerUserDockerToDocker3 = "iptables -A DOCKER-USER -i cali+ -o cali+ -j ACCEPT"
 
 	// Allow inbound traffic on the loopback interface.
 	ipTablesAllowDockerUserInpboundLoopback = "iptables -A DOCKER-USER -i lo -j ACCEPT"
@@ -80,8 +84,12 @@ func (c *ShadeformClient) getIPTablesCommands() []string {
 		ipTablesAllowDockerUserOutbound,
 		ipTablesAllowDockerUserOutboundInit0,
 		ipTablesAllowDockerUserOutboundInit1,
+		ipTablesAllowDockerUserOutboundInit2, // TODO: add these back in when we have a way to test it
+		ipTablesAllowDockerUserOutboundInit3,
 		ipTablesAllowDockerUserDockerToDocker0,
 		ipTablesAllowDockerUserDockerToDocker1,
+		ipTablesAllowDockerUserDockerToDocker2,
+		ipTablesAllowDockerUserDockerToDocker3,
 		ipTablesAllowDockerUserInpboundLoopback,
 		ipTablesDropDockerUserInbound,
 		ipTablesReturnDockerUser, // Expected by Docker
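Because the DOCKER-USER chain is evaluated top-down, the new cni+/cali+ ACCEPT constants must stay ahead of ipTablesDropDockerUserInbound in this slice, or pod traffic dies at the DROP. A sketch of an in-package guard test for that ordering, assuming Go 1.21+ for the slices package; constructing the client as a zero value is an assumption:

func TestDockerUserRuleOrder(t *testing.T) {
	cmds := (&ShadeformClient{}).getIPTablesCommands() // zero-value client: assumed sufficient here
	drop := slices.Index(cmds, ipTablesDropDockerUserInbound)
	if drop == -1 {
		t.Fatal("DROP rule missing from DOCKER-USER command list")
	}
	for _, allow := range []string{
		ipTablesAllowDockerUserOutboundInit2,
		ipTablesAllowDockerUserOutboundInit3,
		ipTablesAllowDockerUserDockerToDocker2,
		ipTablesAllowDockerUserDockerToDocker3,
	} {
		if i := slices.Index(cmds, allow); i == -1 || i > drop {
			t.Errorf("%q must precede the DOCKER-USER DROP", allow)
		}
	}
}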
