diff --git a/e2e/brickmux_test.go b/e2e/brickmux_test.go
index 5905748dc..0562c0a0f 100644
--- a/e2e/brickmux_test.go
+++ b/e2e/brickmux_test.go
@@ -27,12 +27,11 @@ func TestBrickMux(t *testing.T) {
 	r.Nil(err)
 	r.NotNil(client)
 
-	// Turn on brick mux cluster option
 	optReq := api.ClusterOptionReq{
-		Options: map[string]string{"cluster.brick-multiplex": "on"},
+		Options: map[string]string{"cluster.brick-multiplex": "invalidValue"},
 	}
 	err = client.ClusterOptionSet(optReq)
-	r.Nil(err)
+	r.NotNil(err)
 
 	// Create a 1 x 3 volume
 	var brickPaths []string
diff --git a/e2e/glustershd_test.go b/e2e/glustershd_test.go
index d61d89694..751718250 100644
--- a/e2e/glustershd_test.go
+++ b/e2e/glustershd_test.go
@@ -30,7 +30,7 @@ func testSelfHeal(t *testing.T, tc *testCluster) {
 	//glustershd pid file path
 	pidpath := path.Join(tc.gds[0].Rundir, "glustershd.pid")
 
-	for i := 1; i <= 2; i++ {
+	for i := 1; i <= 4; i++ {
 		brickPath := testTempDir(t, "brick")
 		brickPaths = append(brickPaths, brickPath)
 	}
@@ -39,11 +39,12 @@
 		Name: volname,
 		Subvols: []api.SubvolReq{
 			{
-				ReplicaCount: 2,
+				ReplicaCount: 3,
 				Type:         "replicate",
 				Bricks: []api.BrickReq{
 					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[0]},
-					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[1]},
+					{PeerID: tc.gds[1].PeerID(), Path: brickPaths[1]},
+					{PeerID: tc.gds[2].PeerID(), Path: brickPaths[2]},
 				},
 			},
 		},
@@ -72,14 +73,6 @@
 	getBricksStatus, err := client.BricksStatus(volname)
 	r.Nil(err, fmt.Sprintf("brick status operation failed: %s", err))
 	count := 0
-	for brick := range getBricksStatus {
-		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
-			count++
-		}
-	}
-
-	r.Equal(count, 2)
-
 	for brick := range getBricksStatus {
 		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
 			process, err := os.FindProcess(getBricksStatus[brick].Pid)
diff --git a/e2e/volume_ops_test.go b/e2e/volume_ops_test.go
index b028b8957..5cae2b6dc 100644
--- a/e2e/volume_ops_test.go
+++ b/e2e/volume_ops_test.go
@@ -38,7 +38,7 @@ func TestVolume(t *testing.T) {
 
 	r := require.New(t)
 
-	tc, err := setupCluster(t, "./config/1.toml", "./config/2.toml")
+	tc, err := setupCluster(t, "./config/1.toml", "./config/2.toml", "./config/3.toml")
 	r.Nil(err)
 	defer teardownCluster(tc)
 
diff --git a/glusterd2/brickmux/option.go b/glusterd2/brickmux/option.go
index 2d6c4dabe..2df4e67e0 100644
--- a/glusterd2/brickmux/option.go
+++ b/glusterd2/brickmux/option.go
@@ -45,13 +45,18 @@ func getMaxBricksPerProcess() (int, error) {
 
 // validateOption validates brick mux options
 func validateOption(option, value string) error {
+	if option == "cluster.brick-multiplex" {
+		_, err := options.StringToBoolean(value)
+		if err != nil {
+			return err
+		}
+	}
 	if option == "cluster.max-bricks-per-process" {
 		_, err := strconv.Atoi(value)
 		if err != nil {
 			return errors.ErrInvalidIntValue
 		}
 	}
-
 	return nil
 }
 
diff --git a/glusterd2/options/cluster.go b/glusterd2/options/cluster.go
index c4d05adf5..fe5c3bee8 100644
--- a/glusterd2/options/cluster.go
+++ b/glusterd2/options/cluster.go
@@ -30,7 +30,7 @@ var ClusterOptMap = map[string]*ClusterOption{
 	"cluster.shared-storage":         {"cluster.shared-storage", "off", OptionTypeBool, nil},
 	"cluster.op-version":             {"cluster.op-version", strconv.Itoa(gdctx.OpVersion), OptionTypeInt, nil},
 	"cluster.max-op-version":         {"cluster.max-op-version", strconv.Itoa(gdctx.OpVersion), OptionTypeInt, nil},
-	"cluster.brick-multiplex":        {"cluster.brick-multiplex", "off", OptionTypeBool, nil},
+	"cluster.brick-multiplex":        {"cluster.brick-multiplex", "on", OptionTypeBool, nil},
 	"cluster.max-bricks-per-process": {"cluster.max-bricks-per-process", "250", OptionTypeInt, nil},
 	"cluster.localtime-logging":      {"cluster.localtime-logging", "off", OptionTypeBool, nil},
 }