diff --git a/e2e/smartvol_ops_test.go b/e2e/smartvol_ops_test.go
index 3e8b8331d..8dea52328 100644
--- a/e2e/smartvol_ops_test.go
+++ b/e2e/smartvol_ops_test.go
@@ -29,7 +29,7 @@ func brickSizeTest(brickpath string, min uint64, max uint64) error {
 func checkZeroLvs(r *require.Assertions) {
 	for i := 1; i < 3; i++ {
-		nlv, err := numberOfLvs(fmt.Sprintf("vg-dev-gluster_loop%d", i))
+		nlv, err := numberOfLvs(fmt.Sprintf("gluster-dev-gluster_loop%d", i))
 		r.Nil(err)
 		if err == nil {
 			r.Equal(0, nlv)
diff --git a/glusterd2/brick/types.go b/glusterd2/brick/types.go
index 39af766e9..f862444eb 100644
--- a/glusterd2/brick/types.go
+++ b/glusterd2/brick/types.go
@@ -33,6 +33,7 @@ type MountInfo struct {
 // Brickinfo is the static information about the brick
 type Brickinfo struct {
 	ID             uuid.UUID
+	Name           string
 	Hostname       string
 	PeerID         uuid.UUID
 	Path           string
@@ -42,6 +43,7 @@ type Brickinfo struct {
 	Type           Type
 	Decommissioned bool
 	PType          ProvisionType
+	Device         string
 	MountInfo
 }
 
diff --git a/glusterd2/bricksplanner/planner.go b/glusterd2/bricksplanner/planner.go
index 33faab65e..fddd0094f 100644
--- a/glusterd2/bricksplanner/planner.go
+++ b/glusterd2/bricksplanner/planner.go
@@ -7,7 +7,7 @@ import (
 	"github.com/gluster/glusterd2/glusterd2/volume"
 	"github.com/gluster/glusterd2/pkg/api"
-	"github.com/gluster/glusterd2/plugins/device/deviceutils"
+	"github.com/gluster/glusterd2/pkg/lvmutils"
 
 	config "github.com/spf13/viper"
 )
 
@@ -76,7 +76,7 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
 
 	// User input will be in MBs, convert to KBs for all
 	// internal usage
-	subvolSize := deviceutils.MbToKb(req.Size)
+	subvolSize := lvmutils.MbToKb(req.Size)
 	if numSubvols > 1 {
 		subvolSize = subvolSize / uint64(numSubvols)
 	}
@@ -118,19 +118,13 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
 		for j := 0; j < subvolplanner.BricksCount(); j++ {
 			eachBrickSize := subvolplanner.BrickSize(j)
 			brickType := subvolplanner.BrickType(j)
-			eachBrickTpSize := uint64(float64(eachBrickSize) * req.SnapshotReserveFactor)
 
 			bricks = append(bricks, api.BrickReq{
-				Type:           brickType,
-				Path:           fmt.Sprintf("%s/%s/subvol%d/brick%d/brick", bricksMountRoot, req.Name, i+1, j+1),
-				Mountdir:       "/brick",
-				TpName:         fmt.Sprintf("tp_%s_s%d_b%d", req.Name, i+1, j+1),
-				LvName:         fmt.Sprintf("brick_%s_s%d_b%d", req.Name, i+1, j+1),
-				Size:           eachBrickSize,
-				TpSize:         eachBrickTpSize,
-				TpMetadataSize: deviceutils.GetPoolMetadataSize(eachBrickTpSize),
-				FsType:         "xfs",
-				MntOpts:        "rw,inode64,noatime,nouuid",
+				Type:     brickType,
+				Path:     fmt.Sprintf("%s/%s/subvol%d/brick%d/brick", bricksMountRoot, req.Name, i+1, j+1),
+				Mountdir: "/brick",
+				Name:     fmt.Sprintf("%s_s%d_b%d", req.Name, i+1, j+1),
+				Size:     eachBrickSize,
 			})
 		}
 
@@ -177,14 +171,15 @@ func PlanBricks(req *api.VolCreateReq) error {
 		// with device with expected space available.
 		numBricksAllocated := 0
 		for bidx, b := range sv.Bricks {
-			totalsize := b.TpSize + b.TpMetadataSize
+			tpSize := uint64(float64(b.Size) * req.SnapshotReserveFactor)
+			tpMetadataSize := lvmutils.GetTpMetadataSize(tpSize)
+			totalsize := tpSize + tpMetadataSize
 
 			for _, vg := range availableVgs {
 				_, zoneUsed := zones[vg.Zone]
 				if vg.AvailableSize >= totalsize && !zoneUsed && !vg.Used {
 					subvols[idx].Bricks[bidx].PeerID = vg.PeerID
-					subvols[idx].Bricks[bidx].VgName = vg.Name
-					subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
+					subvols[idx].Bricks[bidx].Device = vg.Device
 
 					zones[vg.Zone] = struct{}{}
 					numBricksAllocated++
@@ -205,14 +200,15 @@ func PlanBricks(req *api.VolCreateReq) error {
 		// but enough space is available in the devices
 		for bidx := numBricksAllocated; bidx < len(sv.Bricks); bidx++ {
 			b := sv.Bricks[bidx]
-			totalsize := b.TpSize + b.TpMetadataSize
+			tpSize := uint64(float64(b.Size) * req.SnapshotReserveFactor)
+			tpMetadataSize := lvmutils.GetTpMetadataSize(tpSize)
+			totalsize := tpSize + tpMetadataSize
 
 			for _, vg := range availableVgs {
 				_, zoneUsed := zones[vg.Zone]
 				if vg.AvailableSize >= totalsize && !zoneUsed {
 					subvols[idx].Bricks[bidx].PeerID = vg.PeerID
-					subvols[idx].Bricks[bidx].VgName = vg.Name
-					subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
+					subvols[idx].Bricks[bidx].Device = vg.Device
 
 					zones[vg.Zone] = struct{}{}
 					numBricksAllocated++
diff --git a/glusterd2/bricksplanner/utils.go b/glusterd2/bricksplanner/utils.go
index 6cfce8afd..a81407add 100644
--- a/glusterd2/bricksplanner/utils.go
+++ b/glusterd2/bricksplanner/utils.go
@@ -1,7 +1,6 @@
 package bricksplanner
 
 import (
-	"encoding/json"
 	"sort"
 	"strings"
 
@@ -10,6 +9,7 @@ import (
 	"github.com/gluster/glusterd2/pkg/api"
 	"github.com/gluster/glusterd2/pkg/utils"
 	deviceapi "github.com/gluster/glusterd2/plugins/device/api"
+	"github.com/gluster/glusterd2/plugins/device/deviceutils"
 )
 
 var subvolPlanners = make(map[string]SubvolPlanner)
@@ -24,8 +24,7 @@ type SubvolPlanner interface {
 
 // Vg represents Virtual Volume Group
 type Vg struct {
-	Name          string
-	DeviceName    string
+	Device        string
 	PeerID        string
 	Zone          string
 	State         string
@@ -71,14 +70,8 @@ func getAvailableVgs(req *api.VolCreateReq) ([]Vg, error) {
 			continue
 		}
 
-		devicesRaw, exists := p.Metadata["_devices"]
-		if !exists {
-			// No device registered for this peer
-			continue
-		}
-
-		var deviceInfo []deviceapi.Info
-		if err := json.Unmarshal([]byte(devicesRaw), &deviceInfo); err != nil {
+		deviceInfo, err := deviceutils.GetDevices(p.ID.String())
+		if err != nil {
 			return nil, err
 		}
 
@@ -89,8 +82,7 @@ func getAvailableVgs(req *api.VolCreateReq) ([]Vg, error) {
 			}
 
 			vgs = append(vgs, Vg{
-				DeviceName:    d.Name,
-				Name:          d.VgName,
+				Device:        d.Device,
 				PeerID:        p.ID.String(),
 				Zone:          peerzone,
 				State:         d.State,
diff --git a/glusterd2/commands/volumes/volume-create-txn.go b/glusterd2/commands/volumes/volume-create-txn.go
index 5124f550e..daa407690 100644
--- a/glusterd2/commands/volumes/volume-create-txn.go
+++ b/glusterd2/commands/volumes/volume-create-txn.go
@@ -128,6 +128,7 @@ func newVolinfo(req *api.VolCreateReq) (*volume.Volinfo, error) {
 		DistCount:             len(req.Subvols),
 		SnapList:              []string{},
 		SnapshotReserveFactor: req.SnapshotReserveFactor,
+		Provisioner:           req.Provisioner,
 		Auth: volume.VolAuth{
 			Username: uuid.NewRandom().String(),
 			Password: uuid.NewRandom().String(),
diff --git a/glusterd2/commands/volumes/volume-delete.go b/glusterd2/commands/volumes/volume-delete.go
index 0a8ab765a..6954088b5 100644
--- a/glusterd2/commands/volumes/volume-delete.go
+++ b/glusterd2/commands/volumes/volume-delete.go
@@ -78,7 +78,9 @@ func volumeDeleteHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	bricksAutoProvisioned := volinfo.IsAutoProvisioned() || volinfo.IsSnapshotProvisioned()
+	// TODO: Include volinfo.IsSnapshotProvisioned() once
+	// Snapshot integrated with Provisioner interface
+	bricksAutoProvisioned := volinfo.IsAutoProvisioned()
 	txn.Steps = []*transaction.Step{
 		{
 			DoFunc: "vol-delete.CleanBricks",
diff --git a/glusterd2/commands/volumes/volume-smartvol-txn.go b/glusterd2/commands/volumes/volume-smartvol-txn.go
index 2132c6eac..17de51e2b 100644
--- a/glusterd2/commands/volumes/volume-smartvol-txn.go
+++ b/glusterd2/commands/volumes/volume-smartvol-txn.go
@@ -1,11 +1,10 @@
 package volumecommands
 
 import (
-	"errors"
-	"os"
 	"strings"
 
 	"github.com/gluster/glusterd2/glusterd2/gdctx"
+	"github.com/gluster/glusterd2/glusterd2/provisioners"
 	"github.com/gluster/glusterd2/glusterd2/transaction"
 	"github.com/gluster/glusterd2/glusterd2/volume"
 	"github.com/gluster/glusterd2/pkg/api"
@@ -21,74 +20,76 @@ func txnPrepareBricks(c transaction.TxnCtx) error {
 		return err
 	}
 
+	var provisioner provisioners.Provisioner
+	var err error
+	if req.Provisioner == "" {
+		provisioner = provisioners.GetDefault()
+	} else {
+		provisioner, err = provisioners.Get(req.Provisioner)
+		if err != nil {
+			c.Logger().WithError(err).WithField("name", req.Provisioner).Error("invalid provisioner")
+			return err
+		}
+	}
+
 	for _, sv := range req.Subvols {
 		for _, b := range sv.Bricks {
 			if b.PeerID != gdctx.MyUUID.String() {
 				continue
 			}
 
-			// Create Mount directory
-			mountRoot := strings.TrimSuffix(b.Path, b.Mountdir)
-			err := os.MkdirAll(mountRoot, os.ModeDir|os.ModePerm)
-			if err != nil {
-				c.Logger().WithError(err).WithField("path", mountRoot).Error("failed to create brick mount directory")
-				return err
-			}
-
-			// Thin Pool Creation
-			err = deviceutils.CreateTP(b.VgName, b.TpName, b.TpSize, b.TpMetadataSize)
+			err := provisioner.CreateBrick(b.Device, b.Name, b.Size, req.SnapshotReserveFactor)
 			if err != nil {
 				c.Logger().WithError(err).WithFields(log.Fields{
-					"vg-name":      b.VgName,
-					"tp-name":      b.TpName,
-					"tp-size":      b.TpSize,
-					"tp-meta-size": b.TpMetadataSize,
-				}).Error("thinpool creation failed")
+					"device":           b.Device,
+					"name":             b.Name,
+					"size":             b.Size,
+					"snapshot-reserve": req.SnapshotReserveFactor,
+				}).Error("brick creation failed")
 				return err
 			}
 
-			// LV Creation
-			err = deviceutils.CreateLV(b.VgName, b.TpName, b.LvName, b.Size)
+			err = provisioner.CreateBrickFS(b.Device, b.Name, "xfs")
 			if err != nil {
 				c.Logger().WithError(err).WithFields(log.Fields{
-					"vg-name": b.VgName,
-					"tp-name": b.TpName,
-					"lv-name": b.LvName,
-					"size":    b.Size,
-				}).Error("lvcreate failed")
-				return err
-			}
-
-			// Make Filesystem
-			err = deviceutils.MakeXfs(b.DevicePath)
-			if err != nil {
-				c.Logger().WithError(err).WithField("dev", b.DevicePath).Error("mkfs.xfs failed")
+					"device": b.Device,
+					"fstype": "xfs",
+				}).Error("create brick filesystem failed")
 				return err
 			}
 
 			// Mount the Created FS
-			err = deviceutils.BrickMount(b.DevicePath, mountRoot)
+			err = provisioner.MountBrick(b.Device, b.Name, b.Path)
 			if err != nil {
 				c.Logger().WithError(err).WithFields(log.Fields{
-					"dev":  b.DevicePath,
-					"path": mountRoot,
+					"device": b.Device,
+					"path":   b.Path,
+					"name":   b.Name,
 				}).Error("brick mount failed")
 				return err
 			}
 
 			// Create a directory in Brick Mount
-			err = os.MkdirAll(b.Path, os.ModeDir|os.ModePerm)
+			err = provisioner.CreateBrickDir(b.Path)
 			if err != nil {
 				c.Logger().WithError(err).WithField(
 					"path", b.Path).Error("failed to create brick directory in mount")
 				return err
 			}
 
-			// Update current Vg free size
-			err = deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.VgName)
+			availableSize, extentSize, err := provisioner.AvailableSize(b.Device)
+			if err != nil {
+				c.Logger().WithError(err).WithField("device", b.Device).
+					Error("failed to get available size of a device")
+				return err
+			}
+			err = deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.Device, availableSize, extentSize)
 			if err != nil {
-				c.Logger().WithError(err).WithField("vg-name", b.VgName).
-					Error("failed to update available size of a device")
+				c.Logger().WithError(err).WithFields(log.Fields{
+					"peerid":        gdctx.MyUUID.String(),
+					"device":        b.Device,
+					"availablesize": availableSize,
+				}).Error("failed to update available size of a device")
 				return err
 			}
 		}
@@ -104,6 +105,18 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
 		return err
 	}
 
+	var provisioner provisioners.Provisioner
+	var err error
+	if req.Provisioner == "" {
+		provisioner = provisioners.GetDefault()
+	} else {
+		provisioner, err = provisioners.Get(req.Provisioner)
+		if err != nil {
+			c.Logger().WithError(err).WithField("name", req.Provisioner).Error("invalid provisioner")
+			return err
+		}
+	}
+
 	for _, sv := range req.Subvols {
 		for _, b := range sv.Bricks {
 
@@ -112,32 +125,34 @@ func txnUndoPrepareBricks(c transaction.TxnCtx) error {
 			}
 
 			// UnMount the Brick
-			mountRoot := strings.TrimSuffix(b.Path, b.Mountdir)
-			err := deviceutils.BrickUnmount(mountRoot)
+			err := provisioner.UnmountBrick(b.Path)
 			if err != nil {
-				c.Logger().WithError(err).WithField("path", mountRoot).Error("brick unmount failed")
+				c.Logger().WithError(err).WithField("path", b.Path).Error("brick unmount failed")
 			}
 
-			// Remove LV
-			err = deviceutils.RemoveLV(b.VgName, b.LvName)
+			// Remove Brick
+			err = provisioner.RemoveBrick(b.Device, b.Name)
 			if err != nil {
 				c.Logger().WithError(err).WithFields(log.Fields{
-					"vg-name": b.VgName,
-					"lv-name": b.LvName,
+					"device": b.Device,
+					"name":   b.Name,
 				}).Error("lv remove failed")
 			}
 
-			// Remove Thin Pool
-			err = deviceutils.RemoveLV(b.VgName, b.TpName)
+			availableSize, extentSize, err := provisioner.AvailableSize(b.Device)
+			if err != nil {
+				c.Logger().WithError(err).WithField("device", b.Device).
+					Error("failed to get available size of a device")
+				return err
+			}
+			err = deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.Device, availableSize, extentSize)
 			if err != nil {
 				c.Logger().WithError(err).WithFields(log.Fields{
-					"vg-name": b.VgName,
-					"tp-name": b.TpName,
-				}).Error("thinpool remove failed")
+					"peerid":        gdctx.MyUUID.String(),
+					"device":        b.Device,
+					"availablesize": availableSize,
+				}).Error("failed to update available size of a device")
 			}
-
-			// Update current Vg free size
-			deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.VgName)
 		}
 	}
 
@@ -152,6 +167,18 @@ func txnCleanBricks(c transaction.TxnCtx) error {
 		return err
 	}
 
+	var provisioner provisioners.Provisioner
+	var err error
+	if volinfo.Provisioner == "" {
+		provisioner = provisioners.GetDefault()
+	} else {
+		provisioner, err = provisioners.Get(volinfo.Provisioner)
+		if err != nil {
+			c.Logger().WithError(err).WithField("name", volinfo.Provisioner).Error("invalid provisioner")
+			return err
+		}
+	}
+
 	for _, b := range volinfo.GetLocalBricks() {
 		// UnMount the Brick if mounted
 		mountRoot := strings.TrimSuffix(b.Path, b.MountInfo.Mountdir)
@@ -163,66 +190,34 @@ func txnCleanBricks(c transaction.TxnCtx) error {
 				return err
 			}
 		} else {
-			err := deviceutils.BrickUnmount(mountRoot)
+			err := provisioner.UnmountBrick(b.Path)
 			if err != nil {
-				c.Logger().WithError(err).WithField("path", mountRoot).
-					Error("brick unmount failed")
+				c.Logger().WithError(err).WithField("path", b.Path).Error("brick unmount failed")
 				return err
 			}
 		}
 
-		parts := strings.Split(b.MountInfo.DevicePath, "/")
-		if len(parts) != 4 {
-			return errors.New("unable to parse device path")
-		}
-		vgname := parts[2]
-		lvname := parts[3]
-		tpname, err := deviceutils.GetThinpoolName(vgname, lvname)
+		err = provisioner.RemoveBrick(b.Device, b.Name)
 		if err != nil {
 			c.Logger().WithError(err).WithFields(log.Fields{
-				"vg-name": vgname,
-				"lv-name": lvname,
-			}).Error("failed to get thinpool name")
-			return err
+				"device": b.Device,
+				"name":   b.Name,
+			}).Error("remove brick failed")
 		}
 
-		// Remove LV
-		err = deviceutils.RemoveLV(vgname, lvname)
+		availableSize, extentSize, err := provisioner.AvailableSize(b.Device)
 		if err != nil {
-			c.Logger().WithError(err).WithFields(log.Fields{
-				"vg-name": vgname,
-				"lv-name": lvname,
-			}).Error("lv remove failed")
+			c.Logger().WithError(err).WithField("device", b.Device).
+				Error("failed to get available size of a device")
 			return err
 		}
-
-		// Remove Thin Pool if LV count is zero, Thinpool will
-		// have more LVs in case of snapshots and clones
-		numLvs, err := deviceutils.NumberOfLvs(vgname, tpname)
+		err = deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), b.Device, availableSize, extentSize)
 		if err != nil {
 			c.Logger().WithError(err).WithFields(log.Fields{
-				"vg-name": vgname,
-				"tp-name": tpname,
-			}).Error("failed to get number of lvs")
-			return err
-		}
-
-		if numLvs == 0 {
-			err = deviceutils.RemoveLV(vgname, tpname)
-			if err != nil {
-				c.Logger().WithError(err).WithFields(log.Fields{
-					"vg-name": vgname,
-					"tp-name": tpname,
-				}).Error("thinpool remove failed")
-				return err
-			}
-		}
-
-		// Update current Vg free size
-		err = deviceutils.UpdateDeviceFreeSize(gdctx.MyUUID.String(), vgname)
-		if err != nil {
-			c.Logger().WithError(err).WithField("vg-name", vgname).
-				Error("failed to update available size of a device")
+				"peerid":        gdctx.MyUUID.String(),
+				"device":        b.Device,
+				"availablesize": availableSize,
+			}).Error("failed to update available size of a device")
 			return err
 		}
 	}
diff --git a/glusterd2/provisioners/provisioners.go b/glusterd2/provisioners/provisioners.go
new file mode 100644
index 000000000..9df6173d8
--- /dev/null
+++ b/glusterd2/provisioners/provisioners.go
@@ -0,0 +1,54 @@
+package provisioners
+
+import (
+	"errors"
+
+	"github.com/gluster/glusterd2/plugins/lvmprovisioner"
+)
+
+// Provisioner represents bricks provisioner
+type Provisioner interface {
+	// Register will be called when user registers a device or directory.
+	// PV and VG will be created in case of lvm plugin
+	Register(devpath string) error
+	// AvailableSize returns available size of the device
+	AvailableSize(devpath string) (uint64, uint64, error)
+	// Unregister will be called when device/directory needs to be removed
+	Unregister(devpath string) error
+	// CreateBrick creates the brick volume
+	CreateBrick(devpath, brickid string, size uint64, bufferFactor float64) error
+	// CreateBrickFS will create the brick filesystem
+	CreateBrickFS(devpath, brickid, fstype string) error
+	// CreateBrickDir will create the brick directory
+	CreateBrickDir(brickPath string) error
+	// MountBrick will mount the brick
+	MountBrick(devpath, brickid, brickPath string) error
+	// UnmountBrick will unmount the brick
+	UnmountBrick(brickPath string) error
+	// RemoveBrick will remove the brick
+	RemoveBrick(devpath, brickid string) error
+}
+
+const defaultProvisionerName = "lvm"
+
+var provisionersMap map[string]Provisioner
+
+// Get returns requested provisioner if exists
+func Get(name string) (Provisioner, error) {
+	prov, exists := provisionersMap[name]
+	if !exists {
+		return nil, errors.New("unsupported provisioner")
+	}
+	return prov, nil
+}
+
+// GetDefault returns default Provisioner
+func GetDefault() Provisioner {
+	return provisionersMap[defaultProvisionerName]
+}
+
+func init() {
+	provisionersMap = map[string]Provisioner{
+		"lvm": lvmprovisioner.Provisioner{},
+	}
+}
diff --git a/glusterd2/utils/mount.go b/glusterd2/utils/mount.go
index d46d7dfbb..aa8998692 100644
--- a/glusterd2/utils/mount.go
+++ b/glusterd2/utils/mount.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"strings"
 
+	"github.com/gluster/glusterd2/glusterd2/provisioners"
 	"github.com/gluster/glusterd2/glusterd2/volume"
 	"github.com/gluster/glusterd2/pkg/utils"
 
@@ -46,6 +47,28 @@ func MountLocalBricks() error {
 			continue
 		}
 
+		if v.Provisioner != "" {
+			provisioner, err := provisioners.Get(v.Provisioner)
+			if err != nil {
+				log.WithError(err).WithFields(log.Fields{
+					"volume":      v.Name,
+					"provisioner": v.Provisioner,
+				}).Error("unable to get provisioner")
+				continue
+			}
+
+			err = provisioner.MountBrick(b.Device, b.Name, b.Path)
+			if err != nil {
+				log.WithError(err).WithFields(log.Fields{
+					"volume": v.Name,
+					"device": b.Device,
+					"name":   b.Name,
+					"path":   b.Path,
+				}).Error("brick mount failed")
+			}
+			continue
+		}
+
 		err := utils.ExecuteCommandRun("mount", "-o", b.MountInfo.MntOpts, b.MountInfo.DevicePath, mountRoot)
 		if err != nil {
 			log.WithError(err).WithFields(log.Fields{
diff --git a/glusterd2/volume/struct.go b/glusterd2/volume/struct.go
index c94d7d732..8401afad8 100644
--- a/glusterd2/volume/struct.go
+++ b/glusterd2/volume/struct.go
@@ -98,6 +98,7 @@ type Volinfo struct {
 	SnapList              []string
 	SnapshotReserveFactor float64
 	Capacity              uint64
+	Provisioner           string
 }
 
 // VolAuth represents username and password used by trusted/internal clients
@@ -162,6 +163,8 @@ func NewBrickEntries(bricks []api.BrickReq, volName, volfileID string, volID uui
 	binfo.VolfileID = volfileID
 	binfo.VolumeID = volID
 	binfo.ID = uuid.NewRandom()
+	binfo.Name = b.Name
+	binfo.Device = b.Device
 	binfo.PType = ptype
 
 	if ptype.IsAutoProvisioned() {
diff --git a/pkg/api/volume_req.go b/pkg/api/volume_req.go
index 4b0968368..dae123bd9 100644
--- a/pkg/api/volume_req.go
+++ b/pkg/api/volume_req.go
@@ -2,20 +2,16 @@ package api
 
 // BrickReq represents Brick Request
 type BrickReq struct {
-	Type           string `json:"type"`
-	PeerID         string `json:"peerid"`
-	Path           string `json:"path"`
-	TpMetadataSize uint64 `json:"metadata-size,omitempty"`
-	TpSize         uint64 `json:"thinpool-size,omitempty"`
-	VgName         string `json:"vg-name,omitempty"`
-	TpName         string `json:"thinpool-name,omitempty"`
-	LvName         string `json:"logical-volume,omitempty"`
-	Size           uint64 `json:"size,omitempty"`
-	VgID           string `json:"vg-id,omitempty"`
-	Mountdir       string `json:"mount-dir,omitempty"`
-	DevicePath     string `json:"device-path,omitempty"`
-	MntOpts        string `json:"mnt-opts,omitempty"`
-	FsType         string `json:"fs-type,omitempty"`
+	Type       string `json:"type"`
+	PeerID     string `json:"peerid"`
+	Path       string `json:"path"`
+	Name       string `json:"name,omitempty"`
+	Size       uint64 `json:"size,omitempty"`
+	Mountdir   string `json:"mount-dir,omitempty"`
+	Device     string `json:"device,omitempty"`
+	DevicePath string `json:"device-path,omitempty"`
+	MntOpts    string `json:"mnt-opts,omitempty"`
+	FsType     string `json:"fs-type,omitempty"`
 }
 
 // SubvolReq represents Sub volume Request
@@ -59,6 +55,7 @@ type VolCreateReq struct {
 	ExcludeZones       []string `json:"exclude-zones,omitempty"`
 	SubvolZonesOverlap bool     `json:"subvolume-zones-overlap,omitempty"`
 	SubvolType         string   `json:"subvolume-type,omitempty"`
+	Provisioner        string   `json:"provisioner,omitempty"`
 	VolOptionReq
 }
 
diff --git a/pkg/fsutils/utils.go b/pkg/fsutils/utils.go
new file mode 100644
index 000000000..39f50b235
--- /dev/null
+++ b/pkg/fsutils/utils.go
@@ -0,0 +1,31 @@
+package fsutils
+
+import (
+	"syscall"
+
+	"github.com/gluster/glusterd2/pkg/utils"
+)
+
+// MakeXfs creates XFS filesystem
+func MakeXfs(dev string) error {
+	// TODO: Adjust -d su=<>,sw=<> based on RAID/JBOD
+	return utils.ExecuteCommandRun("mkfs.xfs",
+		"-i", "size=512",
+		"-n", "size=8192",
+		dev,
+	)
+}
+
+// Mount mounts the brick LV
+func Mount(dev, mountdir, options string) error {
+	return utils.ExecuteCommandRun("mount",
+		"-o", options,
+		dev,
+		mountdir,
+	)
+}
+
+// Unmount unmounts the Brick
+func Unmount(mountdir string) error {
+	return syscall.Unmount(mountdir, syscall.MNT_FORCE)
+}
diff --git a/plugins/device/deviceutils/utils.go b/pkg/lvmutils/utils.go
similarity index 83%
rename from plugins/device/deviceutils/utils.go
rename to pkg/lvmutils/utils.go
index cb2423afb..5dc778ce1 100644
--- a/plugins/device/deviceutils/utils.go
+++ b/pkg/lvmutils/utils.go
@@ -1,4 +1,4 @@
-package deviceutils
+package lvmutils
 
 import (
 	"errors"
@@ -6,7 +6,6 @@ import (
 	"os/exec"
 	"strconv"
 	"strings"
-	"syscall"
 
 	"github.com/gluster/glusterd2/pkg/utils"
 )
@@ -16,6 +15,16 @@ const (
 	chunkSize = "1280k"
 )
 
+// MbToKb converts Value from Mb to Kb
+func MbToKb(value uint64) uint64 {
+	return value * 1024
+}
+
+// GbToKb converts Value from Gb to Kb
+func GbToKb(value uint64) uint64 {
+	return value * 1024 * 1024
+}
+
 //CreatePV is used to create physical volume.
 func CreatePV(device string) error {
 	return utils.ExecuteCommandRun("pvcreate", "--metadatasize=128M", "--dataalignment=256K", device)
@@ -36,16 +45,6 @@ func RemovePV(device string) error {
 	return utils.ExecuteCommandRun("pvremove", device)
 }
 
-// MbToKb converts Value from Mb to Kb
-func MbToKb(value uint64) uint64 {
-	return value * 1024
-}
-
-// GbToKb converts Value from Gb to Kb
-func GbToKb(value uint64) uint64 {
-	return value * 1024 * 1024
-}
-
 // GetVgAvailableSize gets available size of given Vg
 func GetVgAvailableSize(vgname string) (uint64, uint64, error) {
 	out, err := exec.Command("vgdisplay", "-c", vgname).Output()
@@ -73,8 +72,8 @@ func GetVgAvailableSize(vgname string) (uint64, uint64, error) {
 	return extentSize * freeExtents, extentSize, nil
 }
 
-// GetPoolMetadataSize calculates the thin pool metadata size based on the given thin pool size
-func GetPoolMetadataSize(poolsize uint64) uint64 {
+// GetTpMetadataSize calculates the thin pool metadata size based on the given thin pool size
+func GetTpMetadataSize(poolsize uint64) uint64 {
 	// https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html-single/administration_guide/#Brick_Configuration
 	// Minimum metadata size required is 0.5% and Max upto 16GB
 
@@ -107,30 +106,6 @@ func CreateLV(vgname, tpname, lvname string, lvsize uint64) error {
 	)
 }
 
-// MakeXfs creates XFS filesystem
-func MakeXfs(dev string) error {
-	// TODO: Adjust -d su=<>,sw=<> based on RAID/JBOD
-	return utils.ExecuteCommandRun("mkfs.xfs",
-		"-i", "size=512",
-		"-n", "size=8192",
-		dev,
-	)
-}
-
-// BrickMount mounts the brick LV
-func BrickMount(dev, mountdir string) error {
-	return utils.ExecuteCommandRun("mount",
-		"-o", "rw,inode64,noatime,nouuid",
-		dev,
-		mountdir,
-	)
-}
-
-// BrickUnmount unmounts the Brick
-func BrickUnmount(mountdir string) error {
-	return syscall.Unmount(mountdir, syscall.MNT_FORCE)
-}
-
 // RemoveLV removes Logical Volume
 func RemoveLV(vgName, lvName string) error {
 	return utils.ExecuteCommandRun("lvremove", "-f", vgName+"/"+lvName)
diff --git a/plugins/device/api/req.go b/plugins/device/api/req.go
index e47a48079..4836212b3 100644
--- a/plugins/device/api/req.go
+++ b/plugins/device/api/req.go
@@ -10,7 +10,8 @@ const (
 
 // AddDeviceReq structure
 type AddDeviceReq struct {
-	Device string `json:"device"`
+	Device      string `json:"device"`
+	Provisioner string `json:"provisioner,omitempty"`
 }
 
 // EditDeviceReq structure
diff --git a/plugins/device/api/resp.go b/plugins/device/api/resp.go
index cc62ad301..2bb777212 100644
--- a/plugins/device/api/resp.go
+++ b/plugins/device/api/resp.go
@@ -2,17 +2,18 @@ package api
 
 import (
 	"github.com/gluster/glusterd2/pkg/api"
+
+	"github.com/pborman/uuid"
 )
 
 // Info represents structure in which devices are to be store in Peer Metadata
 type Info struct {
-	Name          string `json:"name"`
-	State         string `json:"state"`
-	VgName        string `json:"vg-name"`
-	AvailableSize uint64 `json:"available-size"`
-	ExtentSize    uint64 `json:"extent-size"`
-	Used          bool   `json:"used"`
-	PeerID        string `json:"peer-id"`
+	Device        string    `json:"device"`
+	State         string    `json:"state"`
+	AvailableSize uint64    `json:"available-size"`
+	ExtentSize    uint64    `json:"extent-size"`
+	Used          bool      `json:"used"`
+	PeerID        uuid.UUID `json:"peer-id"`
 }
 
 // AddDeviceResp is the success response sent to a AddDeviceReq request
diff --git a/plugins/device/deviceutils/errors.go b/plugins/device/deviceutils/errors.go
new file mode 100644
index 000000000..e4a19f3b2
--- /dev/null
+++ b/plugins/device/deviceutils/errors.go
@@ -0,0 +1,10 @@
+package deviceutils
+
+import (
+	"errors"
+)
+
+var (
+	// ErrDeviceNotFound represents device not found error
+	ErrDeviceNotFound = errors.New("device does not exist in the given peer")
+)
diff --git a/plugins/device/deviceutils/store-utils.go b/plugins/device/deviceutils/store-utils.go
index e34c6d873..695761e3d 100644
--- a/plugins/device/deviceutils/store-utils.go
+++ b/plugins/device/deviceutils/store-utils.go
@@ -1,159 +1,114 @@
 package deviceutils
 
 import (
+	"context"
 	"encoding/json"
-	"errors"
 
-	peer "github.com/gluster/glusterd2/glusterd2/peer"
+	"github.com/gluster/glusterd2/glusterd2/store"
 	deviceapi "github.com/gluster/glusterd2/plugins/device/api"
+
+	"github.com/coreos/etcd/clientv3"
+)
+
+const (
+	devicePrefix string = "devices/"
 )
 
 // GetDevices returns devices of specified peer/peers from the store
 // if no peers are specified, it returns devices of all peers
 func GetDevices(peerIds ...string) ([]deviceapi.Info, error) {
-
-	var peers []*peer.Peer
+	var devices []deviceapi.Info
 	var err error
+	var resp *clientv3.GetResponse
+
 	if len(peerIds) > 0 {
 		for _, peerID := range peerIds {
-			var peerInfo *peer.Peer
-			peerInfo, err = peer.GetPeer(peerID)
+			resp, err = store.Get(context.TODO(), devicePrefix+peerID+"/", clientv3.WithPrefix())
 			if err != nil {
 				return nil, err
 			}
-			peers = append(peers, peerInfo)
 		}
 	} else {
-		peers, err = peer.GetPeers()
+		resp, err = store.Get(context.TODO(), devicePrefix, clientv3.WithPrefix())
 		if err != nil {
 			return nil, err
 		}
 	}
 
-	var devices []deviceapi.Info
-	for _, peerInfo := range peers {
-		deviceInfo, err := GetDevicesFromPeer(peerInfo)
-		if err != nil {
-			return nil, err
-		}
-		devices = append(devices, deviceInfo...)
-	}
-	return devices, nil
-}
-
-// GetDevicesFromPeer returns devices from peer object.
-func GetDevicesFromPeer(peerInfo *peer.Peer) ([]deviceapi.Info, error) {
+	for _, kv := range resp.Kvs {
+		var dev deviceapi.Info
 
-	var deviceInfo []deviceapi.Info
-	if _, exists := peerInfo.Metadata["_devices"]; exists {
-		if err := json.Unmarshal([]byte(peerInfo.Metadata["_devices"]), &deviceInfo); err != nil {
+		if err = json.Unmarshal(kv.Value, &dev); err != nil {
 			return nil, err
 		}
+		devices = append(devices, dev)
 	}
-	return deviceInfo, nil
+	return devices, nil
 }
 
-// SetDeviceState sets device state and updates device state in etcd
-func SetDeviceState(peerID, deviceName, deviceState string) error {
+// GetDevice returns device of specified peer and device name
+func GetDevice(peerID, deviceName string) (*deviceapi.Info, error) {
+	var device deviceapi.Info
+	var err error
 
-	devices, err := GetDevices(peerID)
+	resp, err := store.Get(context.TODO(), devicePrefix+peerID+"/"+deviceName)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	index := DeviceInList(deviceName, devices)
-	if index < 0 {
-		return errors.New("device does not exist in the given peer")
+	if resp.Count != 1 {
+		return nil, ErrDeviceNotFound
 	}
 
-	devices[index].State = deviceState
-	return updateDevices(peerID, devices)
-}
-
-func updateDevices(peerID string, devices []deviceapi.Info) error {
-	peerInfo, err := peer.GetPeer(peerID)
-	if err != nil {
-		return err
+	if err = json.Unmarshal(resp.Kvs[0].Value, &device); err != nil {
+		return nil, err
 	}
-	deviceJSON, err := json.Marshal(devices)
-	if err != nil {
-		return err
-	}
-	peerInfo.Metadata["_devices"] = string(deviceJSON)
-	return peer.AddOrUpdatePeer(peerInfo)
-}
 
-// DeviceInList returns index of device if device is present in list else returns -1.
-func DeviceInList(reqDevice string, devices []deviceapi.Info) int {
-	for index, key := range devices {
-		if reqDevice == key.Name {
-			return index
-		}
-	}
-	return -1
+	return &device, nil
 }
 
-// AddDevice adds device to peerinfo
-func AddDevice(device deviceapi.Info) error {
-	deviceDetails, err := GetDevices(device.PeerID)
-	if err != nil {
-		return err
-	}
-	peerInfo, err := peer.GetPeer(device.PeerID)
+// SetDeviceState sets device state and updates device state in etcd
+func SetDeviceState(peerID, deviceName, deviceState string) error {
+	resp, err := store.Get(context.TODO(), devicePrefix+peerID+"/"+deviceName)
 	if err != nil {
 		return err
 	}
-	var devices []deviceapi.Info
-	if deviceDetails != nil {
-		devices = append(deviceDetails, device)
-	} else {
-		devices = append(devices, device)
-	}
-	deviceJSON, err := json.Marshal(devices)
-	if err != nil {
-		return err
+	if resp.Count != 1 {
+		return ErrDeviceNotFound
 	}
-	peerInfo.Metadata["_devices"] = string(deviceJSON)
-	err = peer.AddOrUpdatePeer(peerInfo)
-	if err != nil {
+
+	var dev deviceapi.Info
+	if err := json.Unmarshal(resp.Kvs[0].Value, &dev); err != nil {
 		return err
 	}
-	return nil
+	dev.State = deviceState
+	return AddOrUpdateDevice(dev)
 }
 
-// UpdateDeviceFreeSize updates the actual available size of VG
-func UpdateDeviceFreeSize(peerid, vgname string) error {
-	deviceDetails, err := GetDevices(peerid)
-	if err != nil {
-		return err
-	}
-
-	peerInfo, err := peer.GetPeer(peerid)
+// AddOrUpdateDevice adds device to peerinfo
+func AddOrUpdateDevice(device deviceapi.Info) error {
+	json, err := json.Marshal(device)
 	if err != nil {
 		return err
 	}
 
-	for idx, dev := range deviceDetails {
-		if dev.VgName == vgname {
-			availableSize, extentSize, err := GetVgAvailableSize(vgname)
-			if err != nil {
-				return err
-			}
-			deviceDetails[idx].AvailableSize = availableSize
-			deviceDetails[idx].ExtentSize = extentSize
-		}
-	}
+	storeKey := device.PeerID.String() + "/" + device.Device
 
-	deviceJSON, err := json.Marshal(deviceDetails)
-	if err != nil {
+	if _, err := store.Put(context.TODO(), devicePrefix+storeKey, string(json)); err != nil {
 		return err
 	}
-	peerInfo.Metadata["_devices"] = string(deviceJSON)
-	err = peer.AddOrUpdatePeer(peerInfo)
+	return nil
+}
+
+// UpdateDeviceFreeSize updates the actual available size of VG
+func UpdateDeviceFreeSize(peerid, device string, size uint64, extentSize uint64) error {
+	deviceDetails, err := GetDevice(peerid, device)
 	if err != nil {
 		return err
 	}
-
-	return nil
+	deviceDetails.AvailableSize = size
+	deviceDetails.ExtentSize = extentSize
+	return AddOrUpdateDevice(*deviceDetails)
 }
diff --git a/plugins/device/init.go b/plugins/device/init.go
index f5a6837fc..12af8fee0 100644
--- a/plugins/device/init.go
+++ b/plugins/device/init.go
@@ -47,7 +47,7 @@ func (p *Plugin) RestRoutes() route.Routes {
 			Pattern:      "/devices",
 			Version:      1,
 			ResponseType: utils.GetTypeString((*deviceapi.ListDeviceResp)(nil)),
-			HandlerFunc:  listAllDevicesHandler},
+			HandlerFunc:  deviceListHandler},
 	}
 }
 
diff --git a/plugins/device/rest.go b/plugins/device/rest.go
index f67dbd268..f9815364f 100644
--- a/plugins/device/rest.go
+++ b/plugins/device/rest.go
@@ -41,6 +41,18 @@ func deviceAddHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	defer txn.Done()
 
+	_, err = deviceutils.GetDevice(peerID, req.Device)
+	if err == nil {
+		logger.WithError(err).WithField("device", req.Device).Error("Device already exists")
+		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "device already exists")
+		return
+	}
+
+	if err != deviceutils.ErrDeviceNotFound {
+		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, "failed to get device details from store")
+		return
+	}
+
 	peerInfo, err := peer.GetPeer(peerID)
 	if err != nil {
 		logger.WithError(err).WithField("peerid", peerID).Error("Peer ID not found in store")
@@ -52,19 +64,6 @@ func deviceAddHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	devices, err := deviceutils.GetDevicesFromPeer(peerInfo)
-	if err != nil {
-		logger.WithError(err).WithField("peerid", peerID).Error("Failed to get device from peer")
-		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
-		return
-	}
-
-	if index := deviceutils.DeviceInList(req.Device, devices); index >= 0 {
-		logger.WithError(err).WithField("device", req.Device).Error("Device already exists")
-		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "device already exists")
-		return
-	}
-
 	txn.Nodes = []uuid.UUID{peerInfo.ID}
 	txn.Steps = []*transaction.Step{
 		{
@@ -87,22 +86,29 @@ func deviceAddHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	err = txn.Ctx.Set("provisioner", &req.Provisioner)
+	if err != nil {
+		logger.WithError(err).WithField("key", "provisioner").Error("failed to set key in transaction context")
+		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
+		return
+	}
+
 	err = txn.Do()
 	if err != nil {
 		logger.WithError(err).Error("Transaction to prepare device failed")
 		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, "transaction to prepare device failed")
 		return
 	}
 
-	peerInfo, err = peer.GetPeer(peerID)
+	deviceInfo, err := deviceutils.GetDevice(peerID, req.Device)
 	if err != nil {
-		logger.WithError(err).WithField("peerid", peerID).Error("Failed to get peer from store")
-		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, "failed to get peer from store")
+		logger.WithError(err).WithField("peerid", peerID).Error("Failed to get device from store")
+		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, "failed to get device from store")
 		return
 	}
 
 	// FIXME: Change this to http.StatusCreated when we are able to set
 	// location header with a unique URL that points to created device.
-	restutils.SendHTTPResponse(ctx, w, http.StatusOK, peerInfo)
+	restutils.SendHTTPResponse(ctx, w, http.StatusOK, deviceInfo)
 }
 
 func deviceListHandler(w http.ResponseWriter, r *http.Request) {
@@ -110,12 +116,18 @@
 	ctx := r.Context()
 	logger := gdctx.GetReqLogger(ctx)
 	peerID := mux.Vars(r)["peerid"]
-	if uuid.Parse(peerID) == nil {
+	if peerID != "" && uuid.Parse(peerID) == nil {
 		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "invalid peer-id passed in url")
 		return
 	}
 
-	devices, err := deviceutils.GetDevices(peerID)
+	var devices []deviceapi.Info
+	var err error
+	if peerID == "" {
+		devices, err = deviceutils.GetDevices()
+	} else {
+		devices, err = deviceutils.GetDevices(peerID)
+	}
 	if err != nil {
 		logger.WithError(err).WithField("peerid", peerID).Error(
 			"Failed to get devices for peer")
@@ -169,17 +181,3 @@ func deviceEditHandler(w http.ResponseWriter, r *http.Request) {
 
 	restutils.SendHTTPResponse(ctx, w, http.StatusOK, nil)
 }
-
-func listAllDevicesHandler(w http.ResponseWriter, r *http.Request) {
-
-	ctx := r.Context()
-	logger := gdctx.GetReqLogger(ctx)
-	devices, err := deviceutils.GetDevices()
-	if err != nil {
-		logger.WithError(err).Error(err)
-		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
-		return
-	}
-
-	restutils.SendHTTPResponse(ctx, w, http.StatusOK, devices)
-}
diff --git a/plugins/device/transaction.go b/plugins/device/transaction.go
index 65ac5baa7..40aae491b 100644
--- a/plugins/device/transaction.go
+++ b/plugins/device/transaction.go
@@ -1,11 +1,12 @@
 package device
 
 import (
-	"strings"
-
+	"github.com/gluster/glusterd2/glusterd2/provisioners"
 	"github.com/gluster/glusterd2/glusterd2/transaction"
 	deviceapi "github.com/gluster/glusterd2/plugins/device/api"
 	"github.com/gluster/glusterd2/plugins/device/deviceutils"
+
+	"github.com/pborman/uuid"
 )
 
 func txnPrepareDevice(c transaction.TxnCtx) error {
@@ -21,39 +22,47 @@ func txnPrepareDevice(c transaction.TxnCtx) error {
 		return err
 	}
 
-	var deviceInfo deviceapi.Info
-
-	err := deviceutils.CreatePV(device)
-	if err != nil {
-		c.Logger().WithError(err).WithField("device", device).Error("Failed to create physical volume")
+	var provisionerType string
+	if err := c.Get("provisioner", &provisionerType); err != nil {
+		c.Logger().WithError(err).WithField("key", "provisioner").Error("Failed to get key from transaction context")
 		return err
 	}
-	vgName := strings.Replace("vg"+device, "/", "-", -1)
-	err = deviceutils.CreateVG(device, vgName)
-	if err != nil {
-		c.Logger().WithError(err).WithField("device", device).Error("Failed to create volume group")
-		errPV := deviceutils.RemovePV(device)
-		if errPV != nil {
-			c.Logger().WithError(err).WithField("device", device).Error("Failed to remove physical volume")
+
+	var provisioner provisioners.Provisioner
+	var err error
+	if provisionerType == "" {
+		provisioner = provisioners.GetDefault()
+	} else {
+		provisioner, err = provisioners.Get(provisionerType)
+		if err != nil {
+			c.Logger().WithError(err).WithField("name", provisionerType).Error("invalid provisioner")
+			return err
 		}
+	}
+
+	var deviceInfo deviceapi.Info
+
+	err = provisioner.Register(device)
+	if err != nil {
+		c.Logger().WithError(err).WithField("device", device).Error("failed to register device")
 		return err
 	}
+
 	c.Logger().WithField("device", device).Info("Device setup successful, setting device status to 'Enabled'")
 
-	availableSize, extentSize, err := deviceutils.GetVgAvailableSize(vgName)
+	availableSize, extentSize, err := provisioner.AvailableSize(device)
 	if err != nil {
 		return err
 	}
 
 	deviceInfo = deviceapi.Info{
-		Name:          device,
+		Device:        device,
 		State:         deviceapi.DeviceEnabled,
 		AvailableSize: availableSize,
 		ExtentSize:    extentSize,
-		PeerID:        peerID,
-		VgName:        vgName,
+		PeerID:        uuid.Parse(peerID),
 	}
 
-	err = deviceutils.AddDevice(deviceInfo)
+	err = deviceutils.AddOrUpdateDevice(deviceInfo)
 	if err != nil {
 		c.Logger().WithError(err).WithField("peerid", peerID).Error("Couldn't add deviceinfo to store")
 		return err
diff --git a/plugins/lvmprovisioner/provisioner.go b/plugins/lvmprovisioner/provisioner.go
new file mode 100644
index 000000000..f8e1b2736
--- /dev/null
+++ b/plugins/lvmprovisioner/provisioner.go
@@ -0,0 +1,133 @@
+package lvmprovisioner
+
+import (
+	"errors"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/gluster/glusterd2/pkg/fsutils"
+	"github.com/gluster/glusterd2/pkg/lvmutils"
+)
+
+var mountOpts = "rw,inode64,noatime,nouuid"
+
+// Provisioner represents lvm provisioner plugin
+type Provisioner struct{}
+
+func getVgName(devpath string) string {
+	return "gluster" + strings.Replace(devpath, "/", "-", -1)
+}
+
+func getLvName(brickid string) string {
+	return "lv-" + brickid
+}
+
+func getTpName(brickid string) string {
+	return "tp-" + brickid
+}
+
+func getBrickDev(devpath, brickid string) string {
+	return "/dev/" + getVgName(devpath) + "/" + getLvName(brickid)
+}
+
+// Register creates pv and vg for a given device
+func (p Provisioner) Register(devpath string) error {
+	err := lvmutils.CreatePV(devpath)
+	if err != nil {
+		return err
+	}
+	return lvmutils.CreateVG(devpath, getVgName(devpath))
+}
+
+// AvailableSize returns available size in the given device
+func (p Provisioner) AvailableSize(devpath string) (uint64, uint64, error) {
+	return lvmutils.GetVgAvailableSize(getVgName(devpath))
+}
+
+// Unregister removes VG and PV
+func (p Provisioner) Unregister(devpath string) error {
+	err := lvmutils.RemoveVG(getVgName(devpath))
+	if err != nil {
+		return err
+	}
+	return lvmutils.RemovePV(devpath)
+}
+
+// CreateBrick creates thinpool and lv for given size
+func (p Provisioner) CreateBrick(devpath, brickid string, size uint64, bufferFactor float64) error {
+	vgname := getVgName(devpath)
+	tpsize := uint64(float64(size) * bufferFactor)
+	tpname := getTpName(brickid)
+	lvname := getLvName(brickid)
+	metasize := lvmutils.GetTpMetadataSize(tpsize)
+
+	err := lvmutils.CreateTP(vgname, tpname, tpsize, metasize)
+	if err != nil {
+		return err
+	}
+	return lvmutils.CreateLV(vgname, tpname, lvname, size)
+}
+
+// CreateBrickFS creates the filesystem as requested
+func (p Provisioner) CreateBrickFS(devpath, brickid, fstype string) error {
+	brickdev := getBrickDev(devpath, brickid)
+	switch fstype {
+	case "xfs":
+		return fsutils.MakeXfs(brickdev)
+	default:
+		return errors.New("unsupported filesystem")
+	}
+}
+
+// CreateBrickDir creates brick directory inside mount
+func (p Provisioner) CreateBrickDir(brickPath string) error {
+	return os.MkdirAll(brickPath, os.ModeDir|os.ModePerm)
+}
+
+// MountBrick mounts the brick to the given brick path
+func (p Provisioner) MountBrick(devpath, brickid, brickPath string) error {
+	mountdir := path.Dir(brickPath)
+	brickdev := getBrickDev(devpath, brickid)
+	err := os.MkdirAll(mountdir, os.ModeDir|os.ModePerm)
+	if err != nil {
+		return err
+	}
+	return fsutils.Mount(brickdev, mountdir, mountOpts)
+}
+
+// UnmountBrick unmounts the brick
+func (p Provisioner) UnmountBrick(brickPath string) error {
+	mountdir := path.Dir(brickPath)
+	return fsutils.Unmount(mountdir)
+}
+
+// RemoveBrick removes the brick LV and Thinpool
+func (p Provisioner) RemoveBrick(devpath, brickid string) error {
+	vgname := getVgName(devpath)
+	lvname := getLvName(brickid)
+
+	tpname, err := lvmutils.GetThinpoolName(vgname, lvname)
+	if err != nil {
+		return err
+	}
+
+	err = lvmutils.RemoveLV(vgname, lvname)
+	if err != nil {
+		return err
+	}
+	// Remove Thin Pool if LV count is zero, Thinpool will
+	// have more LVs in case of snapshots and clones
+	numLvs, err := lvmutils.NumberOfLvs(vgname, tpname)
+	if err != nil {
+		return err
+	}
+
+	if numLvs == 0 {
+		err = lvmutils.RemoveLV(vgname, tpname)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
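Note on naming: every LVM object name in the new lvmprovisioner is derived from the registered device path and the brick id (see the get* helpers in plugins/lvmprovisioner/provisioner.go above), so Brickinfo only needs to carry Device and Name. For example, with device path /dev/gluster_loop1 and a planner-generated brick id testvol_s1_b1 (ids follow the <volname>_s<subvol>_b<brick> pattern):

	getVgName("/dev/gluster_loop1")                     // "gluster-dev-gluster_loop1"
	getTpName("testvol_s1_b1")                          // "tp-testvol_s1_b1"
	getLvName("testvol_s1_b1")                          // "lv-testvol_s1_b1"
	getBrickDev("/dev/gluster_loop1", "testvol_s1_b1")  // "/dev/gluster-dev-gluster_loop1/lv-testvol_s1_b1"

This derivation is why the e2e helper in the first hunk now expects VG names of the form gluster-dev-gluster_loop%d rather than vg-dev-gluster_loop%d.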
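The empty-name fallback to the default provisioner is repeated inline in txnPrepareBricks, txnUndoPrepareBricks, txnCleanBricks and txnPrepareDevice. A minimal sketch of how that selection could be hoisted into the provisioners package; GetOrDefault is a hypothetical helper name, not part of this patch:

	// GetOrDefault returns the provisioner registered under name, falling
	// back to the default ("lvm") provisioner when name is empty.
	// Hypothetical helper: this patch repeats the equivalent selection
	// inline at each call site.
	func GetOrDefault(name string) (Provisioner, error) {
		if name == "" {
			return GetDefault(), nil
		}
		return Get(name)
	}

Each call site would then reduce to:

	provisioner, err := provisioners.GetOrDefault(req.Provisioner)
	if err != nil {
		c.Logger().WithError(err).WithField("name", req.Provisioner).Error("invalid provisioner")
		return err
	}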