Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 12 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,17 +34,23 @@ When mounted with the [Tigris](https://www.tigrisdata.com) backend TigrisFS supp

# Installation

## Recommended: One-line install

```bash
curl -sSL https://raw.githubusercontent.com/tigrisdata/tigrisfs/refs/heads/main/install.sh | bash
```

## Prebuilt DEB and RPM packages

* Download the latest release: [DEB](https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.0/tigrisfs_1.2.0_linux_amd64.deb), [RPM](https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.0/tigrisfs_1.2.0_linux_amd64.rpm).
* Download the latest release: [DEB](https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.1/tigrisfs_1.2.1_linux_amd64.deb), [RPM](https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.1/tigrisfs_1.2.1_linux_amd64.rpm).
* Install the package:
* Debian-based systems:
```bash
dpkg -i tigrisfs_1.2.0_linux_amd64.deb
dpkg -i tigrisfs_1.2.1_linux_amd64.deb
```
* RPM-based systems:
```bash
rpm -i tigrisfs_1.2.0_linux_amd64.rpm
rpm -i tigrisfs_1.2.1_linux_amd64.rpm
```
* Configure credentials
TigrisFS can use credentials from different sources:
Expand All @@ -59,19 +65,19 @@ See [docs](https://www.tigrisdata.com/docs/sdks/s3/aws-cli/) for more details.
```bash
systemctl --user start tigrisfs@<bucket>
```
The bucket is mounted at `$HOME/mnt/tigrisfs/<bucket>`.
The bucket is mounted at `$HOME/mnt/tigris/<bucket>`.
* as root
```bash
systemctl start tigrisfs@<bucket>
```
The bucket is mounted at `/mnt/tigrisfs/<bucket>`.
The bucket is mounted at `/mnt/tigris/<bucket>`.

## Binary install

* Download and unpack the latest release:
* macOS ARM64
```
curl -L https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.0/tigrisfs_1.2.0_darwin_arm64.tar.gz | sudo tar -xz -C /usr/local/bin
curl -L https://github.com/tigrisdata/tigrisfs/releases/download/v1.2.1/tigrisfs_1.2.1_darwin_arm64.tar.gz | sudo tar -xz -C /usr/local/bin
```
* Configuration is the same as for the DEB and RPM packages above.
* Mount the bucket:
Expand Down
8 changes: 4 additions & 4 deletions core/cfg/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,8 @@ MISC OPTIONS:
},

cli.BoolFlag{
Name: "no-tigris-prefetch",
Usage: "Disable Tigris prefetch on list (default: on)",
Name: "tigris-prefetch",
Usage: "Enable Tigris prefetch on list (default: off)",
},

cli.BoolFlag{
Expand Down Expand Up @@ -955,7 +955,7 @@ func PopulateFlags(c *cli.Context) (ret *FlagStorage) {
ClusterMode: c.Bool("cluster"),
ClusterGrpcReflection: c.Bool("grpc-reflection"),

TigrisPrefetch: !c.Bool("no-tigris-prefetch"),
TigrisPrefetch: c.Bool("tigris-prefetch"),
TigrisListContent: c.Bool("tigris-list-content"),
}

Expand Down Expand Up @@ -1138,7 +1138,7 @@ func DefaultFlags() *FlagStorage {
{PartSize: 25 * 1024 * 1024, PartCount: 1000},
{PartSize: 125 * 1024 * 1024, PartCount: 8000},
},
TigrisPrefetch: true,
TigrisPrefetch: false,
TigrisListContent: true,
}
}
1 change: 1 addition & 0 deletions core/cluster_fs.go
Original file line number Diff line number Diff line change
Expand Up @@ -430,6 +430,7 @@ func (fs *ClusterFs) readDir(handleId fuseops.HandleID, offset fuseops.DirOffset
dh.lastExternalOffset = 0
dh.lastInternalOffset = 0
dh.lastName = ""
dh.generation = atomic.LoadUint64(&dh.inode.dir.generation)
}

for {
Expand Down
5 changes: 4 additions & 1 deletion core/cluster_fs_fuse.go
Original file line number Diff line number Diff line change
Expand Up @@ -550,7 +550,10 @@ func (fs *ClusterFsFuse) OpenDir(ctx context.Context, op *fuseops.OpenDirOp) (er

// 2nd phase
fs.Goofys.mu.Lock()
dh := &DirHandle{inode: inode}
dh := &DirHandle{
inode: inode,
generation: atomic.LoadUint64(&inode.dir.generation),
}
fs.Goofys.dirHandles[fuseops.HandleID(resp.HandleId)] = dh
fs.Goofys.mu.Unlock()

Expand Down
98 changes: 82 additions & 16 deletions core/dir.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ type DirInodeData struct {
DeletedChildren map[string]*Inode
Gaps []*SlurpGap
handles []*DirHandle
generation uint64 // incremented on structural changes
}

// Returns the position of first char < '/' in `inp` after prefixLen + any continued '/' characters.
Expand Down Expand Up @@ -87,11 +88,15 @@ type DirHandle struct {
// or from the previous offset
lastExternalOffset fuseops.DirOffset
lastInternalOffset int
generation uint64 // tracks directory structure changes
lastName string
}

// NewDirHandle creates a read handle for the given directory inode.
// It snapshots the directory's current generation counter so subsequent
// readdir operations can detect structural changes (inserts/removals)
// made while the handle is open and reset their position accordingly.
func NewDirHandle(inode *Inode) (dh *DirHandle) {
	// The original hunk carried a dead store (dh assigned twice); only the
	// generation-aware construction is kept.
	dh = &DirHandle{
		inode:      inode,
		generation: atomic.LoadUint64(&inode.dir.generation),
	}
	return
}

Expand Down Expand Up @@ -385,7 +390,9 @@ func (dir *DirInodeData) checkGapLoaded(key string, newerThan time.Time) bool {
return false
}

// sealDir completes directory listing and cleans up expired entries
// LOCKS_REQUIRED(inode.mu)
// LOCKS_EXCLUDED(dh.mu for all directory handles)
func (inode *Inode) sealDir() {
inode.dir.listMarker = ""
inode.dir.listDone = true
Expand All @@ -397,6 +404,10 @@ func (inode *Inode) sealDir() {
} else {
inode.Attributes.Mtime, inode.Attributes.Ctime = inode.findChildMaxTime()
}

// Increment generation to signal all handles need revalidation
atomic.AddUint64(&inode.dir.generation, 1)

inode.removeExpired("")
}

Expand Down Expand Up @@ -622,7 +633,14 @@ func (dh *DirHandle) listObjectsFlat() (start string, err error) {
dh.inode.dir.listMarker = lastName
}
} else {
// We must release dh.mu before calling sealDir to avoid deadlock
dh.mu.Unlock()
dh.inode.sealDir()
// Reload generation immediately after sealDir completes to get accurate state
currentGen := atomic.LoadUint64(&dh.inode.dir.generation)
dh.mu.Lock()
// Update our generation to match the new state
dh.generation = currentGen
}

dh.inode.mu.Unlock()
Expand All @@ -633,6 +651,16 @@ func (dh *DirHandle) listObjectsFlat() (start string, err error) {
// LOCKS_REQUIRED(dh.mu)
// LOCKS_REQUIRED(dh.inode.mu)
func (dh *DirHandle) checkDirPosition() {
// Check if directory structure changed since we last checked
// Note: There's a benign race here where generation could change between
// the load and assignment. This is acceptable as we'll catch it on the
// next operation. The worst case is an unnecessary position reset.
currentGen := atomic.LoadUint64(&dh.inode.dir.generation)
if dh.generation != currentGen {
dh.lastInternalOffset = -1
dh.generation = currentGen
}

if dh.lastInternalOffset < 0 {
parent := dh.inode
// Directory position invalidated, try to find it again using lastName
Expand Down Expand Up @@ -688,11 +716,21 @@ func (dh *DirHandle) loadListing() error {
// token

if useSlurp {
// We must release both locks before calling slurpOnce to avoid deadlock
// Store current generation before unlocking
currentGen := atomic.LoadUint64(&parent.dir.generation)
parent.mu.Unlock()
dh.mu.Unlock()
done, err := parent.slurpOnce(true)
dh.mu.Lock()
parent.mu.Lock()
// Check if generation changed while we were unlocked
newGen := atomic.LoadUint64(&parent.dir.generation)
if newGen != currentGen {
// Directory structure changed, reset our position
dh.generation = newGen
dh.lastInternalOffset = -1
}
if err != nil {
return err
}
Expand All @@ -706,12 +744,24 @@ func (dh *DirHandle) loadListing() error {

loaded, startMarker := false, ""
for parent.dir.lastFromCloud == nil && !parent.dir.listDone {
// We must release parent.mu before calling listObjectsFlat to avoid deadlock
// Store current generation before unlocking
currentGen := atomic.LoadUint64(&parent.dir.generation)
parent.mu.Unlock()
start, err := dh.listObjectsFlat()
if !loaded {
loaded, startMarker = true, start
}
parent.mu.Lock()
// Check if generation changed while we were unlocked
newGen := atomic.LoadUint64(&parent.dir.generation)
if newGen != currentGen {
// Directory structure changed, reset our position and invalidate startMarker
dh.generation = newGen
dh.lastInternalOffset = -1
// Clear startMarker to prevent removeExpired from operating on stale range
startMarker = ""
}
if err != nil {
return err
}
Expand Down Expand Up @@ -752,6 +802,7 @@ func (dh *DirHandle) Seek(newOffset fuseops.DirOffset) {
dh.lastExternalOffset = 0
dh.lastInternalOffset = 0
dh.lastName = ""
dh.generation = atomic.LoadUint64(&dh.inode.dir.generation)
}
}

Expand Down Expand Up @@ -996,6 +1047,10 @@ func (parent *Inode) removeChildUnlocked(inode *Inode) {
if l == 0 {
return
}

// Increment generation to invalidate all directory handles
atomic.AddUint64(&parent.dir.generation, 1)

i := sort.Search(l, parent.findInodeFunc(inode.Name))
if i >= l || parent.dir.Children[i].Name != inode.Name {
panic(fmt.Sprintf("%v.removeName(%v) but child not found: %v",
Expand All @@ -1004,11 +1059,7 @@ func (parent *Inode) removeChildUnlocked(inode *Inode) {

// POSIX allows parallel readdir() and modifications,
// so preserve position of all directory handles
for _, dh := range parent.dir.handles {
dh.mu.Lock()
dh.lastInternalOffset = -1
dh.mu.Unlock()
}
// Handles will detect the generation change and reset themselves
// >= because we use the "last open dir" as the "next" one
if parent.dir.lastOpenDirIdx >= i {
parent.dir.lastOpenDirIdx--
Expand Down Expand Up @@ -1038,11 +1089,8 @@ func (parent *Inode) removeAllChildrenUnlocked() {
child.DeRef(1)
child.mu.Unlock()
}
// POSIX allows parallel readdir() and modifications,
// so reset position of all directory handles
for _, dh := range parent.dir.handles {
dh.lastInternalOffset = -1
}
// Increment generation to invalidate all directory handles
atomic.AddUint64(&parent.dir.generation, 1)
parent.dir.Children = nil
}

Expand Down Expand Up @@ -1091,11 +1139,8 @@ func (parent *Inode) insertChildUnlocked(inode *Inode) {
panic(fmt.Sprintf("double insert of %v", parent.getChildName(inode.Name)))
}

// POSIX allows parallel readdir() and modifications,
// so preserve position of all directory handles
for _, dh := range parent.dir.handles {
dh.lastInternalOffset = -1
}
// Increment generation to invalidate all directory handles
atomic.AddUint64(&parent.dir.generation, 1)
if parent.dir.lastOpenDirIdx >= i {
parent.dir.lastOpenDirIdx++
}
Expand Down Expand Up @@ -1971,6 +2016,27 @@ func (parent *Inode) recheckInode(inode *Inode, name string) (newInode *Inode, e
return newInode, nil
}

// recheckInodeByName is similar to recheckInode but finds the current child by name
// first to ensure we're working with the most up-to-date inode instance.
// This avoids issues with stale inode references that might not match what's
// currently in the parent's children list.
//
// On lookup failure the child currently registered under name (if any) is
// removed from the parent, and (nil, err) is returned.
func (parent *Inode) recheckInodeByName(name string) (newInode *Inode, err error) {
	// First get the current child if it exists.
	// parent.mu is released before LookUp, so the child set may change in
	// between; that race is accepted — removeChild below operates on the
	// instance we observed here.
	parent.mu.Lock()
	currentChild := parent.findChildUnlocked(name)
	parent.mu.Unlock()

	// Slurp (bulk-list) only when the child is unknown and directory
	// preloading has not been disabled via flags.
	newInode, err = parent.LookUp(name, currentChild == nil && !parent.fs.flags.NoPreloadDir)
	if err != nil {
		if currentChild != nil {
			// Remove the actual current child from parent's children list
			parent.removeChild(currentChild)
		}
		return nil, err
	}
	return newInode, nil
}

func (parent *Inode) LookUp(name string, doSlurp bool) (*Inode, error) {
_, parentKey := parent.cloud()
key := appendChildName(parentKey, name)
Expand Down
5 changes: 4 additions & 1 deletion core/goofys.go
Original file line number Diff line number Diff line change
Expand Up @@ -958,7 +958,10 @@ func (fs *Goofys) RefreshInodeCache(inode *Inode) error {
}
return mappedErr
}
_, err := parent.recheckInode(inode, name)
// Use recheckInodeByName to ensure we work with the current child instance
// This handles cases where the inode passed to RefreshInodeCache might be
// a stale reference from fs.inodes while parent.dir.Children has a newer instance
_, err := parent.recheckInodeByName(name)
mappedErr = mapAwsError(err)
if mappedErr == syscall.ENOENT {
notifications = append(notifications, &fuseops.NotifyDelete{
Expand Down
Loading