github.com/dotlike13/wemix30_go@v1.8.23/swarm/fuse/swarmfs_unix.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/log"
)

var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
	errMaxMountCount        = errors.New("max FUSE mount count reached")
	errMountTimeout         = errors.New("mount timeout")
	errAlreadyMounted       = errors.New("mount point is already serving")
)

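// isFUSEUnsupportedError reports whether err indicates that FUSE support is
// missing on this host: either /dev/fuse cannot be opened, or (on macOS)
// OSXFUSE is not installed.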
func isFUSEUnsupportedError(err error) bool {
	if perr, ok := err.(*os.PathError); ok {
		return perr.Op == "open" && perr.Path == "/dev/fuse"
	}
	return err == fuse.ErrOSXFUSENotFound
}

// MountInfo contains information about a single active mount
type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
	rootDir        *SwarmDir
	fuseConnection *fuse.Conn
	swarmApi       *api.API
	lock           *sync.RWMutex
	serveClose     chan struct{}
}

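// NewMountInfo creates a MountInfo for the given manifest hash and mount
// point, backed by the supplied Swarm API instance. The root directory and
// FUSE connection are filled in later by Mount.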
func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
	log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
	newMountInfo := &MountInfo{
		MountPoint:     mpoint,
		StartManifest:  mhash,
		LatestManifest: mhash,
		rootDir:        nil,
		fuseConnection: nil,
		swarmApi:       sapi,
		lock:           &sync.RWMutex{},
		serveClose:     make(chan struct{}),
	}
	return newMountInfo
}

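// Mount mounts the Swarm manifest identified by mhash at mountpoint. The mount
// point must be a non-empty absolute path that is not already being served.
// Mount builds an in-memory directory tree from the manifest, establishes the
// FUSE connection and serves the file system from a background goroutine.
//
// Illustrative usage (a sketch; it assumes a *SwarmFS obtained elsewhere, e.g.
// from NewSwarmFS, and a valid manifest hash):
//
//	mi, err := swarmfs.Mount(manifestHash, "/mnt/swarm")
//	if err != nil {
//		// handle the error
//	}
//	defer swarmfs.Unmount(mi.MountPoint)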
func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFuseMounts {
		return nil, errMaxMountCount
	}

	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

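	// Fetch the manifest and flatten it into a map from path to manifest entry;
	// the map is used below to build the in-memory directory tree.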
	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
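	// For every named manifest entry, create any missing intermediate SwarmDir
	// nodes along its path and attach a SwarmFile leaf to its parent directory.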
	for suffix, entry := range manifestEntryMap {
		if suffix == "" {
			// An empty suffix means that the file has no name, i.e. this is the default
			// entry in a manifest. Since we cannot have files without a name, ignore this entry.
			log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.")
			continue
		}
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir

				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
					parentDir = dirTree[dirUntilNow]
				} else {
					parentDir = dirTree[dirUntilNow]
				}
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr

		parentDir.files = append(parentDir.files, thisFile)
	}

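	// With the in-memory tree built, create the kernel mount point. fuse.Mount
	// only establishes the mount; the content is served by fs.Serve below.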
	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		fuse.Unmount(cleanedMountPoint)
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

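	// Serve the file system in the background. A serve failure is reported on
	// serverr; serveClose signals that the serve loop has terminated.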
	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		// start serving the actual file system; see note below
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		mi.serveClose <- struct{}{}
	}()

	/*
	   IMPORTANT NOTE: the fs.Serve function is blocking.
	   Serve builds up the actual fuse file system by calling the
	   Attr functions on each SwarmFile, creating the file inodes;
	   specifically it calls swarm's LazySectionReader.Size() to set the file size.

	   This can take some time, and it appears that if we access the fuse file system
	   too early, we can bring the tests to deadlock. The assumption so far is that
	   at this point the fuse driver hasn't finished initializing the file system.

	   Accessing files too early not only deadlocks the tests, but also blocks access
	   to the fuse mount completely, resulting in blocked resources at the OS level.
	   Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.

	   The workaround so far is to wait some time to give the OS enough time to
	   initialize the fuse file system. During tests, this seemed to address the issue.

	   HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
	   AND THE DEADLOCK MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
	   (caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()
	// Check if the mount process has an error to report.
	select {
	case <-timer.C:
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint)
		err := fuse.Unmount(cleanedMountPoint)
		if err != nil {
			return nil, err
		}
		return nil, errMountTimeout
	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		if unmountErr := fuse.Unmount(cleanedMountPoint); unmountErr != nil {
			log.Warn("swarmfs error unmounting after failed serve", "mountpoint", cleanedMountPoint, "err", unmountErr)
		}
		return nil, err

	case <-fconn.Ready:
		// this signals that the actual mount point from the fuse.Mount call is ready;
		// it does not signal though that the file system from fs.Serve is actually fully built up
		if err := fconn.MountError; err != nil {
			log.Error("Mounting error from fuse driver", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}

	timer.Stop()
	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}

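// Unmount detaches the FUSE file system served at mountpoint, falling back to
// an external unmount command if the in-process unmount fails, closes the FUSE
// connection and removes the mount from the set of active mounts.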
func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}

	mountInfo := swarmfs.activeMounts[cleanedMountPoint]

	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
	}
	err = fuse.Unmount(cleanedMountPoint)
	if err != nil {
		err1 := externalUnmount(cleanedMountPoint)
		if err1 != nil {
			errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
			log.Warn(errStr)
			return nil, err1
		}
	}

	err = mountInfo.fuseConnection.Close()
	if err != nil {
		return nil, err
	}
	delete(swarmfs.activeMounts, cleanedMountPoint)

	<-mountInfo.serveClose

	succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
	log.Info(succString)

	return mountInfo, nil
}

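// Listmounts returns the MountInfo for every currently active mount.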
func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
	swarmfs.swarmFsLock.RLock()
	defer swarmfs.swarmFsLock.RUnlock()
	rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
	for _, mi := range swarmfs.activeMounts {
		rows = append(rows, mi)
	}
	return rows
}

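// Stop unmounts all active mounts. Errors from individual unmounts are
// ignored and the method always reports success.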
func (swarmfs *SwarmFS) Stop() bool {
	for mp := range swarmfs.activeMounts {
		mountInfo := swarmfs.activeMounts[mp]
		swarmfs.Unmount(mountInfo.MountPoint)
	}
	return true
}