github.com/quinndk/ethereum_read@v0.0.0-20181211143958-29c55eec3237/go-ethereum-master_read/swarm/fuse/swarmfs_unix.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/log"
)

var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
	errMaxMountCount        = errors.New("max FUSE mount count reached")
	errMountTimeout         = errors.New("mount timeout")
	errAlreadyMounted       = errors.New("mount point is already serving")
)

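// isFUSEUnsupportedError reports whether err indicates that FUSE support is
// missing on this host: either opening /dev/fuse failed (Linux/FreeBSD) or
// the OSXFUSE installation was not found (macOS).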
func isFUSEUnsupportedError(err error) bool {
	if perr, ok := err.(*os.PathError); ok {
		return perr.Op == "open" && perr.Path == "/dev/fuse"
	}
	return err == fuse.ErrOSXFUSENotFound
}

// MountInfo contains information about every active mount.
type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
	rootDir        *SwarmDir
	fuseConnection *fuse.Conn
	swarmApi       *api.API
	lock           *sync.RWMutex
	serveClose     chan struct{}
}

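// NewMountInfo creates a MountInfo for the given manifest hash and mount
// point, with both StartManifest and LatestManifest initialized to mhash.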
func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
	log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
	newMountInfo := &MountInfo{
		MountPoint:     mpoint,
		StartManifest:  mhash,
		LatestManifest: mhash,
		rootDir:        nil,
		fuseConnection: nil,
		swarmApi:       sapi,
		lock:           &sync.RWMutex{},
		serveClose:     make(chan struct{}),
	}
	return newMountInfo
}

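// Mount mounts the Swarm manifest mhash as a FUSE file system at mountpoint.
// It builds the directory tree from the manifest, mounts it via bazil.org/fuse
// and serves it from a background goroutine, returning the resulting MountInfo
// once the mount is ready or an error otherwise.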
func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFuseMounts {
		return nil, errMaxMountCount
	}

	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
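	// Each manifest entry is keyed by its path suffix. For every entry,
	// materialize the chain of parent directories (creating SwarmDir nodes
	// on demand and caching them in dirTree), then attach the file node to
	// its immediate parent directory.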
	for suffix, entry := range manifestEntryMap {
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir

				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
				}
				parentDir = dirTree[dirUntilNow]
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr

		parentDir.files = append(parentDir.files, thisFile)
	}

	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		fuse.Unmount(cleanedMountPoint)
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		// Start serving the actual file system; see the note below.
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		mi.serveClose <- struct{}{}
	}()

	/*
	   IMPORTANT NOTE: the fs.Serve function is blocking.
	   Serve builds up the actual FUSE file system by calling the
	   Attr functions on each SwarmFile, creating the file inodes;
	   specifically, it calls swarm's LazySectionReader.Size() to set the file size.

	   This can take some time, and it appears that if we access the FUSE file system
	   too early, we can bring the tests to deadlock. The assumption so far is that
	   at this point the FUSE driver hasn't finished initializing the file system.

	   Accessing files too early not only deadlocks the tests, but blocks access
	   to the FUSE mount completely, tying up resources at the OS level.
	   Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.

	   The workaround so far is to wait some time, giving the OS enough time to
	   initialize the FUSE file system. During tests, this seemed to address the issue.

	   HOWEVER, IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT, AND THE DEADLOCK
	   MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
	   (caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations).
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()
	// Check if the mount process has an error to report.
	select {
	case <-timer.C:
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint)
		err := fuse.Unmount(cleanedMountPoint)
		if err != nil {
			return nil, err
		}
		return nil, errMountTimeout

	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		if uerr := fuse.Unmount(cleanedMountPoint); uerr != nil {
			return nil, uerr
		}
		return nil, err

	case <-fconn.Ready:
		// This signals that the actual mount point from the fuse.Mount call is
		// ready; it does not signal that the file system from fs.Serve is fully
		// built up yet.
		if err := fconn.MountError; err != nil {
			log.Error("mounting error from fuse driver", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}

	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}

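// Unmount unmounts the FUSE file system at mountpoint, closes the FUSE
// connection, removes the mount from the active set and waits for the
// serving goroutine to exit. It returns the MountInfo of the removed mount.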
func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}

	mountInfo := swarmfs.activeMounts[cleanedMountPoint]

	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
	}
	err = fuse.Unmount(cleanedMountPoint)
	if err != nil {
		// Fall back to the platform's external unmount command; report both
		// errors if that fails as well.
		err1 := externalUnmount(cleanedMountPoint)
		if err1 != nil {
			log.Warn("swarmfs unmount error", "mountpoint", cleanedMountPoint, "err", err, "external unmount err", err1)
			return nil, err1
		}
	}

	err = mountInfo.fuseConnection.Close()
	if err != nil {
		return nil, err
	}
	delete(swarmfs.activeMounts, cleanedMountPoint)

	<-mountInfo.serveClose

	log.Info("swarmfs unmounting succeeded", "mountpoint", cleanedMountPoint)

	return mountInfo, nil
}

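// Listmounts returns a snapshot of all currently active mounts.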
func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
	swarmfs.swarmFsLock.RLock()
	defer swarmfs.swarmFsLock.RUnlock()
	rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
	for _, mi := range swarmfs.activeMounts {
		rows = append(rows, mi)
	}
	return rows
}

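// Stop unmounts all active mounts. Errors from individual unmounts are
// logged but do not abort the remaining unmounts.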
func (swarmfs *SwarmFS) Stop() bool {
	for mp := range swarmfs.activeMounts {
		mountInfo := swarmfs.activeMounts[mp]
		if _, err := swarmfs.Unmount(mountInfo.MountPoint); err != nil {
			log.Warn("swarmfs could not unmount", "mountpoint", mountInfo.MountPoint, "err", err)
		}
	}
	return true
}
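
// mountExample is a minimal usage sketch, not part of the original API: it
// shows how Mount, Listmounts and Unmount are intended to be combined. The
// manifest hash and mount point below are hypothetical placeholders.
func mountExample(swarmfs *SwarmFS) error {
	// Mount a (hypothetical) manifest hash at an absolute mount point.
	mi, err := swarmfs.Mount("0123456789abcdef", "/tmp/swarmmount")
	if err != nil {
		return err
	}
	// Inspect the active mounts.
	for _, m := range swarmfs.Listmounts() {
		log.Debug("active mount", "mountpoint", m.MountPoint, "manifest", m.LatestManifest)
	}
	// Tear the mount down again; Unmount blocks until serving has stopped.
	_, err = swarmfs.Unmount(mi.MountPoint)
	return err
}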