github.com/muhammedhassanm/blockchain@v0.0.0-20200120143007-697261defd4d/go-ethereum-master/swarm/fuse/swarmfs_unix.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/log"
)

var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
	errMaxMountCount        = errors.New("max FUSE mount count reached")
	errMountTimeout         = errors.New("mount timeout")
	errAlreadyMounted       = errors.New("mount point is already serving")
)

// isFUSEUnsupportedError reports whether err indicates that FUSE is not
// available on the host: a failed open of /dev/fuse on Linux/FreeBSD, or
// OSXFUSE not being installed on macOS.
func isFUSEUnsupportedError(err error) bool {
	if perr, ok := err.(*os.PathError); ok {
		return perr.Op == "open" && perr.Path == "/dev/fuse"
	}
	return err == fuse.ErrOSXFUSENotFound
}

// MountInfo contains information about every active mount.
type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
	rootDir        *SwarmDir
	fuseConnection *fuse.Conn
	swarmApi       *api.API
	lock           *sync.RWMutex
	serveClose     chan struct{}
}

// NewMountInfo creates the MountInfo for a manifest hash and mount point.
func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
	log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
	newMountInfo := &MountInfo{
		MountPoint:     mpoint,
		StartManifest:  mhash,
		LatestManifest: mhash,
		rootDir:        nil,
		fuseConnection: nil,
		swarmApi:       sapi,
		lock:           &sync.RWMutex{},
		// Buffered so the serve goroutine can signal completion and exit even
		// when the mount failed and nobody is left to receive the signal.
		serveClose: make(chan struct{}, 1),
	}
	return newMountInfo
}

func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFuseMounts {
		return nil, errMaxMountCount
	}

	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
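	// For illustration: a manifest entry with suffix "photos/2017/pic.jpg"
	// becomes fullpath "/photos/2017/pic.jpg"; the loop below creates the
	// intermediate SwarmDir nodes "/photos" and "/photos/2017" exactly once
	// (tracked in dirTree) and attaches a SwarmFile "pic.jpg" to the innermost one.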
	for suffix, entry := range manifestEntryMap {
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir

				// Create each directory node only once, then descend into it.
				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
				}
				parentDir = dirTree[dirUntilNow]
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr

		parentDir.files = append(parentDir.files, thisFile)
	}

	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		fuse.Unmount(cleanedMountPoint) // best-effort cleanup
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		// Start serving the actual file system; see the note below.
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		mi.serveClose <- struct{}{}
	}()

	/*
	   IMPORTANT NOTE: the fs.Serve function is blocking.
	   Serve builds up the actual FUSE file system by calling the
	   Attr functions on each SwarmFile, creating the file inodes;
	   specifically, it calls swarm's LazySectionReader.Size() to set the file size.

	   This can take some time, and it appears that if we access the FUSE file
	   system too early, we can deadlock the tests. The assumption so far is that
	   at this point the FUSE driver has not yet finished initializing the file system.

	   Accessing files too early not only deadlocks the tests, but also blocks
	   access to the FUSE mount entirely, tying up resources at the OS level:
	   even a simple `ls /tmp/testDir/testMountDir` can hang in a shell.

	   The workaround so far is to wait a while to give the OS enough time to
	   initialize the FUSE file system. During tests, this seemed to address the issue.

	   HOWEVER, IT SHOULD BE NOTED THAT THIS MAY ONLY BE A SYMPTOM, AND THE
	   DEADLOCK MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO A RACE
	   CONDITION (in the bazil.org library and/or the SwarmRoot, SwarmDir and
	   SwarmFile implementations).
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()
	// Check whether the mount process has an error to report.
	select {
	case <-timer.C:
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint)
		if err := fuse.Unmount(cleanedMountPoint); err != nil {
			return nil, err
		}
		return nil, errMountTimeout

	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		// Report the serve error even if the cleanup unmount succeeds, so the
		// caller never sees a nil MountInfo together with a nil error.
		if uerr := fuse.Unmount(cleanedMountPoint); uerr != nil {
			return nil, uerr
		}
		return nil, err

	case <-fconn.Ready:
		// This signals that the actual mount point from the fuse.Mount call is
		// ready; it does not signal that the file system built by fs.Serve is
		// fully set up yet.
		if err := fconn.MountError; err != nil {
			log.Error("Mounting error from fuse driver", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}

	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}
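
// Usage sketch (illustrative only; the manifest hash below is a placeholder
// and error handling is elided):
//
//	mi, err := swarmfs.Mount("f1d2d2f924e9...", "/mnt/bzz")
//	if err != nil {
//		// handle mount failure
//	}
//	defer swarmfs.Unmount(mi.MountPoint)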

func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}

	mountInfo := swarmfs.activeMounts[cleanedMountPoint]
	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
	}

	err = fuse.Unmount(cleanedMountPoint)
	if err != nil {
		// Fall back to the platform's external unmount command.
		err1 := externalUnmount(cleanedMountPoint)
		if err1 != nil {
			log.Warn("swarmfs unmount error", "mountpoint", cleanedMountPoint, "err", err, "externalErr", err1)
			return nil, err1
		}
	}

	err = mountInfo.fuseConnection.Close()
	if err != nil {
		return nil, err
	}
	delete(swarmfs.activeMounts, cleanedMountPoint)

	// Wait for the serve goroutine to finish before reporting success.
	<-mountInfo.serveClose

	log.Info("swarmfs unmounting succeeded", "mountpoint", cleanedMountPoint)
	return mountInfo, nil
}
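
// Note (an assumption based on typical FUSE behavior, not specific to this
// package): if both unmount attempts fail, e.g. because a process still holds
// the mount point as its working directory, the mount can usually be released
// manually with `fusermount -u <mountpoint>` on Linux or `umount <mountpoint>`
// on macOS.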

// Listmounts returns the MountInfo for every active mount.
func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
	swarmfs.swarmFsLock.RLock()
	defer swarmfs.swarmFsLock.RUnlock()
	rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
	for _, mi := range swarmfs.activeMounts {
		rows = append(rows, mi)
	}
	return rows
}

// Stop unmounts every active mount. Individual unmount errors are ignored,
// so Stop always reports success.
func (swarmfs *SwarmFS) Stop() bool {
	for mp := range swarmfs.activeMounts {
		mountInfo := swarmfs.activeMounts[mp]
		swarmfs.Unmount(mountInfo.MountPoint)
	}
	return true
}