github.com/devfans/go-ethereum@v1.5.10-0.20170326212234-7419d0c38291/swarm/api/swarmfs_unix.go (about)

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin

package api

import (
	"fmt"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var (
	inode     uint64 = 1
	inodeLock sync.RWMutex
)

// MountInfo contains information about every active mount.
type MountInfo struct {
	mountPoint     string
	manifestHash   string
	resolvedKey    storage.Key
	rootDir        *Dir
	fuseConnection *fuse.Conn
}

// NewInode returns a new, unique inode number. Inode numbers must be
// unique because FUSE uses them for caching.
func NewInode() uint64 {
	inodeLock.Lock()
	defer inodeLock.Unlock()
	inode++
	return inode
}
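// Mount resolves mhash, builds an in-memory directory tree from its
// manifest, and serves it as a FUSE filesystem at mountpoint. It returns
// a human-readable status string along with any error.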
func (self *SwarmFS) Mount(mhash, mountpoint string) (string, error) {
	self.activeLock.Lock()
	defer self.activeLock.Unlock()

	noOfActiveMounts := len(self.activeMounts)
	if noOfActiveMounts >= maxFuseMounts {
		err := fmt.Errorf("Max mount count reached. Cannot mount %s", mountpoint)
		log.Warn(err.Error())
		return err.Error(), err
	}

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return err.Error(), err
	}

	if _, ok := self.activeMounts[cleanedMountPoint]; ok {
		err := fmt.Errorf("Mountpoint %s already mounted", cleanedMountPoint)
		log.Warn(err.Error())
		return err.Error(), err
	}

	log.Info(fmt.Sprintf("Attempting to mount %s", cleanedMountPoint))
	key, _, path, err := self.swarmApi.parseAndResolve(mhash, true)
	if err != nil {
		errStr := fmt.Sprintf("Could not resolve %s: %v", mhash, err)
		log.Warn(errStr)
		return errStr, err
	}

	if len(path) > 0 {
		path += "/"
	}

	quitC := make(chan bool)
	trie, err := loadManifest(self.swarmApi.dpa, key, quitC)
	if err != nil {
		errStr := fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)
		log.Warn(errStr)
		return errStr, err
	}
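	// Build an in-memory directory tree from the manifest entries. Every
	// entry becomes a File; intermediate directories are created on demand
	// while walking the entry paths below.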
	dirTree := map[string]*Dir{}

	rootDir := &Dir{
		inode:       NewInode(),
		name:        "root",
		directories: nil,
		files:       nil,
	}
	dirTree["root"] = rootDir

	err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
		// Use a local key here so that the resolved manifest key above is
		// not clobbered by the key of the last listed entry.
		entryKey := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		filename := filepath.Base(fullpath)

		// Walk the path components, creating any missing intermediate
		// directories and linking each new one to its parent.
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir

				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = &Dir{
						inode:       NewInode(),
						name:        thisDir,
						path:        dirUntilNow,
						directories: nil,
						files:       nil,
					}
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
				}
				parentDir = dirTree[dirUntilNow]
			}
		}
		thisFile := &File{
			inode:    NewInode(),
			name:     filename,
			path:     fullpath,
			key:      entryKey,
			swarmApi: self.swarmApi,
		}
		parentDir.files = append(parentDir.files, thisFile)
	})
	if err != nil {
		errStr := fmt.Sprintf("Could not list manifest entries for %s: %v", mhash, err)
		log.Warn(errStr)
		return errStr, err
	}

	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if err != nil {
		fuse.Unmount(cleanedMountPoint)
		errStr := fmt.Sprintf("Mounting %s encountered error: %v", cleanedMountPoint, err)
		log.Warn(errStr)
		return errStr, err
	}
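	// Serve the filesystem in a background goroutine. A serve error is
	// forwarded on mounterr so the select below can report it to the caller.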
	mounterr := make(chan error, 1)
	go func() {
		log.Info(fmt.Sprintf("Serving %s at %s", mhash, cleanedMountPoint))
		filesys := &FS{root: rootDir}
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn(fmt.Sprintf("Could not Serve FS error: %v", err))
			mounterr <- err
		}
	}()

	// Check if the mount process has an error to report.
	select {
	case <-time.After(mountTimeout):
		err := fmt.Errorf("Mounting %s timed out", cleanedMountPoint)
		log.Warn(err.Error())
		return err.Error(), err

	case err := <-mounterr:
		errStr := fmt.Sprintf("Mounting %s encountered error: %v", cleanedMountPoint, err)
		log.Warn(errStr)
		return errStr, err

	case <-fconn.Ready:
		log.Debug(fmt.Sprintf("Mounting connection succeeded for: %v", cleanedMountPoint))
	}

	// Assemble and store the mount information for future use.
	mountInformation := &MountInfo{
		mountPoint:     cleanedMountPoint,
		manifestHash:   mhash,
		resolvedKey:    key,
		rootDir:        rootDir,
		fuseConnection: fconn,
	}
	self.activeMounts[cleanedMountPoint] = mountInformation

	succString := fmt.Sprintf("Mounting successful for %s", cleanedMountPoint)
	log.Info(succString)

	return succString, nil
}
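// Unmount detaches the FUSE filesystem mounted at mountpoint and removes
// it from the set of active mounts.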
func (self *SwarmFS) Unmount(mountpoint string) (string, error) {
	self.activeLock.Lock()
	defer self.activeLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return err.Error(), err
	}

	// Get the mount information based on the mountpoint argument.
	mountInfo := self.activeMounts[cleanedMountPoint]
	if mountInfo == nil || mountInfo.mountPoint != cleanedMountPoint {
		err := fmt.Errorf("Could not find mount information for %s", cleanedMountPoint)
		log.Warn(err.Error())
		return err.Error(), err
	}

	err = fuse.Unmount(cleanedMountPoint)
	if err != nil {
		// TODO: try forceful unmount if normal unmount fails
		errStr := fmt.Sprintf("UnMount error: %v", err)
		log.Warn(errStr)
		return errStr, err
	}

	mountInfo.fuseConnection.Close()

	// Remove the mount information from the active map.
	delete(self.activeMounts, cleanedMountPoint)

	succString := fmt.Sprintf("UnMounting %v succeeded", cleanedMountPoint)
	log.Info(succString)
	return succString, nil
}
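// Listmounts returns one line per active mount, listing the manifest hash
// and the mount point.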
func (self *SwarmFS) Listmounts() (string, error) {
	self.activeLock.RLock()
	defer self.activeLock.RUnlock()

	var rows []string
	for mp := range self.activeMounts {
		mountInfo := self.activeMounts[mp]
		rows = append(rows, fmt.Sprintf("Swarm Root: %s, Mount Point: %s", mountInfo.manifestHash, mountInfo.mountPoint))
	}

	return strings.Join(rows, "\n"), nil
}
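// Stop unmounts all active mounts. Errors from individual unmounts are
// logged by Unmount and otherwise ignored.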
func (self *SwarmFS) Stop() bool {
	for mp := range self.activeMounts {
		mountInfo := self.activeMounts[mp]
		self.Unmount(mountInfo.mountPoint)
	}
	return true
}