// github.com/zignig/go-ipfs@v0.0.0-20141111235910-c9e5fdf55a52/core/commands2/add.go

package commands

import (
	"errors"
	"fmt"
	"io"

	cmds "github.com/jbenet/go-ipfs/commands"
	core "github.com/jbenet/go-ipfs/core"
	internal "github.com/jbenet/go-ipfs/core/commands2/internal"
	importer "github.com/jbenet/go-ipfs/importer"
	"github.com/jbenet/go-ipfs/importer/chunk"
	dag "github.com/jbenet/go-ipfs/merkledag"
	pinning "github.com/jbenet/go-ipfs/pin"
)

// ErrDepthLimitExceeded indicates that the maximum recursion depth has been exceeded.
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")

// AddOutput is the output type of the add command: one entry per added object.
type AddOutput struct {
	Added []*Object
}

var addCmd = &cmds.Command{
	Options: []cmds.Option{
		cmds.BoolOption("recursive", "r", "Must be specified when adding directories"),
	},
	Arguments: []cmds.Argument{
		cmds.FileArg("file", true, true, "The path to a file to be added to IPFS"),
	},
	Description: "Add an object to ipfs.",
	Help: `Adds the contents of <path> to ipfs. Use -r to add directories.
Note that directories are added recursively, to form the ipfs
MerkleDAG. A smarter partial add with a staging area (like git)
remains to be implemented.
`,
	Run: func(req cmds.Request) (interface{}, error) {
		n := req.Context().Node

		readers, err := internal.CastToReaders(req.Arguments())
		if err != nil {
			return nil, err
		}

		dagnodes, err := add(n, readers)
		if err != nil {
			return nil, err
		}

		// TODO: include fs paths in output (will need a way to specify paths in underlying filearg system)
		added := make([]*Object, 0, len(req.Arguments()))
		for _, dagnode := range dagnodes {
			object, err := getOutput(dagnode)
			if err != nil {
				return nil, err
			}

			added = append(added, object)
		}

		return &AddOutput{added}, nil
	},
	Marshallers: map[cmds.EncodingType]cmds.Marshaller{
		cmds.Text: func(res cmds.Response) ([]byte, error) {
			val, ok := res.Output().(*AddOutput)
			if !ok {
				return nil, errors.New("unexpected output type, expected *AddOutput")
			}
			added := val.Added
			if len(added) == 1 {
				s := fmt.Sprintf("Added object: %s\n", added[0].Hash)
				return []byte(s), nil
			}

			s := fmt.Sprintf("Added %v objects:\n", len(added))
			for _, obj := range added {
				s += fmt.Sprintf("- %s\n", obj.Hash)
			}
			return []byte(s), nil
		},
	},
	Type: &AddOutput{},
}

// add builds a merkledag node from each reader and adds it to the node's DAG service.
func add(n *core.IpfsNode, readers []io.Reader) ([]*dag.Node, error) {
	mp, ok := n.Pinning.(pinning.ManualPinner)
	if !ok {
		return nil, errors.New("invalid pinner type! expected manual pinner")
	}

	dagnodes := make([]*dag.Node, 0)

	// TODO: allow adding directories (will need support for multiple files in filearg system)

	for _, reader := range readers {
		// Split the reader into chunks and assemble them into a DAG.
		node, err := importer.BuildDagFromReader(reader, n.DAG, mp, chunk.DefaultSplitter)
		if err != nil {
			return nil, err
		}

		err = addNode(n, node)
		if err != nil {
			return nil, err
		}

		dagnodes = append(dagnodes, node)
	}

	return dagnodes, nil
}

// addNode stores the node (and its children) locally and pins it recursively.
func addNode(n *core.IpfsNode, node *dag.Node) error {
	err := n.DAG.AddRecursive(node) // add the file to the graph + local storage
	if err != nil {
		return err
	}

	err = n.Pinning.Pin(node, true) // ensure we keep it
	if err != nil {
		return err
	}

	return nil
}
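
// --- Usage sketch (not part of the original file) ---
//
// A minimal, hypothetical example of driving the add helper directly,
// assuming the caller already holds an initialized *core.IpfsNode (for
// instance from a test fixture) and a set of io.Readers. getOutput is the
// same package-level helper that the Run handler above relies on.
func exampleAdd(n *core.IpfsNode, readers []io.Reader) error {
	// Build one merkledag node per reader; each is stored and pinned by add.
	dagnodes, err := add(n, readers)
	if err != nil {
		return err
	}

	// Print the hash of every added object, mirroring the text marshaller's output.
	for _, nd := range dagnodes {
		obj, err := getOutput(nd)
		if err != nil {
			return err
		}
		fmt.Println("added:", obj.Hash)
	}
	return nil
}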