github.com/olivere/camlistore@v0.0.0-20140121221811-1b7ac2da0199/cmd/camget/camget.go

/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"bytes"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/buildinfo"
	"camlistore.org/pkg/cacher"
	"camlistore.org/pkg/client"
	"camlistore.org/pkg/httputil"
	"camlistore.org/pkg/index"
	"camlistore.org/pkg/schema"
)

var (
	flagVersion     = flag.Bool("version", false, "show version")
	flagVerbose     = flag.Bool("verbose", false, "be verbose")
	flagHTTP        = flag.Bool("verbose_http", false, "show HTTP request summaries")
	flagCheck       = flag.Bool("check", false, "just check for the existence of listed blobs, returning 0 if all are present")
	flagOutput      = flag.String("o", "-", "Output file/directory to create. Use -f to overwrite.")
	flagGraph       = flag.Bool("graph", false, "Output a graphviz directed graph .dot file of the provided root schema blob, to be rendered with 'dot -Tsvg -o graph.svg graph.dot'")
	flagContents    = flag.Bool("contents", false, "If true and the target blobref is a 'bytes' or 'file' schema blob, the contents of that file are output instead.")
	flagShared      = flag.String("shared", "", "If non-empty, the URL of a \"share\" blob. The URL will be used as the root of future fetches. Only \"haveref\" shares are currently supported.")
	flagTrustedCert = flag.String("cert", "", "If non-empty, the fingerprint (20-digit lowercase prefix of the SHA-256 of the complete certificate) of the TLS certificate we trust for the share URL. Requires --shared.")
	flagInsecureTLS = flag.Bool("insecure", false, "If set, when using TLS, the server's certificate verification is disabled, and it is not checked against the trustedCerts in the client configuration either.")
)
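// Example invocations (illustrative only; the blobrefs and URLs below are
// placeholders, not real values):
//
//	camget sha1-xxxx...                          # write the raw blob to stdout
//	camget -contents sha1-xxxx...                # write the bytes that a "file" schema blob describes
//	camget -o /tmp/restore sha1-xxxx...          # reconstruct a file or directory tree under /tmp/restore
//	camget -shared https://example.com/share/... # fetch the target of a "haveref" share URL (no blobref arguments)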
func main() {
	client.AddFlags()
	flag.Parse()

	if *flagVersion {
		fmt.Fprintf(os.Stderr, "camget version: %s\n", buildinfo.Version())
		return
	}

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []blob.Ref

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared,
			client.OptionInsecure(*flagInsecureTLS),
			client.OptionTrustedCert(*flagTrustedCert))
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		if *flagTrustedCert != "" {
			log.Fatal("Can't use --cert without --shared.")
		}
		cl = client.NewOrFail()
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br, ok := blob.Parse(arg)
			if !ok {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}

	cl.InsecureTLS = *flagInsecureTLS
	tr := cl.TransportForConfig(&client.TransportConfig{
		Verbose: *flagHTTP,
	})
	httpStats, _ := tr.(*httputil.StatsTransport)
	cl.SetHTTPClient(&http.Client{Transport: tr})

	diskCacheFetcher, err := cacher.NewDiskCache(cl)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	defer diskCacheFetcher.Clean()
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", diskCacheFetcher.Root)
	}

	for _, br := range items {
		if *flagGraph {
			printGraph(diskCacheFetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exist.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				rc, err = schema.NewFileReader(diskCacheFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(diskCacheFetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(diskCacheFetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
	}
}

// fetch returns a ReadCloser for the raw contents of br, logging the fetch
// when -verbose is set.
func fetch(src blob.StreamingFetcher, br blob.Ref) (r io.ReadCloser, err error) {
	if *flagVerbose {
		log.Printf("Fetching %s", br.String())
	}
	r, _, err = src.FetchStreaming(br)
	if err != nil {
		return nil, fmt.Errorf("Failed to fetch %s: %s", br, err)
	}
	return r, err
}

// A little less than the sniffer will take, so we don't truncate.
const sniffSize = 900 * 1024

// smartFetch fetches the things that blobs point to, not just blobs.
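//
// Depending on the schema type of the target blob, it behaves as follows (a
// summary of the logic below, not additional API): non-schema ("opaque")
// blobs are written verbatim to targ; "directory" blobs create
// targ/<file name> and recurse into the directory's entries; "static-set"
// blobs have their members fetched concurrently by a small pool of workers;
// "file" blobs are reassembled from their chunks and written under targ,
// skipping files that already exist with the expected size.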
func smartFetch(src blob.StreamingFetcher, targ string, br blob.Ref) error {
	rc, err := fetch(src, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := index.NewBlobSniffer(br)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	b, ok := sniffer.SchemaBlob()

	if !ok {
		if *flagVerbose {
			log.Printf("Fetching opaque data %v into %q", br, targ)
		}

		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewReader(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	switch b.Type() {
	case "directory":
		dir := filepath.Join(targ, b.FileName())
		if *flagVerbose {
			log.Printf("Fetching directory %v into %s", br, dir)
		}
		if err := os.MkdirAll(dir, b.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, b); err != nil {
			log.Print(err)
		}
		entries, ok := b.DirectoryEntries()
		if !ok {
			return fmt.Errorf("bad entries blobref in dir %v", b.BlobRef())
		}
		return smartFetch(src, dir, entries)
	case "static-set":
		if *flagVerbose {
			log.Printf("Fetching directory entries %v into %s", br, targ)
		}

		// Fetch the directory entries concurrently with a small worker pool.
		const numWorkers = 10
		type work struct {
			br   blob.Ref
			errc chan<- error
		}
		members := b.StaticSetMembers()
		workc := make(chan work, len(members))
		defer close(workc)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for wi := range workc {
					wi.errc <- smartFetch(src, targ, wi.br)
				}
			}()
		}
		var errcs []<-chan error
		for _, mref := range members {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			workc <- work{mref, errc}
		}
		for _, errc := range errcs {
			if err := <-errc; err != nil {
				return err
			}
		}
		return nil
	case "file":
		seekFetcher := blob.SeekerFromStreamingFetcher(src)
		fr, err := schema.NewFileReader(seekFetcher, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		fr.LoadAllChunks()
		defer fr.Close()

		name := filepath.Join(targ, b.FileName())

		// If a file of the expected size already exists at the
		// destination, assume it's the same content and skip it.
		if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
			if *flagVerbose {
				log.Printf("Skipping %s; already exists.", name)
			}
			return nil
		}

		if *flagVerbose {
			log.Printf("Writing %s to %s ...", br, name)
		}

		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, b); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + b.Type())
	}
}

// setFileMeta applies the permissions and modification time recorded in blob
// to the file or directory at name.
func setFileMeta(name string, blob *schema.Blob) error {
	err1 := os.Chmod(name, blob.FileMode())
	var err2 error
	if mt := blob.ModTime(); !mt.IsZero() {
		err2 = os.Chtimes(name, mt, mt)
	}
	// TODO: we previously did os.Chown here, but it's rarely wanted,
	// and then the schema.Blob refactor broke it, so it's gone.
	// Add it back later once we care?
	for _, err := range []error{err1, err2} {
		if err != nil {
			return err
		}
	}
	return nil
}