github.com/janelia-flyem/dvid@v1.0.0/datatype/annotation/annotation.go 1 // Package annotation supports point annotation management and queries. 2 package annotation 3 4 import ( 5 "bytes" 6 "encoding/binary" 7 "encoding/gob" 8 "encoding/json" 9 "fmt" 10 "io" 11 "io/ioutil" 12 "net/http" 13 "reflect" 14 "sort" 15 "strings" 16 "sync" 17 "time" 18 19 "github.com/janelia-flyem/dvid/datastore" 20 "github.com/janelia-flyem/dvid/datatype/labelarray" 21 "github.com/janelia-flyem/dvid/datatype/labelblk" 22 "github.com/janelia-flyem/dvid/datatype/labelmap" 23 "github.com/janelia-flyem/dvid/datatype/labelvol" 24 "github.com/janelia-flyem/dvid/datatype/roi" 25 "github.com/janelia-flyem/dvid/dvid" 26 "github.com/janelia-flyem/dvid/storage" 27 ) 28 29 const ( 30 Version = "0.1" 31 RepoURL = "github.com/janelia-flyem/dvid/datatype/annotation" 32 TypeName = "annotation" 33 ) 34 35 const helpMessage = ` 36 API for annotation data type (github.com/janelia-flyem/dvid/datatype/annotation) 37 ======================================================================================= 38 39 Note: UUIDs referenced below are strings that may either be a unique prefix of a 40 hexadecimal UUID string (e.g., 3FA22) or a branch leaf specification that adds 41 a colon (":") followed by the case-dependent branch name. In the case of a 42 branch leaf specification, the unique UUID prefix just identifies the repo of 43 the branch, and the UUID referenced is really the leaf of the branch name. 44 For example, if we have a DAG with root A -> B -> C where C is the current 45 HEAD or leaf of the "master" (default) branch, then asking for "B:master" is 46 the same as asking for "C". If we add another version so A -> B -> C -> D, then 47 references to "B:master" now return the data from "D". 48 49 Command-line: 50 51 $ dvid repo <UUID> new annotation <data name> <settings...> 52 53 Adds newly named data of the 'type name' to repo with specified UUID. 54 55 Example: 56 57 $ dvid repo 3f8c new annotation synapses 58 59 Arguments: 60 61 UUID Hexadecimal string with enough characters to uniquely identify a version node. 62 data name Name of data to create, e.g., "synapses" 63 settings Configuration settings in "key=value" format separated by spaces. 64 65 $ dvid node <UUID> <data name> reload <settings...> 66 67 Forces asynchronous denormalization of all annotations for labels and tags. Because 68 this is a special request for mass mutations that require static "normalized" data 69 (only verifies and changes the label and tag denormalizations), any POST requests 70 while this is running result in an error. 71 72 Configuration Settings (case-insensitive keys) 73 74 check "true": (default "false") check denormalizations, writing to log when issues 75 are detected, and only replacing denormalization when it is incorrect. 76 inmemory "false": (default "true") use in-memory reload, which assumes the server 77 has enough memory to hold all annotations in memory. 78 79 ------------------ 80 81 HTTP API (Level 2 REST): 82 83 GET <api URL>/node/<UUID>/<data name>/help 84 85 Returns data-specific help message. 86 87 88 GET <api URL>/node/<UUID>/<data name>/info 89 POST <api URL>/node/<UUID>/<data name>/info 90 91 Retrieves or puts DVID-specific data properties for this data instance. 92 93 Example: 94 95 GET <api URL>/node/3f8c/synapses/info 96 97 Returns JSON with configuration settings. 98 99 Arguments: 100 101 UUID Hexadecimal string with enough characters to uniquely identify a version node. 102 data name Name of annotation data. 
103 104 105 POST <api URL>/node/<UUID>/<data name>/sync?<options> 106 107 Appends to list of data instances with which the annotations are synced. Expects JSON to be POSTed 108 with the following format: 109 110 { "sync": "labels,bodies" } 111 112 To delete syncs, pass an empty string of names with query string "replace=true": 113 114 { "sync": "" } 115 116 The "sync" property should be followed by a comma-delimited list of data instances that MUST 117 already exist. Currently, syncs should be created before any annotations are pushed to 118 the server. If annotations already exist, these are currently not synced. 119 120 The annotations data type only accepts syncs to label-oriented datatypes: labelblk, labelvol, 121 labelarray, and labelmap. 122 123 POST Query-string Options: 124 125 replace Set to "true" if you want passed syncs to replace and not be appended to current syncs. 126 Default operation is false. 127 128 129 GET <api URL>/node/<UUID>/<data name>/tags 130 POST <api URL>/node/<UUID>/<data name>/tags?<options> 131 132 GET retrieves JSON of tags for this instance. 133 POST appends or replaces tags provided in POST body. Expects JSON to be POSTed 134 with the following format: 135 136 { "tag1": "anything you want", "tag2": "something else" } 137 138 To delete tags, pass an empty object with query string "replace=true". 139 140 POST Query-string Options: 141 142 replace Set to "true" if you want passed tags to replace and not be appended to current tags. 143 Default operation is false (append). 144 145 146 Note: For the following URL endpoints that return and accept POSTed JSON values, see the JSON format 147 at end of this documentation. 148 149 GET <api URL>/node/<UUID>/<data name>/label/<label>[?<options>] 150 151 Returns all point annotations within the given label as an array of elements. 152 This endpoint is only available if the annotation data instance is synced with 153 voxel label data instances (labelblk, labelarray, labelmap). 154 155 GET Query-string Option: 156 157 relationships Set to true to return all relationships for each annotation. 158 159 Example: 160 161 GET http://foo.com/api/node/83af/myannotations/label/23?relationships=true 162 163 164 POST <api URL>/node/<UUID>/<data name>/labels 165 166 Ingest point annotations for labels. The POSTed JSON should be an object 167 with label string keys (allowing uint64) and a string that contains the 168 JSON array of annotations (without relationships) for that label. For example: 169 170 { "23": "[{...},{...}]", "45": "[{...},{...}]" } 171 172 173 GET <api URL>/node/<UUID>/<data name>/tag/<tag>[?<options>] 174 175 Returns all point annotations with the given tag as an array of elements. 176 By default, the Relationships of an annotation to other annotations is not 177 returned. If you want the Relationships, use the query string below. 178 179 GET Query-string Option: 180 181 relationships Set to true to return all relationships for each annotation. 182 183 Example: 184 185 GET http://foo.com/api/node/83af/myannotations/tag/goodstuff?relationships=true 186 187 188 DELETE <api URL>/node/<UUID>/<data name>/element/<coord>[?<options>] 189 190 Deletes a point annotation given its location. 191 192 Kafka JSON message generated by this request where "User" is optional: 193 { 194 "Action": "element-delete", 195 "Point": <3d point>, 196 "UUID": <UUID on which delete was done>, 197 "User": <user name> 198 } 199 200 POST Query-string Options: 201 202 kafkalog Set to "off" if you don't want this mutation logged to kafka. 
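    Example (illustrative only; the coordinate is assumed to be of the form X_Y_Z,
    as in the /move endpoint described below):

    DELETE <api URL>/node/3f8c/synapses/element/27_31_19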
203 204 205 GET <api URL>/node/<UUID>/<data name>/roi/<ROI specification> 206 207 Returns all point annotations within the ROI. The ROI specification must be specified 208 using a string of format "roiname,uuid". If just "roiname" is specified without 209 a full UUID string, the current UUID of the request will be used. Currently, this 210 request will only work for ROIs that have same block size as the annotation data instance. 211 212 The returned point annotations will be an array of elements. 213 214 GET <api URL>/node/<UUID>/<data name>/elements/<size>/<offset> 215 216 Returns all point annotations within subvolume of given size with upper left corner 217 at given offset. The size and offset should be voxels separated by underscore, e.g., 218 "400_300_200" can describe a 400 x 300 x 200 volume or an offset of (400,300,200). 219 220 The returned point annotations will be an array of elements with relationships. 221 222 POST <api URL>/node/<UUID>/<data name>/elements[?<options>] 223 224 Adds or modifies point annotations. The POSTed content is an array of elements. 225 Note that deletes are handled via a separate API (see above). 226 227 Kafka JSON message generated by this request where "User" is optional: 228 { 229 "Action": "element-post", 230 "DataRef": <string for reference to posted binary data>, 231 "UUID": <UUID on which POST was done>, 232 "User": <user name> 233 } 234 235 The data reference above can be used to download the binary data by calling 236 this data instance's BlobStore API. See the node-level HTTP API documentation. 237 238 GET /api/node/{uuid}/{data name}/blobstore/{reference} 239 240 POST Query-string Options: 241 242 kafkalog Set to "off" if you don't want this mutation logged to kafka. 243 244 GET <api URL>/node/<UUID>/<data name>/scan[?<options>] 245 246 Scans the annotations stored in blocks and returns simple stats on usage 247 in JSON format. 248 249 GET Query-string Options: 250 251 byCoord If "true" (not set by default), the scan bounds will be by min/max 252 block coord instead of internal constants. 253 keysOnly If "true" (not set by default), scans using keys only range query 254 and will not check if value is empty. 255 256 257 GET <api URL>/node/<UUID>/<data name>/all-elements 258 259 Returns all point annotations in the entire data instance, which could exceed data 260 response sizes (set by server) if too many elements are present. This should be 261 equivalent to the /blocks endpoint but without the need to determine extents. 262 263 The returned stream of data is the same as /blocks endpoint below. 264 265 266 GET <api URL>/node/<UUID>/<data name>/blocks/<size>/<offset> 267 268 Returns all point annotations within all blocks intersecting the subvolume of given size 269 with upper left corner at given offset. The size and offset should be voxels separated by 270 underscore, e.g., "400_300_200" can describe a 400 x 300 x 200 volume or an offset of (400,300,200). 271 272 Unlike the /elements endpoint, the /blocks endpoint is the fastest way to retrieve 273 all point annotations within a bounding box. It does not screen points based on the specified 274 subvolume but simply streams all elements (including relationships) in the intersecting blocks. 275 The fastest way to get all point annotations in entire volume (no bounding box) is via /all-elements. 276 277 The returned stream of data is an object with block coordinate as keys and an array of point 278 annotation elements within that block, meeting the JSON described below. 
279 280 If the data instance's Tag "ScanAllForBlocks" is set to "true", it's assumed there are 281 relatively few annotations across blocks so a single range query is used rather than many 282 range queries to span the given X range of the bounding box. 283 284 Returned JSON: 285 286 { 287 "10,381,28": [ array of point annotation elements ], 288 "11,381,28": [ array of point annotation elements ], 289 ... 290 } 291 292 POST <api URL>/node/<UUID>/<data name>/blocks[?<options>] 293 294 Unlike the POST /elements endpoint, the /blocks endpoint is the fastest way to store 295 all point annotations and assumes the caller has (1) properly partitioned the elements 296 into the appropriate blocks for the block size (default 64) and (2) will do a POST /reload 297 to create the denormalized Label and Tag versions of the annotations after all 298 ingestion is completed. 299 300 This low-level ingestion also does not transmit subscriber events to associated 301 synced data (e.g., labelsz). 302 303 The POSTed JSON should be similar to the GET version with the block coordinate as 304 the key: 305 306 { 307 "10,381,28": [ array of point annotation elements ], 308 "11,381,28": [ array of point annotation elements ], 309 ... 310 } 311 312 POST Query-string Options: 313 314 kafkalog Set to "off" if you don't want this mutation logged to kafka. 315 316 317 POST <api URL>/node/<UUID>/<data name>/move/<from_coord>/<to_coord>[?<options>] 318 319 Moves the point annotation from <from_coord> to <to_coord> where 320 <from_coord> and <to_coord> are of the form X_Y_Z. 321 322 Kafka JSON message generated by this request where "User" is optional: 323 { 324 "Action": "element-move", 325 "From": <3d point>, 326 "To": <3d point>, 327 "UUID": <UUID on which move was done>, 328 "User": <user name> 329 } 330 331 POST Query-string Options: 332 333 kafkalog Set to "off" if you don't want this mutation logged to kafka. 334 335 336 337 ------ 338 339 Example JSON Format of point annotation elements with ... marking omitted elements: 340 341 [ 342 { 343 "Pos":[33,30,31], 344 "Kind":"PostSyn", 345 "Rels":[ 346 {"Rel":"PostSynTo", "To":[15,27,35]} 347 ], 348 "Tags":["Synapse1"], 349 "Prop": { 350 "SomeVar": "SomeValue", 351 "Another Var": "A More Complex Value" 352 } 353 }, 354 { 355 "Pos":[15,27,35], 356 "Kind":"PreSyn", 357 "Rels":[ 358 {"Rel":"PreSynTo", "To":[20,30,40]}, 359 {"Rel":"PreSynTo", "To":[14,25,37]}, 360 {"Rel":"PreSynTo", "To":[33,30,31]} 361 ], 362 "Tags":["Synapse1"] 363 }, 364 { 365 "Pos":[20,30,40], 366 "Kind":"PostSyn", 367 "Rels":[ 368 {"Rel":"PostSynTo","To":[15,27,35]} 369 ], 370 "Tags":["Synapse1"], 371 "Prop": { 372 "SomeVar": "SomeValue", 373 "Another Var 2": "A More Complex Value 2" 374 } 375 }, 376 ... 377 ] 378 379 The "Kind" property can be one of "Unknown", "PostSyn", "PreSyn", "Gap", or "Note". 380 381 The "Rel" property can be one of "UnknownRelationship", "PostSynTo", "PreSynTo", "ConvergentTo", or "GroupedWith". 382 383 The "Tags" property will be indexed and so can be costly if used for very large numbers of synapse elements. 384 385 The "Prop" property is an arbitrary object with string values. The "Prop" object's keys are not indexed. 386 387 -------- 388 389 POST <api URL>/node/<UUID>/<data name>/reload[?<options>] 390 391 Forces asynchronous recreation of its tag and label indexed denormalizations. Can be 392 used to initialize a newly added instance. Note that this instance will return errors 393 for any POST request while denormalization is ongoing. 
394 395 POST Query-string Options: 396 397 check "true": (default "false") check denormalizations, writing to log when issues 398 are detected, and only replacing denormalization when it is incorrect. 399 inmemory "false": (default "true") use in-memory reload, which assumes the server 400 has enough memory to hold all annotations in memory. 401 ` 402 403 var ( 404 dtype *Type 405 406 DefaultBlockSize int32 = 64 // labelblk.DefaultBlockSize 407 DefaultRes float32 = labelblk.DefaultRes 408 DefaultUnits = labelblk.DefaultUnits 409 ) 410 411 func init() { 412 dtype = new(Type) 413 dtype.Type = datastore.Type{ 414 Name: TypeName, 415 URL: RepoURL, 416 Version: Version, 417 Requirements: &storage.Requirements{ 418 Batcher: true, 419 }, 420 } 421 422 // See doc for package on why channels are segregated instead of interleaved. 423 // Data types must be registered with the datastore to be used. 424 datastore.Register(dtype) 425 426 // Need to register types that will be used to fulfill interfaces. 427 gob.Register(&Type{}) 428 gob.Register(&Data{}) 429 } 430 431 const ( 432 UnknownElem ElementType = iota 433 PostSyn // Post-synaptic element 434 PreSyn // Pre-synaptic element 435 Gap // Gap junction 436 Note // A note or bookmark with some description 437 ) 438 439 // ElementType gives the type of a synaptic element. 440 type ElementType uint8 441 442 // IsSynaptic returns true if the ElementType is some synaptic component. 443 func (e ElementType) IsSynaptic() bool { 444 switch e { 445 case PostSyn, PreSyn, Gap: 446 return true 447 default: 448 return false 449 } 450 } 451 452 // StringToElementType converts a string 453 func StringToElementType(s string) ElementType { 454 switch s { 455 case "PostSyn": 456 return PostSyn 457 case "PreSyn": 458 return PreSyn 459 case "Gap": 460 return Gap 461 case "Note": 462 return Note 463 default: 464 return UnknownElem 465 } 466 } 467 468 func (e ElementType) String() string { 469 switch e { 470 case PostSyn: 471 return "PostSyn" 472 case PreSyn: 473 return "PreSyn" 474 case Gap: 475 return "Gap" 476 case Note: 477 return "Note" 478 default: 479 return fmt.Sprintf("Unknown element type: %d", e) 480 } 481 } 482 483 func (e ElementType) MarshalJSON() ([]byte, error) { 484 switch e { 485 case UnknownElem: 486 return []byte(`"Unknown"`), nil 487 case PostSyn: 488 return []byte(`"PostSyn"`), nil 489 case PreSyn: 490 return []byte(`"PreSyn"`), nil 491 case Gap: 492 return []byte(`"Gap"`), nil 493 case Note: 494 return []byte(`"Note"`), nil 495 default: 496 return nil, fmt.Errorf("Unknown element type: %s", e) 497 } 498 } 499 500 func (e *ElementType) UnmarshalJSON(b []byte) error { 501 switch string(b) { 502 case `"Unknown"`: 503 *e = UnknownElem 504 case `"PostSyn"`: 505 *e = PostSyn 506 case `"PreSyn"`: 507 *e = PreSyn 508 case `"Gap"`: 509 *e = Gap 510 case `"Note"`: 511 *e = Note 512 default: 513 return fmt.Errorf("Unknown element type in JSON: %s", string(b)) 514 } 515 return nil 516 } 517 518 type RelationType uint8 519 520 const ( 521 UnknownRel RelationType = iota 522 PostSynTo 523 PreSynTo 524 ConvergentTo 525 GroupedWith 526 ) 527 528 func (r RelationType) MarshalJSON() ([]byte, error) { 529 switch r { 530 case UnknownRel: 531 return []byte(`"UnknownRelationship"`), nil 532 case PostSynTo: 533 return []byte(`"PostSynTo"`), nil 534 case PreSynTo: 535 return []byte(`"PreSynTo"`), nil 536 case ConvergentTo: 537 return []byte(`"ConvergentTo"`), nil 538 case GroupedWith: 539 return []byte(`"GroupedWith"`), nil 540 default: 541 return nil, fmt.Errorf("Unknown 
relation type: %d", r) 542 } 543 } 544 545 func (r *RelationType) UnmarshalJSON(b []byte) error { 546 switch string(b) { 547 case `"UnknownRelationship"`: 548 *r = UnknownRel 549 case `"PostSynTo"`: 550 *r = PostSynTo 551 case `"PreSynTo"`: 552 *r = PreSynTo 553 case `"ConvergentTo"`: 554 *r = ConvergentTo 555 case `"GroupedWith"`: 556 *r = GroupedWith 557 default: 558 return fmt.Errorf("Unknown relationship type in JSON: %s", string(b)) 559 } 560 return nil 561 } 562 563 // Tag is a string description of a synaptic element grouping, e.g., "convergent". 564 type Tag string 565 566 // Relationship is a link between two synaptic elements. 567 type Relationship struct { 568 Rel RelationType 569 To dvid.Point3d 570 } 571 572 type Relationships []Relationship 573 574 func (r Relationships) Len() int { 575 return len(r) 576 } 577 578 func (r Relationships) Less(i, j int) bool { 579 if r[i].To.Less(r[j].To) { 580 return true 581 } 582 if r[i].To.Equals(r[j].To) { 583 return r[i].Rel < r[j].Rel 584 } 585 return false 586 } 587 588 func (r Relationships) Swap(i, j int) { 589 r[i], r[j] = r[j], r[i] 590 } 591 592 // given a list of element indices to be deleted, it returns slice of remaining Relationships 593 func (r Relationships) delete(todel []int) Relationships { 594 out := make(Relationships, len(r)-len(todel)) 595 j, k := 0, 0 596 for i, rel := range r { 597 if k >= len(todel) || i != todel[k] { 598 out[j] = rel 599 j++ 600 } else { 601 k++ 602 } 603 } 604 return out 605 } 606 607 type Tags []Tag 608 609 func (t Tags) Len() int { 610 return len(t) 611 } 612 613 func (t Tags) Less(i, j int) bool { 614 return t[i] < t[j] 615 } 616 617 func (t Tags) Swap(i, j int) { 618 t[i], t[j] = t[j], t[i] 619 } 620 621 // Changes returns tags removed or added from the receiver. 622 func (t Tags) Changes(t2 Tags) (removed, added Tags) { 623 if len(t) == 0 { 624 added = make(Tags, len(t2)) 625 copy(added, t2) 626 return 627 } 628 if len(t2) == 0 { 629 removed = make(Tags, len(t)) 630 copy(removed, t) 631 return 632 } 633 curTags := make(map[Tag]struct{}, len(t)) 634 newTags := make(map[Tag]struct{}, len(t2)) 635 for _, tag := range t { 636 curTags[tag] = struct{}{} 637 } 638 for _, tag := range t2 { 639 newTags[tag] = struct{}{} 640 } 641 for _, tag := range t2 { 642 if _, found := curTags[tag]; !found { 643 added = append(added, tag) 644 } 645 } 646 for _, tag := range t { 647 if _, found := newTags[tag]; !found { 648 removed = append(removed, tag) 649 } 650 } 651 return 652 } 653 654 // Removed returns tags removed from the receiver. 655 func (t Tags) Removed(t2 Tags) Tags { 656 if len(t) == 0 { 657 return Tags{} 658 } 659 if len(t2) == 0 { 660 removed := make(Tags, len(t)) 661 copy(removed, t) 662 return removed 663 } 664 newTags := make(map[Tag]struct{}, len(t2)) 665 for _, tag := range t2 { 666 newTags[tag] = struct{}{} 667 } 668 var removed Tags 669 for _, tag := range t { 670 if _, found := newTags[tag]; !found { 671 removed = append(removed, tag) 672 } 673 } 674 return removed 675 } 676 677 // ElementNR describes a synaptic element's properties with No Relationships (NR), 678 // used for label and tag annotations while block-indexed annotations include the 679 // relationships. 
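// A hypothetical literal, just to illustrate the fields (values borrowed from the
// example JSON in the help message above; not taken from the original source):
//
//	elem := ElementNR{
//		Pos:  dvid.Point3d{33, 30, 31},
//		Kind: PostSyn,
//		Tags: Tags{"Synapse1"},
//		Prop: map[string]string{"SomeVar": "SomeValue"},
//	}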
680 type ElementNR struct { 681 Pos dvid.Point3d 682 Kind ElementType 683 Tags Tags // Indexed 684 Prop map[string]string // Non-Indexed 685 } 686 687 func (e ElementNR) String() string { 688 s := fmt.Sprintf("Pos %s; Kind: %s; ", e.Pos, e.Kind) 689 s += fmt.Sprintf("Tags: %v; Prop: %v", e.Tags, e.Prop) 690 return s 691 } 692 693 func (e ElementNR) Copy() *ElementNR { 694 c := new(ElementNR) 695 c.Pos = e.Pos 696 c.Kind = e.Kind 697 c.Tags = make(Tags, len(e.Tags)) 698 copy(c.Tags, e.Tags) 699 c.Prop = make(map[string]string, len(e.Prop)) 700 for k, v := range e.Prop { 701 c.Prop[k] = v 702 } 703 return c 704 } 705 706 // Element describes a synaptic element's properties, including Relationships. 707 type Element struct { 708 ElementNR 709 Rels Relationships 710 } 711 712 func (e Element) Copy() *Element { 713 c := new(Element) 714 c.ElementNR = *(e.ElementNR.Copy()) 715 c.Rels = make(Relationships, len(e.Rels)) 716 copy(c.Rels, e.Rels) 717 return c 718 } 719 720 // ElementsNR is a slice of elements without relationships. 721 type ElementsNR []ElementNR 722 723 // Normalize returns ElementsNR that can be used for DeepEqual because all positions and tags are sorted. 724 func (elems ElementsNR) Normalize() ElementsNR { 725 // For every element, create a duplicate that has sorted relationships and sorted tags. 726 out := make(ElementsNR, len(elems), len(elems)) 727 for i, elem := range elems { 728 out[i].Pos = elem.Pos 729 out[i].Kind = elem.Kind 730 out[i].Tags = make(Tags, len(elem.Tags)) 731 copy(out[i].Tags, elem.Tags) 732 733 out[i].Prop = make(map[string]string, len(elem.Prop)) 734 for k, v := range elem.Prop { 735 out[i].Prop[k] = v 736 } 737 738 sort.Sort(out[i].Tags) 739 } 740 741 // Sort all elements based on their position. 742 sort.Sort(out) 743 return out 744 } 745 746 // --- Sort interface 747 748 func (elems ElementsNR) Len() int { 749 return len(elems) 750 } 751 752 // Less returns true if element[i] < element[j] where ordering is determined by 753 // Pos and Kind in that order. Tags are not considered in ordering. 754 func (elems ElementsNR) Less(i, j int) bool { 755 if elems[i].Pos.Less(elems[j].Pos) { 756 return true 757 } 758 if elems[i].Pos.Equals(elems[j].Pos) { 759 return elems[i].Kind < elems[j].Kind 760 } 761 return false 762 } 763 764 func (elems ElementsNR) Swap(i, j int) { 765 elems[i], elems[j] = elems[j], elems[i] 766 } 767 768 // Adds elements but if element at position already exists, it replaces the properties of that element. 769 func (elems *ElementsNR) add(toAdd ElementsNR) { 770 emap := make(map[string]int) 771 for i, elem := range *elems { 772 emap[elem.Pos.MapKey()] = i 773 } 774 for _, elem := range toAdd { 775 i, found := emap[elem.Pos.MapKey()] 776 if !found { 777 *elems = append(*elems, *elem.Copy()) 778 } else { 779 (*elems)[i] = elem 780 } 781 } 782 } 783 784 // Deletes element position as well as relationships that reference that element. 785 func (elems *ElementsNR) delete(pt dvid.Point3d) (deleted *ElementNR, changed bool) { 786 // Delete any elements at point. 787 var cut = -1 788 for i, elem := range *elems { 789 if pt.Equals(elem.Pos) { 790 cut = i 791 break 792 } 793 } 794 if cut >= 0 { 795 deleted = (*elems)[cut].Copy() 796 changed = true 797 (*elems)[cut] = (*elems)[len(*elems)-1] // Delete without preserving order. 798 *elems = (*elems)[:len(*elems)-1] 799 } 800 return 801 } 802 803 // Moves element position as well as relationships. 
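// For ElementsNR there are no relationships to update; if deleteElement is true,
// the moved element is also removed via swap-with-last, so slice order is not preserved.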
804 func (elems *ElementsNR) move(from, to dvid.Point3d, deleteElement bool) (moved *ElementNR, changed bool) { 805 for i, elem := range *elems { 806 if from.Equals(elem.Pos) { 807 changed = true 808 (*elems)[i].Pos = to 809 moved = (*elems)[i].Copy() 810 if deleteElement { 811 (*elems)[i] = (*elems)[len(*elems)-1] // Delete without preserving order. 812 *elems = (*elems)[:len(*elems)-1] 813 break 814 } 815 } 816 } 817 return 818 } 819 820 // Elements is a slice of Element, which includes relationships. 821 type Elements []Element 822 823 // helper function that just returns slice of positions suitable for intersect calcs in dvid package. 824 func (elems Elements) positions() []dvid.Point3d { 825 pts := make([]dvid.Point3d, len(elems)) 826 for i, elem := range elems { 827 pts[i] = elem.Pos 828 } 829 return pts 830 } 831 832 // Returns Elements that can be used for DeepEqual because all positions, relationships, and tags are sorted. 833 func (elems Elements) Normalize() Elements { 834 // For every element, create a duplicate that has sorted relationships and sorted tags. 835 out := make(Elements, len(elems), len(elems)) 836 for i, elem := range elems { 837 out[i].Pos = elem.Pos 838 out[i].Kind = elem.Kind 839 out[i].Rels = make(Relationships, len(elem.Rels)) 840 copy(out[i].Rels, elem.Rels) 841 out[i].Tags = make(Tags, len(elem.Tags)) 842 copy(out[i].Tags, elem.Tags) 843 844 out[i].Prop = make(map[string]string, len(elem.Prop)) 845 for k, v := range elem.Prop { 846 out[i].Prop[k] = v 847 } 848 849 sort.Sort(out[i].Rels) 850 sort.Sort(out[i].Tags) 851 } 852 853 // Sort all elements based on their position. 854 sort.Sort(out) 855 return out 856 } 857 858 // Adds elements but if element at position already exists, it replaces the properties of that element. 859 func (elems *Elements) add(toAdd Elements) { 860 emap := make(map[string]int) 861 for i, elem := range *elems { 862 emap[elem.Pos.MapKey()] = i 863 } 864 for _, elem := range toAdd { 865 i, found := emap[elem.Pos.MapKey()] 866 if !found { 867 *elems = append(*elems, *elem.Copy()) 868 } else { 869 (*elems)[i] = elem 870 } 871 } 872 } 873 874 // Deletes element position as well as relationships that reference that element. 875 func (elems *Elements) delete(pt dvid.Point3d) (deleted *Element, changed bool) { 876 // Delete any elements at point. 877 var cut = -1 878 for i, elem := range *elems { 879 if pt.Equals(elem.Pos) { 880 cut = i 881 break 882 } 883 } 884 if cut >= 0 { 885 deleted = (*elems)[cut].Copy() 886 changed = true 887 (*elems)[cut] = (*elems)[len(*elems)-1] // Delete without preserving order. 888 *elems = (*elems)[:len(*elems)-1] 889 } 890 891 // Delete any relationships with the point. 892 if elems.deleteRel(pt) { 893 changed = true 894 } 895 return 896 } 897 898 func (elems *Elements) deleteRel(pt dvid.Point3d) (changed bool) { 899 for i, elem := range *elems { 900 // Remove any relationship with given pt. 901 var todel []int 902 for j, r := range elem.Rels { 903 if pt.Equals(r.To) { 904 todel = append(todel, j) 905 } 906 } 907 if len(todel) > 0 { 908 (*elems)[i].Rels = elem.Rels.delete(todel) 909 changed = true 910 } 911 } 912 return 913 } 914 915 // Moves element position as well as relationships. 
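// After repositioning the element itself, any Rels in other elements that still
// point to the old position are rewritten to point to the new position.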
916 func (elems *Elements) move(from, to dvid.Point3d, deleteElement bool) (moved *Element, changed bool) { 917 for i, elem := range *elems { 918 if from.Equals(elem.Pos) { 919 changed = true 920 (*elems)[i].Pos = to 921 moved = (*elems)[i].Copy() 922 if deleteElement { 923 (*elems)[i] = (*elems)[len(*elems)-1] // Delete without preserving order. 924 *elems = (*elems)[:len(*elems)-1] 925 break 926 } 927 } 928 } 929 930 // Check relationships for any moved points. 931 for i, elem := range *elems { 932 // Move any relationship with given pt. 933 for j, r := range elem.Rels { 934 if from.Equals(r.To) { 935 r.To = to 936 (*elems)[i].Rels[j] = r 937 changed = true 938 } 939 } 940 } 941 return 942 } 943 944 // --- Sort interface 945 946 func (elems Elements) Len() int { 947 return len(elems) 948 } 949 950 // Less returns true if element[i] < element[j] where ordering is determined by 951 // Pos and Kind in that order. Relationships and Tags are not considered in ordering. 952 func (elems Elements) Less(i, j int) bool { 953 if elems[i].Pos.Less(elems[j].Pos) { 954 return true 955 } 956 if elems[i].Pos.Equals(elems[j].Pos) { 957 return elems[i].Kind < elems[j].Kind 958 } 959 return false 960 } 961 962 func (elems Elements) Swap(i, j int) { 963 elems[i], elems[j] = elems[j], elems[i] 964 } 965 966 // NewData returns a pointer to annotation data. 967 func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) { 968 // Initialize the Data for this data type 969 basedata, err := datastore.NewDataService(dtype, uuid, id, name, c) 970 if err != nil { 971 return nil, err 972 } 973 data := &Data{ 974 Data: basedata, 975 Properties: Properties{}, 976 } 977 return data, nil 978 } 979 980 // --- Annotation Datatype ----- 981 982 type Type struct { 983 datastore.Type 984 } 985 986 // --- TypeService interface --- 987 988 func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) { 989 return NewData(uuid, id, name, c) 990 } 991 992 func (dtype *Type) Help() string { 993 return helpMessage 994 } 995 996 type formatType uint8 997 998 const ( 999 FormatFlatBuffers formatType = iota 1000 FormatProtobuf3 1001 FormatJSON 1002 ) 1003 1004 func getElements(ctx *datastore.VersionedCtx, tk storage.TKey) (Elements, error) { 1005 store, err := ctx.GetOrderedKeyValueDB() 1006 if err != nil { 1007 return nil, err 1008 } 1009 val, err := store.Get(ctx, tk) 1010 if err != nil { 1011 return nil, err 1012 } 1013 if val == nil { 1014 return Elements{}, nil 1015 } 1016 var elems Elements 1017 if err := json.Unmarshal(val, &elems); err != nil { 1018 return nil, err 1019 } 1020 return elems, nil 1021 } 1022 1023 // makes sure that no relationships are returned since they could be out of date. 
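// getElementsNR decodes the stored JSON value at the given key into ElementsNR;
// a missing key yields an empty (non-nil) slice rather than an error.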
1024 func getElementsNR(ctx *datastore.VersionedCtx, tk storage.TKey) (ElementsNR, error) { 1025 store, err := ctx.GetOrderedKeyValueDB() 1026 if err != nil { 1027 return nil, err 1028 } 1029 val, err := store.Get(ctx, tk) 1030 if err != nil { 1031 return nil, err 1032 } 1033 if val == nil { 1034 return ElementsNR{}, nil 1035 } 1036 var elems ElementsNR 1037 if err := json.Unmarshal(val, &elems); err != nil { 1038 return nil, err 1039 } 1040 return elems, nil 1041 } 1042 1043 func getBlockElementsNR(ctx *datastore.VersionedCtx, tk storage.TKey, blockSize dvid.Point3d) (map[dvid.IZYXString]ElementsNR, error) { 1044 elems, err := getElementsNR(ctx, tk) 1045 if err != nil { 1046 return nil, err 1047 } 1048 blockE := make(map[dvid.IZYXString]ElementsNR) 1049 for _, elem := range elems { 1050 izyxStr := elem.Pos.ToBlockIZYXString(blockSize) 1051 be := blockE[izyxStr] 1052 be = append(be, elem) 1053 blockE[izyxStr] = be 1054 } 1055 return blockE, nil 1056 } 1057 1058 func putBatchElements(batch storage.Batch, tk storage.TKey, elems interface{}) error { 1059 val, err := json.Marshal(elems) 1060 if err != nil { 1061 return err 1062 } 1063 batch.Put(tk, val) 1064 return nil 1065 } 1066 1067 func putElements(ctx *datastore.VersionedCtx, tk storage.TKey, elems interface{}) error { 1068 val, err := json.Marshal(elems) 1069 if err != nil { 1070 return err 1071 } 1072 store, err := ctx.GetOrderedKeyValueDB() 1073 if err != nil { 1074 return err 1075 } 1076 return store.Put(ctx, tk, val) 1077 } 1078 1079 // Returns elements within the sparse volume represented by the blocks of RLEs. 1080 func getElementsInRLE(ctx *datastore.VersionedCtx, brles dvid.BlockRLEs) (Elements, error) { 1081 rleElems := Elements{} 1082 for izyx, rles := range brles { 1083 // Get elements for this block 1084 bcoord, err := izyx.ToChunkPoint3d() 1085 tk := NewBlockTKey(bcoord) 1086 elems, err := getElements(ctx, tk) 1087 if err != nil { 1088 return nil, err 1089 } 1090 1091 // Append the elements in current block RLE 1092 in := rles.Within(elems.positions()) 1093 for _, idx := range in { 1094 rleElems = append(rleElems, elems[idx]) 1095 } 1096 } 1097 return rleElems, nil 1098 } 1099 1100 type tagDeltaT struct { 1101 add ElementsNR // elements to add or modify 1102 erase map[string]struct{} // points to erase 1103 } 1104 1105 func addTagDelta(newBlockE, curBlockE Elements, tagDelta map[Tag]tagDeltaT) { 1106 if len(newBlockE) == 0 { 1107 return 1108 } 1109 elemsByPoint := make(map[string]ElementNR, len(newBlockE)) 1110 for _, newElem := range newBlockE { 1111 zyx := string(newElem.Pos.ToZYXBytes()) 1112 elemsByPoint[zyx] = newElem.ElementNR 1113 // add every new point -- could check to see if exactly same but costs computation 1114 for _, tag := range newElem.Tags { 1115 td, found := tagDelta[tag] 1116 if found { 1117 td.add = append(td.add, newElem.ElementNR) 1118 } else { 1119 td.add = ElementsNR{newElem.ElementNR} 1120 } 1121 tagDelta[tag] = td 1122 } 1123 } 1124 for _, curElem := range curBlockE { 1125 zyx := string(curElem.Pos.ToZYXBytes()) 1126 newElem, found := elemsByPoint[zyx] 1127 if !found { 1128 continue 1129 } 1130 removed := curElem.Tags.Removed(newElem.Tags) 1131 for _, tag := range removed { 1132 td, found := tagDelta[tag] 1133 if found { 1134 td.erase[zyx] = struct{}{} 1135 } else { 1136 td.erase = map[string]struct{}{ 1137 zyx: struct{}{}, 1138 } 1139 } 1140 tagDelta[tag] = td 1141 } 1142 delete(elemsByPoint, zyx) 1143 } 1144 } 1145 1146 // Properties are additional properties for data beyond those in standard 
datastore.Data. 1147 type Properties struct { 1148 // Currently unused since block sizes are either default or taken from synced labelblk. 1149 } 1150 1151 // Data is an instance of the annotation datatype, supporting point annotation management and queries. 1152 type Data struct { 1153 *datastore.Data 1154 Properties 1155 1156 // Keep track of sync operations that could be updating the data. 1157 datastore.Updater 1158 1159 // sync channels for receiving subscribed events like merge, split, and block changes. 1160 syncCh chan datastore.SyncMessage 1161 syncDone chan *sync.WaitGroup 1162 1163 // Cached in-memory so we only have to look up the block size once. 1164 cachedBlockSize *dvid.Point3d 1165 1166 denormOngoing bool // true if we are doing denormalizations, so avoid ops on them. 1167 1168 sync.RWMutex // For CAS ops. TODO: Make more specific (e.g., point locks) for efficiency. 1169 } 1170 1171 func (d *Data) Equals(d2 *Data) bool { 1172 if !d.Data.Equals(d2.Data) { 1173 return false 1174 } 1175 return reflect.DeepEqual(d.Properties, d2.Properties) 1176 } 1177 1178 // blockSize is either taken from a synced label instance (labelblk, labelarray, labelmap, or labelvol) 1179 // or defaults to DefaultBlockSize; the result is cached after the first lookup. 1180 func (d *Data) blockSize() dvid.Point3d { 1181 if d.cachedBlockSize != nil { 1182 return *d.cachedBlockSize 1183 } 1184 var bsize dvid.Point3d 1185 d.cachedBlockSize = &bsize 1186 if lb := d.getSyncedLabels(); lb != nil { 1187 bsize = lb.BlockSize().(dvid.Point3d) 1188 return bsize 1189 } 1190 if lv := d.GetSyncedLabelvol(); lv != nil { 1191 bsize = lv.BlockSize 1192 return bsize 1193 } 1194 bsize = dvid.Point3d{DefaultBlockSize, DefaultBlockSize, DefaultBlockSize} 1195 return bsize 1196 } 1197 1198 func (d *Data) GetSyncedLabelvol() *labelvol.Data { 1199 for dataUUID := range d.SyncedData() { 1200 source, err := labelvol.GetByDataUUID(dataUUID) 1201 if err == nil { 1202 return source 1203 } 1204 } 1205 return nil 1206 } 1207 1208 type labelType interface { 1209 GetLabelAtPoint(dvid.VersionID, dvid.Point) (uint64, error) 1210 GetLabelBytes(dvid.VersionID, dvid.ChunkPoint3d) ([]byte, error) 1211 BlockSize() dvid.Point 1212 DataName() dvid.InstanceName 1213 } 1214 1215 type labelPointType interface { 1216 GetLabelPoints(v dvid.VersionID, pts []dvid.Point3d, scale uint8, useSupervoxels bool) ([]uint64, error) 1217 } 1218 1219 type supervoxelType interface { 1220 GetPointsInSupervoxels(v dvid.VersionID, pts []dvid.Point3d, supervoxels []uint64) ([]bool, error) 1221 BlockSize() dvid.Point 1222 DataName() dvid.InstanceName 1223 } 1224 1225 func (d *Data) getSyncedLabels() labelType { 1226 for dataUUID := range d.SyncedData() { 1227 source0, err := labelmap.GetByDataUUID(dataUUID) 1228 if err == nil { 1229 return source0 1230 } 1231 source1, err := labelarray.GetByDataUUID(dataUUID) 1232 if err == nil { 1233 return source1 1234 } 1235 source2, err := labelblk.GetByDataUUID(dataUUID) 1236 if err == nil { 1237 return source2 1238 } 1239 } 1240 return nil 1241 } 1242 1243 func (d *Data) getSyncedSupervoxels() supervoxelType { 1244 for dataUUID := range d.SyncedData() { 1245 source0, err := labelmap.GetByDataUUID(dataUUID) 1246 if err == nil { 1247 return source0 1248 } 1249 } 1250 return nil 1251 } 1252 1253 // returns Elements with Relationships added by querying the block-indexed elements. 1254 func (d *Data) getExpandedElements(ctx *datastore.VersionedCtx, tk storage.TKey) (Elements, error) { 1255 elems, err := getElements(ctx, tk) 1256 if err != nil { 1257 return elems, err 1258 } 1259 1260 // Batch each element into blocks. 
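// Group the elements by the block containing their position so each block's
// stored annotations only need to be read once when expanding relationships.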
1261 blockSize := d.blockSize() 1262 blockE := make(map[dvid.IZYXString]Elements) 1263 for _, elem := range elems { 1264 // Get block coord for this element. 1265 izyxStr := elem.Pos.ToBlockIZYXString(blockSize) 1266 1267 // Append to block 1268 be := blockE[izyxStr] 1269 be = append(be, elem) 1270 blockE[izyxStr] = be 1271 } 1272 1273 expanded := make(Elements, 0, len(elems)) 1274 for izyx, be := range blockE { 1275 // Read the block-indexed elements 1276 chunkPt, err := izyx.ToChunkPoint3d() 1277 if err != nil { 1278 return nil, err 1279 } 1280 btk := NewBlockTKey(chunkPt) 1281 relElems, err := getElements(ctx, btk) 1282 if err != nil { 1283 return nil, err 1284 } 1285 1286 // Construct a point map for quick lookup to element data 1287 emap := make(map[string]int) 1288 for i, relem := range relElems { 1289 emap[relem.Pos.MapKey()] = i 1290 } 1291 1292 // Expand. 1293 for _, elem := range be { 1294 i, found := emap[elem.Pos.MapKey()] 1295 if found { 1296 expanded = append(expanded, relElems[i]) 1297 } else { 1298 dvid.Errorf("Can't expand relationships for data %q, element @ %s, didn't find it in block %s!\n", d.DataName(), elem.Pos, izyx) 1299 } 1300 } 1301 } 1302 return expanded, nil 1303 } 1304 1305 // delete all reference to given element point in the slice of tags. 1306 // This is private method and assumes outer locking. 1307 func (d *Data) deleteElementInTags(ctx *datastore.VersionedCtx, batch storage.Batch, pt dvid.Point3d, tags []Tag) error { 1308 for _, tag := range tags { 1309 // Get the elements in tag. 1310 tk, err := NewTagTKey(tag) 1311 if err != nil { 1312 return err 1313 } 1314 elems, err := getElementsNR(ctx, tk) 1315 if err != nil { 1316 return err 1317 } 1318 1319 // Note all elements to be deleted. 1320 var toDel []int 1321 for i, elem := range elems { 1322 if pt.Equals(elem.Pos) { 1323 toDel = append(toDel, i) 1324 } 1325 } 1326 if len(toDel) == 0 { 1327 continue 1328 } 1329 1330 // Delete them from high index to low index due while reusing slice. 1331 for i := len(toDel) - 1; i >= 0; i-- { 1332 d := toDel[i] 1333 elems[d] = elems[len(elems)-1] 1334 elems[len(elems)-1] = ElementNR{} 1335 elems = elems[:len(elems)-1] 1336 } 1337 1338 // Save the tag. 1339 if err := putBatchElements(batch, tk, elems); err != nil { 1340 return err 1341 } 1342 } 1343 return nil 1344 } 1345 1346 func (d *Data) deleteElementInLabel(ctx *datastore.VersionedCtx, batch storage.Batch, pt dvid.Point3d) error { 1347 labelData := d.getSyncedLabels() 1348 if labelData == nil { 1349 return nil // no synced labels 1350 } 1351 label, err := labelData.GetLabelAtPoint(ctx.VersionID(), pt) 1352 if err != nil { 1353 return err 1354 } 1355 tk := NewLabelTKey(label) 1356 elems, err := getElementsNR(ctx, tk) 1357 if err != nil { 1358 return fmt.Errorf("err getting elements for label %d: %v", label, err) 1359 } 1360 1361 // Note all elements to be deleted. 1362 var delta DeltaModifyElements 1363 var toDel []int 1364 for i, elem := range elems { 1365 if pt.Equals(elem.Pos) { 1366 delta.Del = append(delta.Del, ElementPos{Label: label, Kind: elem.Kind, Pos: elem.Pos}) 1367 toDel = append(toDel, i) 1368 } 1369 } 1370 if len(toDel) == 0 { 1371 dvid.Errorf("Deleted point %s had label %d (from synced instance %q) but was not found in annotation %q for that label\n", pt, label, labelData.DataName(), d.DataName()) 1372 return nil 1373 } 1374 1375 // Delete them from high index to low index due while reusing slice. 
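// Each doomed entry is overwritten by the current last element and the slice is
// truncated, so ordering of the remaining elements is not preserved.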
1376 for i := len(toDel) - 1; i >= 0; i-- { 1377 d := toDel[i] 1378 elems[d] = elems[len(elems)-1] 1379 elems[len(elems)-1] = ElementNR{} 1380 elems = elems[:len(elems)-1] 1381 } 1382 1383 // Put the modified list of elements 1384 if err := putBatchElements(batch, tk, elems); err != nil { 1385 return err 1386 } 1387 1388 // Notify any subscribers of label annotation changes. 1389 evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent} 1390 msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta} 1391 if err := datastore.NotifySubscribers(evt, msg); err != nil { 1392 return err 1393 } 1394 return nil 1395 } 1396 1397 // delete all reference to given element point in the related points. 1398 // This is private method and assumes outer locking. 1399 func (d *Data) deleteElementInRelationships(ctx *datastore.VersionedCtx, batch storage.Batch, pt dvid.Point3d, rels []Relationship) error { 1400 blockSize := d.blockSize() 1401 for _, rel := range rels { 1402 // Get the block elements containing the related element. 1403 bcoord := rel.To.Chunk(blockSize).(dvid.ChunkPoint3d) 1404 tk := NewBlockTKey(bcoord) 1405 elems, err := getElements(ctx, tk) 1406 if err != nil { 1407 return err 1408 } 1409 1410 // Delete the point in relationships 1411 if !elems.deleteRel(pt) { 1412 continue 1413 } 1414 1415 // Save the block elements. 1416 if err := putBatchElements(batch, tk, elems); err != nil { 1417 return err 1418 } 1419 } 1420 return nil 1421 } 1422 1423 // move all reference to given element point in the slice of tags. 1424 // This is private method and assumes outer locking. 1425 func (d *Data) moveElementInTags(ctx *datastore.VersionedCtx, batch storage.Batch, from, to dvid.Point3d, tags []Tag) error { 1426 for _, tag := range tags { 1427 // Get the elements in tag. 1428 tk, err := NewTagTKey(tag) 1429 if err != nil { 1430 return err 1431 } 1432 elems, err := getElementsNR(ctx, tk) 1433 if err != nil { 1434 return err 1435 } 1436 1437 // Move element in tag. 1438 if moved, _ := elems.move(from, to, false); moved == nil { 1439 dvid.Errorf("Unable to find moved element %s in tag %q", from, tag) 1440 continue 1441 } 1442 1443 // Save the tag. 
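// putBatchElements just marshals the elements to JSON and queues a Put on the
// supplied batch; committing the batch is left to the caller.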
1444 if err := putBatchElements(batch, tk, elems); err != nil { 1445 return err 1446 } 1447 } 1448 return nil 1449 } 1450 1451 func (d *Data) moveElementInLabels(ctx *datastore.VersionedCtx, batch storage.Batch, from, to dvid.Point3d, moved ElementNR) error { 1452 labelData := d.getSyncedLabels() 1453 if labelData == nil { 1454 return nil // no label denormalization possible 1455 } 1456 oldLabel, err := labelData.GetLabelAtPoint(ctx.VersionID(), from) 1457 if err != nil { 1458 return err 1459 } 1460 newLabel, err := labelData.GetLabelAtPoint(ctx.VersionID(), to) 1461 if err != nil { 1462 return err 1463 } 1464 if oldLabel == newLabel { 1465 return nil 1466 } 1467 1468 var delta DeltaModifyElements 1469 if oldLabel != 0 { 1470 tk := NewLabelTKey(oldLabel) 1471 elems, err := getElementsNR(ctx, tk) 1472 if err != nil { 1473 return fmt.Errorf("err getting elements for label %d: %v", oldLabel, err) 1474 } 1475 if _, changed := elems.delete(from); changed { 1476 if err := putBatchElements(batch, tk, elems); err != nil { 1477 return fmt.Errorf("err putting deleted label %d element: %v", oldLabel, err) 1478 } 1479 delta.Del = append(delta.Del, ElementPos{Label: oldLabel, Kind: moved.Kind, Pos: from}) 1480 } 1481 } 1482 if newLabel != 0 { 1483 tk := NewLabelTKey(newLabel) 1484 elems, err := getElementsNR(ctx, tk) 1485 if err != nil { 1486 return fmt.Errorf("err getting elements for label %d: %v", newLabel, err) 1487 } 1488 elems.add(ElementsNR{moved}) 1489 if err := putBatchElements(batch, tk, elems); err != nil { 1490 return err 1491 } 1492 delta.Add = append(delta.Add, ElementPos{Label: newLabel, Kind: moved.Kind, Pos: to}) 1493 } 1494 1495 // Notify any subscribers of label annotation changes. 1496 if len(delta.Del) != 0 || len(delta.Add) != 0 { 1497 evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent} 1498 msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta} 1499 if err := datastore.NotifySubscribers(evt, msg); err != nil { 1500 return err 1501 } 1502 } 1503 1504 return nil 1505 } 1506 1507 // move all reference to given element point in the related points in different blocks. 1508 // This is private method and assumes outer locking as well as current "from" block already being modified, 1509 // including relationships. 1510 func (d *Data) moveElementInRelationships(ctx *datastore.VersionedCtx, batch storage.Batch, from, to dvid.Point3d, rels []Relationship) error { 1511 blockSize := d.blockSize() 1512 fromBlockCoord := from.Chunk(blockSize).(dvid.ChunkPoint3d) 1513 1514 // Get list of blocks with related points. 1515 relBlocks := make(map[dvid.IZYXString]struct{}) 1516 for _, rel := range rels { 1517 bcoord := rel.To.Chunk(blockSize).(dvid.ChunkPoint3d) 1518 if bcoord.Equals(fromBlockCoord) { 1519 continue // relationships are almoved in from block 1520 } 1521 relBlocks[bcoord.ToIZYXString()] = struct{}{} 1522 } 1523 1524 // Alter the moved points in those related blocks. 1525 for izyxstr := range relBlocks { 1526 bcoord, err := izyxstr.ToChunkPoint3d() 1527 if err != nil { 1528 return err 1529 } 1530 tk := NewBlockTKey(bcoord) 1531 elems, err := getElements(ctx, tk) 1532 if err != nil { 1533 return err 1534 } 1535 1536 // Move element in related element. 1537 if _, changed := elems.move(from, to, false); !changed { 1538 dvid.Errorf("Unable to find moved element %s in related element @ block %s:\n%v\n", from, bcoord, elems) 1539 continue 1540 } 1541 1542 // Save the block elements. 
1543 if err := putBatchElements(batch, tk, elems); err != nil { 1544 return err 1545 } 1546 } 1547 return nil 1548 } 1549 1550 func (d *Data) modifyElements(ctx *datastore.VersionedCtx, batch storage.Batch, tk storage.TKey, toAdd Elements) error { 1551 storeE, err := getElements(ctx, tk) 1552 if err != nil { 1553 return err 1554 } 1555 if storeE != nil { 1556 storeE.add(toAdd) 1557 } else { 1558 storeE = toAdd 1559 } 1560 return putBatchElements(batch, tk, storeE) 1561 } 1562 1563 // stores synaptic elements arranged by block, replacing any 1564 // elements at same position. 1565 func (d *Data) storeBlockElements(ctx *datastore.VersionedCtx, batch storage.Batch, be map[dvid.IZYXString]Elements) error { 1566 for izyxStr, elems := range be { 1567 bcoord, err := izyxStr.ToChunkPoint3d() 1568 if err != nil { 1569 return err 1570 } 1571 // Modify the block annotations 1572 tk := NewBlockTKey(bcoord) 1573 if err := d.modifyElements(ctx, batch, tk, elems); err != nil { 1574 return err 1575 } 1576 } 1577 return nil 1578 } 1579 1580 // returns label elements with relationships for block elements, using 1581 // specialized point requests if available (e.g., labelmap sync) 1582 func (d *Data) getLabelElements(v dvid.VersionID, elems Elements) (labelElems LabelElements, err error) { 1583 labelData := d.getSyncedLabels() 1584 if labelData == nil { 1585 dvid.Errorf("No synced labels for annotation %q, skipping label-aware denormalization\n", d.DataName()) 1586 return 1587 } 1588 labelPointData, ok := labelData.(labelPointType) 1589 labelElems = LabelElements{} 1590 if ok { 1591 pts := make([]dvid.Point3d, len(elems)) 1592 for i, elem := range elems { 1593 pts[i] = elem.Pos 1594 } 1595 var labels []uint64 1596 labels, err = labelPointData.GetLabelPoints(v, pts, 0, false) 1597 if err != nil { 1598 return 1599 } 1600 for i, elem := range elems { 1601 if labels[i] != 0 { 1602 labelElems.add(labels[i], elem.ElementNR) 1603 } 1604 } 1605 } else { 1606 blockSize := d.blockSize() 1607 bX := blockSize[0] * 8 1608 bY := blockSize[1] * bX 1609 blockBytes := int(blockSize[0] * blockSize[1] * blockSize[2] * 8) 1610 1611 blockElems := make(map[dvid.IZYXString]Elements) 1612 for _, elem := range elems { 1613 izyxStr := elem.Pos.ToBlockIZYXString(blockSize) 1614 be := blockElems[izyxStr] 1615 be = append(be, elem) 1616 blockElems[izyxStr] = be 1617 } 1618 for izyxStr, elems := range blockElems { 1619 var bcoord dvid.ChunkPoint3d 1620 bcoord, err = izyxStr.ToChunkPoint3d() 1621 if err != nil { 1622 return 1623 } 1624 var labels []byte 1625 labels, err = labelData.GetLabelBytes(v, bcoord) 1626 if err != nil { 1627 return 1628 } 1629 if len(labels) == 0 { 1630 continue 1631 } 1632 if len(labels) != blockBytes { 1633 err = fmt.Errorf("expected %d bytes in %q label block, got %d instead. aborting", blockBytes, d.DataName(), len(labels)) 1634 return 1635 } 1636 1637 // Group annotations by label 1638 for _, elem := range elems { 1639 pt := elem.Pos.Point3dInChunk(blockSize) 1640 i := pt[2]*bY + pt[1]*bX + pt[0]*8 1641 label := binary.LittleEndian.Uint64(labels[i : i+8]) 1642 if label != 0 { 1643 labelElems.add(label, elem.ElementNR) 1644 } 1645 } 1646 } 1647 } 1648 return 1649 } 1650 1651 // returns label elements without relationships, using specialized point 1652 // requests if available or falling back to reading label blocks. 
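// In the fallback path, the label block is fetched as raw bytes and each element's
// label is decoded as a little-endian uint64 at the element's voxel offset within the block.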
1653 func (d *Data) getLabelElementsNR(labelData labelType, v dvid.VersionID, elems ElementsNR) (labelElems LabelElements, err error) { 1654 labelPointData, pointOK := labelData.(labelPointType) 1655 labelElems = LabelElements{} 1656 if pointOK { 1657 pts := make([]dvid.Point3d, len(elems)) 1658 for i, elem := range elems { 1659 pts[i] = elem.Pos 1660 } 1661 var labels []uint64 1662 labels, err = labelPointData.GetLabelPoints(v, pts, 0, false) 1663 if err != nil { 1664 return 1665 } 1666 for i, elem := range elems { 1667 if labels[i] != 0 { 1668 labelElems.add(labels[i], elem) 1669 } 1670 } 1671 return 1672 } 1673 1674 blockSize := d.blockSize() 1675 bX := blockSize[0] * 8 1676 bY := blockSize[1] * bX 1677 blockBytes := int(blockSize[0] * blockSize[1] * blockSize[2] * 8) 1678 1679 blockElems := make(map[dvid.IZYXString]ElementsNR) 1680 for _, elem := range elems { 1681 izyxStr := elem.Pos.ToBlockIZYXString(blockSize) 1682 be := blockElems[izyxStr] 1683 be = append(be, elem) 1684 blockElems[izyxStr] = be 1685 } 1686 for izyxStr, elems := range blockElems { 1687 var bcoord dvid.ChunkPoint3d 1688 bcoord, err = izyxStr.ToChunkPoint3d() 1689 if err != nil { 1690 return 1691 } 1692 var labels []byte 1693 labels, err = labelData.GetLabelBytes(v, bcoord) 1694 if err != nil { 1695 return 1696 } 1697 if len(labels) == 0 { 1698 continue 1699 } 1700 if len(labels) != blockBytes { 1701 err = fmt.Errorf("expected %d bytes in %q label block, got %d instead. aborting", blockBytes, d.DataName(), len(labels)) 1702 return 1703 } 1704 1705 // Group annotations by label 1706 for _, elem := range elems { 1707 pt := elem.Pos.Point3dInChunk(blockSize) 1708 i := pt[2]*bY + pt[1]*bX + pt[0]*8 1709 label := binary.LittleEndian.Uint64(labels[i : i+8]) 1710 if label != 0 { 1711 labelElems.add(label, elem) 1712 } 1713 } 1714 } 1715 return 1716 } 1717 1718 // lookup labels for given elements and add them to label element map 1719 func (d *Data) addLabelElements(v dvid.VersionID, labelE LabelElements, bcoord dvid.ChunkPoint3d, elems Elements) (int, error) { 1720 le, err := d.getLabelElements(v, elems) 1721 if err != nil { 1722 return 0, err 1723 } 1724 1725 // Add annotations by label 1726 var nonzeroElems int 1727 for label, elems := range le { 1728 for _, elem := range elems { 1729 if label != 0 { 1730 labelE.add(label, elem) 1731 nonzeroElems++ 1732 } else { 1733 dvid.Infof("Annotation %s was at voxel with label 0\n", elem.Pos) 1734 } 1735 } 1736 } 1737 return nonzeroElems, nil 1738 } 1739 1740 // stores synaptic elements arranged by label, replacing any 1741 // elements at same position. 1742 func (d *Data) storeLabelElements(ctx *datastore.VersionedCtx, batch storage.Batch, elems Elements) error { 1743 toAdd, err := d.getLabelElements(ctx.VersionID(), elems) 1744 if err != nil { 1745 return err 1746 } 1747 if len(toAdd) == 0 { 1748 return nil 1749 } 1750 1751 // Store all the added annotations to the appropriate labels. 1752 var delta DeltaModifyElements 1753 for label, additions := range toAdd { 1754 tk := NewLabelTKey(label) 1755 elems, err := getElementsNR(ctx, tk) 1756 if err != nil { 1757 return fmt.Errorf("err getting elements for label %d: %v", label, err) 1758 } 1759 1760 // Check if these annotations already exist. 
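// Existing elements are indexed by position: new positions are appended (and
// reported in the delta), while an element at the same position just has its
// properties replaced.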
1761 emap := make(map[string]int) 1762 for i, elem := range elems { 1763 emap[elem.Pos.MapKey()] = i 1764 } 1765 for _, elem := range additions { 1766 i, found := emap[elem.Pos.MapKey()] 1767 if !found { 1768 elems = append(elems, elem) 1769 delta.Add = append(delta.Add, ElementPos{Label: label, Kind: elem.Kind, Pos: elem.Pos}) 1770 } else { 1771 elems[i] = elem // replace properties if same position 1772 } 1773 } 1774 if err := putBatchElements(batch, tk, elems); err != nil { 1775 return fmt.Errorf("couldn't serialize label %d annotations in instance %q: %v", label, d.DataName(), err) 1776 } 1777 } 1778 1779 // Notify any subscribers of label annotation changes. 1780 evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent} 1781 msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta} 1782 if err := datastore.NotifySubscribers(evt, msg); err != nil { 1783 return err 1784 } 1785 1786 return nil 1787 } 1788 1789 // stores synaptic elements arranged by tag, replacing any 1790 // elements at same position. 1791 func (d *Data) storeTagElements(ctx *datastore.VersionedCtx, batch storage.Batch, te map[Tag]Elements) error { 1792 for tag, elems := range te { 1793 tk, err := NewTagTKey(tag) 1794 if err != nil { 1795 return err 1796 } 1797 if err := d.modifyElements(ctx, batch, tk, elems); err != nil { 1798 return err 1799 } 1800 } 1801 return nil 1802 } 1803 1804 func (d *Data) modifyTagElements(ctx *datastore.VersionedCtx, batch storage.Batch, tagDelta map[Tag]tagDeltaT) error { 1805 for tag, td := range tagDelta { 1806 tk, err := NewTagTKey(tag) 1807 if err != nil { 1808 return err 1809 } 1810 tagElems, err := getElementsNR(ctx, tk) 1811 if err != nil { 1812 return err 1813 } 1814 if len(td.add) != 0 { 1815 if tagElems != nil { 1816 tagElems.add(td.add) 1817 } else { 1818 tagElems = make(ElementsNR, len(td.add)) 1819 copy(tagElems, td.add) 1820 } 1821 } 1822 // Note all elements to be deleted. 1823 var toDel []int 1824 for i, elem := range tagElems { 1825 zyx := string(elem.Pos.ToZYXBytes()) 1826 if _, found := td.erase[zyx]; found { 1827 toDel = append(toDel, i) 1828 } 1829 } 1830 if len(toDel) != 0 { 1831 // Delete them from high index to low index due while reusing slice. 1832 for i := len(toDel) - 1; i >= 0; i-- { 1833 d := toDel[i] 1834 tagElems[d] = tagElems[len(tagElems)-1] 1835 tagElems[len(tagElems)-1] = ElementNR{} 1836 tagElems = tagElems[:len(tagElems)-1] 1837 } 1838 } 1839 1840 // Save the tag. 1841 if err := putBatchElements(batch, tk, tagElems); err != nil { 1842 return err 1843 } 1844 } 1845 return nil 1846 } 1847 1848 // ProcessLabelAnnotations will pass all annotations, label by label, to the given function. 
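// A hypothetical caller (illustration only, not from the original source) that
// logs the number of annotations per label:
//
//	err := d.ProcessLabelAnnotations(v, func(label uint64, elems ElementsNR) {
//		dvid.Infof("label %d has %d annotations\n", label, len(elems))
//	})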
1849 func (d *Data) ProcessLabelAnnotations(v dvid.VersionID, f func(label uint64, elems ElementsNR)) error { 1850 minTKey := storage.MinTKey(keyLabel) 1851 maxTKey := storage.MaxTKey(keyLabel) 1852 1853 store, err := datastore.GetOrderedKeyValueDB(d) 1854 if err != nil { 1855 return fmt.Errorf("annotation %q had error initializing store: %v", d.DataName(), err) 1856 } 1857 ctx := datastore.NewVersionedCtx(d, v) 1858 err = store.ProcessRange(ctx, minTKey, maxTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error { 1859 if c == nil { 1860 return fmt.Errorf("received nil chunk in reload for data %q", d.DataName()) 1861 } 1862 if c.V == nil { 1863 return nil 1864 } 1865 label, err := DecodeLabelTKey(c.K) 1866 if err != nil { 1867 return fmt.Errorf("couldn't decode label key %v for data %q", c.K, d.DataName()) 1868 } 1869 1870 var elems ElementsNR 1871 if err := json.Unmarshal(c.V, &elems); err != nil { 1872 return fmt.Errorf("couldn't unmarshal elements for label %d of data %q", label, d.DataName()) 1873 } 1874 if len(elems) == 0 { 1875 return nil 1876 } 1877 f(label, elems) 1878 return nil 1879 }) 1880 if err != nil { 1881 return fmt.Errorf("Unable to get label-based annotations for data %q: %v\n", d.DataName(), err) 1882 } 1883 return nil 1884 } 1885 1886 // GetLabelJSON returns JSON for synapse elements in a given label. 1887 func (d *Data) GetLabelJSON(ctx *datastore.VersionedCtx, label uint64, addRels bool) ([]byte, error) { 1888 // d.RLock() 1889 // defer d.RUnlock() 1890 1891 tk := NewLabelTKey(label) 1892 if addRels { 1893 elems, err := d.getExpandedElements(ctx, tk) 1894 if err != nil { 1895 return nil, err 1896 } 1897 return json.Marshal(elems) 1898 } 1899 elems, err := getElementsNR(ctx, tk) 1900 if err != nil { 1901 return nil, err 1902 } 1903 return json.Marshal(elems) 1904 } 1905 1906 // GetTagJSON returns JSON for synapse elements in a given tag. 1907 func (d *Data) GetTagJSON(ctx *datastore.VersionedCtx, tag Tag, addRels bool) (jsonBytes []byte, err error) { 1908 // d.RLock() 1909 // defer d.RUnlock() 1910 1911 var tk storage.TKey 1912 tk, err = NewTagTKey(tag) 1913 if err != nil { 1914 return 1915 } 1916 var elems interface{} 1917 if addRels { 1918 elems, err = d.getExpandedElements(ctx, tk) 1919 } else { 1920 elems, err = getElementsNR(ctx, tk) 1921 } 1922 if err == nil { 1923 jsonBytes, err = json.Marshal(elems) 1924 } 1925 return 1926 } 1927 1928 // StreamAll returns all elements for this data instance. 
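// The response is streamed as a single JSON object keyed by "x,y,z" block coordinate,
// with each value being that block's stored element array, i.e., the same format
// returned by the /blocks endpoint.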

// StreamAll returns all elements for this data instance.
func (d *Data) StreamAll(ctx *datastore.VersionedCtx, w http.ResponseWriter) error {
	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return err
	}
	minTKey, maxTKey := BlockTKeyRange()

	// d.RLock()
	// defer d.RUnlock()

	if _, err := w.Write([]byte("{")); err != nil {
		return err
	}
	numBlocks := 0
	err = store.ProcessRange(ctx, minTKey, maxTKey, nil, func(chunk *storage.Chunk) error {
		bcoord, err := DecodeBlockTKey(chunk.K)
		if err != nil {
			return err
		}
		if len(chunk.V) == 0 {
			return nil
		}
		s := fmt.Sprintf(`"%d,%d,%d":`, bcoord[0], bcoord[1], bcoord[2])
		if numBlocks > 0 {
			s = "," + s
		}
		if _, err := w.Write([]byte(s)); err != nil {
			return err
		}
		if _, err := w.Write(chunk.V); err != nil {
			return err
		}
		numBlocks++
		return nil
	})
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("}")); err != nil {
		return err
	}
	dvid.Infof("Returned %d blocks worth of elements in an /all-elements request\n", numBlocks)
	return nil
}

// StreamBlocks returns synapse elements for a given subvolume of image space.
func (d *Data) StreamBlocks(ctx *datastore.VersionedCtx, w http.ResponseWriter, ext *dvid.Extents3d) error {
	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return err
	}

	// d.RLock()
	// defer d.RUnlock()

	blockSize := d.blockSize()
	begBlockCoord, endBlockCoord := ext.BlockRange(blockSize)

	// Check if we should use a single range query, based on the "ScanAllForBlocks" tag if present.
	var useAllScan bool
	tags := d.Tags()
	if scanStr, found := tags["ScanAllForBlocks"]; found {
		useAllScan = (strings.ToLower(scanStr) == "true")
	}
	/**
	} else {
		// if there would be too many range requests, just get all
		dz := endBlockCoord[2] - begBlockCoord[2]
		dy := endBlockCoord[1] - begBlockCoord[1]
		if dy*dz > 100 {
			useAllScan = true
		}
	}
	**/

	if _, err := w.Write([]byte("{")); err != nil {
		return err
	}
	numBlocks := 0
	if useAllScan {
		minTKey, maxTKey := BlockTKeyZRange(begBlockCoord, endBlockCoord)
		err = store.ProcessRange(ctx, minTKey, maxTKey, nil, func(chunk *storage.Chunk) error {
			bcoord, err := DecodeBlockTKey(chunk.K)
			if err != nil {
				return err
			}
			if !bcoord.WithinChunkBoundingBox(begBlockCoord, endBlockCoord) || len(chunk.V) == 0 {
				return nil
			}
			s := fmt.Sprintf(`"%d,%d,%d":`, bcoord[0], bcoord[1], bcoord[2])
			if numBlocks > 0 {
				s = "," + s
			}
			if _, err := w.Write([]byte(s)); err != nil {
				return err
			}
			if _, err := w.Write(chunk.V); err != nil {
				return err
			}
			numBlocks++
			return nil
		})
		if err != nil {
			return err
		}
		dvid.Infof("Using single range scan\n")
	} else {
		for blockZ := begBlockCoord[2]; blockZ <= endBlockCoord[2]; blockZ++ {
			for blockY := begBlockCoord[1]; blockY <= endBlockCoord[1]; blockY++ {
				begTKey := NewBlockTKey(dvid.ChunkPoint3d{begBlockCoord[0], blockY, blockZ})
				endTKey := NewBlockTKey(dvid.ChunkPoint3d{endBlockCoord[0], blockY, blockZ})
				err = store.ProcessRange(ctx, begTKey, endTKey, nil, func(chunk *storage.Chunk) error {
					bcoord, err := DecodeBlockTKey(chunk.K)
					if err != nil {
						return err
					}
					if len(chunk.V) == 0 {
						return nil
					}
					s := fmt.Sprintf(`"%d,%d,%d":`, bcoord[0], bcoord[1], bcoord[2])
					if numBlocks > 0 {
						s = "," + s
					}
					if _, err := w.Write([]byte(s)); err != nil {
						return err
					}
					if _, err := w.Write(chunk.V); err != nil {
						return err
					}
					numBlocks++
					return nil
				})
				if err != nil {
					return err
				}
			}
		}
	}
	if _, err := w.Write([]byte("}")); err != nil {
		return err
	}
	dvid.Infof("Returned %d blocks of elements in a /blocks request with bounds %s -> %s (used single range query: %t)\n",
		numBlocks, begBlockCoord, endBlockCoord, useAllScan)
	return nil
}

// GetRegionSynapses returns synapse elements for a given subvolume of image space.
func (d *Data) GetRegionSynapses(ctx *datastore.VersionedCtx, ext *dvid.Extents3d) (Elements, error) {
	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return nil, err
	}

	// Set up block bounds for the synapse element query in the supplied Z range.
	blockSize := d.blockSize()
	begBlockCoord, endBlockCoord := ext.BlockRange(blockSize)

	// d.RLock()
	// defer d.RUnlock()

	// Iterate through all synapse element block k/v pairs, making sure the elements
	// are also within the given subvolume.
	var elements Elements
	for blockZ := begBlockCoord[2]; blockZ <= endBlockCoord[2]; blockZ++ {
		for blockY := begBlockCoord[1]; blockY <= endBlockCoord[1]; blockY++ {
			begTKey := NewBlockTKey(dvid.ChunkPoint3d{begBlockCoord[0], blockY, blockZ})
			endTKey := NewBlockTKey(dvid.ChunkPoint3d{endBlockCoord[0], blockY, blockZ})
			err = store.ProcessRange(ctx, begTKey, endTKey, nil, func(chunk *storage.Chunk) error {
				bcoord, err := DecodeBlockTKey(chunk.K)
				if err != nil {
					return err
				}
				if !ext.BlockWithin(blockSize, bcoord) {
					return nil
				}
				// Deserialize the JSON value into a slice of elements.
				var blockElems Elements
				if err := json.Unmarshal(chunk.V, &blockElems); err != nil {
					return err
				}
				// Iterate through elements, screening on extents before adding to region elements.
				for _, elem := range blockElems {
					if ext.VoxelWithin(elem.Pos) {
						elements = append(elements, elem)
					}
				}
				return nil
			})
			if err != nil {
				return nil, err
			}
		}
	}
	return elements, nil
}
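
// decodeBlockStream is a hypothetical client-side sketch (not part of the original
// source) showing the shape of the JSON produced by StreamAll and StreamBlocks: a
// single object whose keys are "x,y,z" block coordinates and whose values are the
// element arrays stored for each block. A caller holding the full payload could
// decode it like this.
func decodeBlockStream(jsonBytes []byte) (map[string]Elements, error) {
	blocks := make(map[string]Elements)
	if err := json.Unmarshal(jsonBytes, &blocks); err != nil {
		return nil, err
	}
	return blocks, nil
}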

// GetROISynapses returns synapse elements for a given ROI.
func (d *Data) GetROISynapses(ctx *datastore.VersionedCtx, roiSpec storage.FilterSpec) (Elements, error) {
	roidata, roiV, roiFound, err := roi.DataByFilter(roiSpec)
	if err != nil {
		return nil, fmt.Errorf("ROI specification was not parsable (%s): %v", roiSpec, err)
	}
	if !roiFound {
		return nil, fmt.Errorf("no ROI found that matches specification %q", roiSpec)
	}
	roiSpans, err := roidata.GetSpans(roiV)
	if err != nil {
		return nil, fmt.Errorf("unable to get ROI spans for %q: %v", roiSpec, err)
	}
	if !d.blockSize().Equals(roidata.BlockSize) {
		return nil, fmt.Errorf("/roi endpoint currently requires ROI %q to have same block size as annotation %q", roidata.DataName(), d.DataName())
	}

	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return nil, err
	}

	// d.RLock()
	// defer d.RUnlock()

	var elements Elements
	for _, span := range roiSpans {
		begBlockCoord := dvid.ChunkPoint3d{span[2], span[1], span[0]}
		endBlockCoord := dvid.ChunkPoint3d{span[3], span[1], span[0]}
		begTKey := NewBlockTKey(begBlockCoord)
		endTKey := NewBlockTKey(endBlockCoord)
		err = store.ProcessRange(ctx, begTKey, endTKey, nil, func(chunk *storage.Chunk) error {
			var blockElems Elements
			if err := json.Unmarshal(chunk.V, &blockElems); err != nil {
				return err
			}
			elements = append(elements, blockElems...)
			return nil
		})
		if err != nil {
			return nil, fmt.Errorf("error retrieving annotations from %s -> %s: %v", begBlockCoord, endBlockCoord, err)
		}
	}
	return elements, nil
}
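
// spanToBlockRange is a hypothetical helper (not part of the original source) that
// makes the span layout used by GetROISynapses explicit: each ROI span appears to be
// ordered (z, y, xBegin, xEnd) in block coordinates, so one span expands to a run of
// blocks along X at a fixed Y and Z. The plain [4]int32 parameter is a stand-in for
// the ROI span type used by the roi package.
func spanToBlockRange(span [4]int32) (beg, end dvid.ChunkPoint3d) {
	beg = dvid.ChunkPoint3d{span[2], span[1], span[0]} // (xBegin, y, z)
	end = dvid.ChunkPoint3d{span[3], span[1], span[0]} // (xEnd, y, z)
	return
}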

type blockList map[string]Elements

// StoreBlocks performs a synchronous store of synapses in JSON format, not
// returning until the data blocks are complete.
func (d *Data) StoreBlocks(ctx *datastore.VersionedCtx, r io.Reader, kafkaOff bool) (numBlocks int, err error) {
	jsonBytes, err := ioutil.ReadAll(r)
	if err != nil {
		return 0, err
	}
	var blocks blockList
	if err := json.Unmarshal(jsonBytes, &blocks); err != nil {
		return 0, err
	}

	// d.Lock()
	// defer d.Unlock()

	// Do modifications under a batch.
	store, err := d.KVStore()
	if err != nil {
		return 0, err
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return 0, fmt.Errorf("data type annotation requires batch-enabled store, which %q is not", store)
	}
	batch := batcher.NewBatch(ctx)

	var blockX, blockY, blockZ int32
	for key, elems := range blocks {
		_, err := fmt.Sscanf(key, "%d,%d,%d", &blockX, &blockY, &blockZ)
		if err != nil {
			return 0, err
		}
		blockCoord := dvid.ChunkPoint3d{blockX, blockY, blockZ}
		tk := NewBlockTKey(blockCoord)
		if err := putBatchElements(batch, tk, elems); err != nil {
			return 0, err
		}
	}

	if !kafkaOff {
		// Store the posted synapse data in the blob store for the kafka reference.
		var postRef string
		if postRef, err = d.PutBlob(jsonBytes); err != nil {
			dvid.Errorf("storing block posted synapse data %q to kafka: %v", d.DataName(), err)
		}

		versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
		msginfo := map[string]interface{}{
			"Action":    "blocks-post",
			"DataRef":   postRef,
			"UUID":      string(versionuuid),
			"Timestamp": time.Now().String(),
		}
		if ctx.User != "" {
			msginfo["User"] = ctx.User
		}
		jsonmsg, err := json.Marshal(msginfo)
		if err != nil {
			dvid.Errorf("error marshaling JSON for annotations %q blocks post: %v\n", d.DataName(), err)
		} else if err = d.PublishKafkaMsg(jsonmsg); err != nil {
			dvid.Errorf("error on sending block post op to kafka: %v\n", err)
		}
	}

	return len(blocks), batch.Commit()
}
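
// encodeBlocksPayload is a hypothetical sketch (not part of the original source)
// showing the payload shape StoreBlocks expects: a JSON object keyed by "x,y,z"
// block coordinates, each holding that block's element array, e.g.
// {"1,2,3":[{...},{...}], "1,3,3":[{...}]}.
func encodeBlocksPayload(blocks map[dvid.ChunkPoint3d]Elements) ([]byte, error) {
	payload := make(blockList, len(blocks))
	for bcoord, elems := range blocks {
		key := fmt.Sprintf("%d,%d,%d", bcoord[0], bcoord[1], bcoord[2])
		payload[key] = elems
	}
	return json.Marshal(payload)
}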

// StoreElements performs a synchronous store of synapses in JSON format, not
// returning until the data and its denormalizations are complete.
func (d *Data) StoreElements(ctx *datastore.VersionedCtx, r io.Reader, kafkaOff bool) error {
	jsonBytes, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	var elems Elements
	if err := json.Unmarshal(jsonBytes, &elems); err != nil {
		return err
	}

	// d.Lock()
	// defer d.Unlock()

	dvid.Infof("%d annotation elements received via POST\n", len(elems))

	blockSize := d.blockSize()
	addToBlock := make(map[dvid.IZYXString]Elements)
	tagDelta := make(map[Tag]tagDeltaT)

	// Organize added elements into blocks.
	for _, elem := range elems {
		// Get block coord for this element.
		izyxStr := elem.Pos.ToBlockIZYXString(blockSize)

		// Append to block.
		be := addToBlock[izyxStr]
		be = append(be, elem)
		addToBlock[izyxStr] = be
	}

	// Find current elements under the blocks.
	for izyxStr, blockElems := range addToBlock {
		bcoord, err := izyxStr.ToChunkPoint3d()
		if err != nil {
			return err
		}
		tk := NewBlockTKey(bcoord)
		curBlockE, err := getElements(ctx, tk)
		if err != nil {
			return err
		}
		addTagDelta(blockElems, curBlockE, tagDelta)
	}

	// Do modifications under a batch.
	store, err := d.KVStore()
	if err != nil {
		return err
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("data type annotation requires batch-enabled store, which %q is not", store)
	}
	batch := batcher.NewBatch(ctx)

	// Store the new block elements.
	if err := d.storeBlockElements(ctx, batch, addToBlock); err != nil {
		return err
	}

	// Store new elements among label denormalizations.
	if err := d.storeLabelElements(ctx, batch, elems); err != nil {
		return err
	}

	// Store the new tag elements.
	if err := d.modifyTagElements(ctx, batch, tagDelta); err != nil {
		return err
	}

	if !kafkaOff {
		// Store the posted synapse data in the blob store for the kafka reference.
		var postRef string
		if postRef, err = d.PutBlob(jsonBytes); err != nil {
			dvid.Errorf("storing posted synapse data %q to kafka: %v", d.DataName(), err)
		}

		versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
		msginfo := map[string]interface{}{
			"Action":    "element-post",
			"DataRef":   postRef,
			"UUID":      string(versionuuid),
			"Timestamp": time.Now().String(),
		}
		if ctx.User != "" {
			msginfo["User"] = ctx.User
		}
		jsonmsg, err := json.Marshal(msginfo)
		if err != nil {
			dvid.Errorf("error marshaling JSON for annotations %q element post: %v\n", d.DataName(), err)
		} else if err = d.PublishKafkaMsg(jsonmsg); err != nil {
			dvid.Errorf("error on sending element post op to kafka: %v\n", err)
		}
	}

	return batch.Commit()
}
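
// postElements is a hypothetical usage sketch (not part of the original source)
// showing how in-memory annotation elements might be handed to StoreElements: they
// are marshaled into the same JSON array format accepted by the POST /elements
// endpoint and wrapped in a reader.
func postElements(d *Data, ctx *datastore.VersionedCtx, elems Elements) error {
	jsonBytes, err := json.Marshal(elems)
	if err != nil {
		return err
	}
	return d.StoreElements(ctx, bytes.NewReader(jsonBytes), true) // true: skip kafka logging
}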

func (d *Data) DeleteElement(ctx *datastore.VersionedCtx, pt dvid.Point3d, kafkaOff bool) error {
	// Get the containing block key.
	blockSize := d.blockSize()
	bcoord := pt.Chunk(blockSize).(dvid.ChunkPoint3d)
	tk := NewBlockTKey(bcoord)

	// d.Lock()
	// defer d.Unlock()

	elems, err := getElements(ctx, tk)
	if err != nil {
		return err
	}

	// Delete the given element.
	deleted, _ := elems.delete(pt)
	if deleted == nil {
		return fmt.Errorf("did not find element %s in datastore", pt)
	}

	// Put the block key version without the given element.
	if err := putElements(ctx, tk, elems); err != nil {
		return err
	}

	// Alter all stored versions of this annotation using a batch.
	store, err := d.KVStore()
	if err != nil {
		return err
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("data type annotation requires batch-enabled store, which %q is not", store)
	}
	batch := batcher.NewBatch(ctx)

	// Delete in label key.
	if err := d.deleteElementInLabel(ctx, batch, deleted.Pos); err != nil {
		return err
	}

	// Delete element in any tags.
	if err := d.deleteElementInTags(ctx, batch, deleted.Pos, deleted.Tags); err != nil {
		return err
	}

	// Modify any reference in relationships.
	if err := d.deleteElementInRelationships(ctx, batch, deleted.Pos, deleted.Rels); err != nil {
		return err
	}

	if !kafkaOff {
		versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
		msginfo := map[string]interface{}{
			"Action":    "element-delete",
			"Point":     pt,
			"UUID":      string(versionuuid),
			"Timestamp": time.Now().String(),
		}
		if ctx.User != "" {
			msginfo["User"] = ctx.User
		}
		jsonmsg, err := json.Marshal(msginfo)
		if err != nil {
			dvid.Errorf("error marshaling JSON for annotations %q element delete: %v\n", d.DataName(), err)
		} else if err = d.PublishKafkaMsg(jsonmsg); err != nil {
			dvid.Errorf("error on sending delete element op to kafka: %v\n", err)
		}
	}

	return batch.Commit()
}
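
// deleteAtPoint is a hypothetical sketch (not part of the original source) showing
// the programmatic equivalent of DELETE .../element/<coord>: the "x_y_z" coordinate
// from the URL would be parsed into a dvid.Point3d before calling DeleteElement. The
// direct (x, y, z) parameters here stand in for that parsing step.
func deleteAtPoint(d *Data, ctx *datastore.VersionedCtx, x, y, z int32) error {
	pt := dvid.Point3d{x, y, z}
	return d.DeleteElement(ctx, pt, false) // false: log the mutation to kafka
}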

func (d *Data) MoveElement(ctx *datastore.VersionedCtx, from, to dvid.Point3d, kafkaOff bool) error {
	// Calculate block keys.
	blockSize := d.blockSize()
	fromCoord := from.Chunk(blockSize).(dvid.ChunkPoint3d)
	fromTk := NewBlockTKey(fromCoord)

	toCoord := to.Chunk(blockSize).(dvid.ChunkPoint3d)
	toTk := NewBlockTKey(toCoord)

	// d.Lock()
	// defer d.Unlock()

	// Alter all stored versions of this annotation using a batch.
	store, err := d.KVStore()
	if err != nil {
		return err
	}
	batcher, ok := store.(storage.KeyValueBatcher)
	if !ok {
		return fmt.Errorf("data type annotation requires batch-enabled store, which %q is not", store)
	}
	batch := batcher.NewBatch(ctx)

	// Handle the from block.
	fromElems, err := getElements(ctx, fromTk)
	if err != nil {
		return err
	}

	deleteElement := !bytes.Equal(fromTk, toTk)
	moved, _ := fromElems.move(from, to, deleteElement)
	if moved == nil {
		return fmt.Errorf("did not find moved element %s in datastore", from)
	}
	dvid.Infof("moved element %v from %s -> %s\n", *moved, fromCoord, toCoord)

	if err := putBatchElements(batch, fromTk, fromElems); err != nil {
		return err
	}

	// If we've moved blocks, add the element in its new place.
	if deleteElement {
		toElems, err := getElements(ctx, toTk)
		if err != nil {
			return err
		}
		toElems.add(Elements{*moved})

		if err := putBatchElements(batch, toTk, toElems); err != nil {
			return err
		}
	}

	if err := batch.Commit(); err != nil {
		return err
	}

	if !kafkaOff {
		versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
		msginfo := map[string]interface{}{
			"Action":    "element-move",
			"From":      from,
			"To":        to,
			"UUID":      string(versionuuid),
			"Timestamp": time.Now().String(),
		}
		if ctx.User != "" {
			msginfo["User"] = ctx.User
		}
		jsonmsg, err := json.Marshal(msginfo)
		if err != nil {
			dvid.Errorf("error marshaling JSON for annotations %q element move: %v\n", d.DataName(), err)
		} else if err = d.PublishKafkaMsg(jsonmsg); err != nil {
			dvid.Errorf("error on sending move element op to kafka: %v\n", err)
		}
	}

	batch = batcher.NewBatch(ctx)

	// Move in label key.
	if err := d.moveElementInLabels(ctx, batch, from, to, moved.ElementNR); err != nil {
		return err
	}

	// Move element in any tags.
	if err := d.moveElementInTags(ctx, batch, from, to, moved.Tags); err != nil {
		return err
	}

	// Move any reference in relationships.
	if err := d.moveElementInRelationships(ctx, batch, from, to, moved.Rels); err != nil {
		return err
	}

	return batch.Commit()
}

// GetByDataUUID returns a pointer to annotation data given a data UUID.
func GetByDataUUID(dataUUID dvid.UUID) (*Data, error) {
	source, err := datastore.GetDataByDataUUID(dataUUID)
	if err != nil {
		return nil, err
	}
	data, ok := source.(*Data)
	if !ok {
		return nil, fmt.Errorf("instance %q is not an annotation datatype", source.DataName())
	}
	return data, nil
}

// GetByUUIDName returns a pointer to annotation data given a version (UUID) and data name.
func GetByUUIDName(uuid dvid.UUID, name dvid.InstanceName) (*Data, error) {
	source, err := datastore.GetDataByUUIDName(uuid, name)
	if err != nil {
		return nil, err
	}
	data, ok := source.(*Data)
	if !ok {
		return nil, fmt.Errorf("instance %q is not an annotation datatype", name)
	}
	return data, nil
}

// --- datastore.DataService interface ---------

func (d *Data) Help() string {
	return helpMessage
}

func (d *Data) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Base     *datastore.Data
		Extended Properties
	}{
		d.Data,
		d.Properties,
	})
}

func (d *Data) GobDecode(b []byte) error {
	buf := bytes.NewBuffer(b)
	dec := gob.NewDecoder(buf)
	if err := dec.Decode(&(d.Data)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.Properties)); err != nil {
		return err
	}
	return nil
}

func (d *Data) GobEncode() ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(d.Data); err != nil {
		return nil, err
	}
	if err := enc.Encode(d.Properties); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
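
// gobRoundTrip is a hypothetical sketch (not part of the original source) showing
// how the GobEncode/GobDecode pair above round-trips a Data value, as happens when
// instance metadata is serialized and later restored.
func gobRoundTrip(src *Data) (*Data, error) {
	encoded, err := src.GobEncode()
	if err != nil {
		return nil, err
	}
	dst := new(Data)
	if err := dst.GobDecode(encoded); err != nil {
		return nil, err
	}
	return dst, nil
}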