github.com/janelia-flyem/dvid@v1.0.0/datatype/annotation/sync.go (about)

     1  package annotation
     2  
     3  import (
     4  	"encoding/binary"
     5  	"encoding/json"
     6  	"fmt"
     7  	"sync"
     8  	"time"
     9  
    10  	"github.com/janelia-flyem/dvid/datastore"
    11  	"github.com/janelia-flyem/dvid/datatype/common/labels"
    12  	"github.com/janelia-flyem/dvid/datatype/imageblk"
    13  	"github.com/janelia-flyem/dvid/datatype/labelarray"
    14  	"github.com/janelia-flyem/dvid/datatype/labelmap"
    15  	"github.com/janelia-flyem/dvid/dvid"
    16  	"github.com/janelia-flyem/dvid/server"
    17  	"github.com/janelia-flyem/dvid/storage"
    18  )
    19  
// ElementPos describes the label and kind of an annotation, useful for synchronizing
// changes in data to other data types like labelsz.
type ElementPos struct {
	Label uint64       // label (body id) the element is assigned to
	Kind  ElementType  // kind of annotation element
	Pos   dvid.Point3d // global 3d position of the element
}
    27  
// DeltaModifyElements is a change in the elements assigned to a label.
// Need positions of elements because subscribers may have ROI filtering.
type DeltaModifyElements struct {
	Add []ElementPos // elements newly assigned to a label
	Del []ElementPos // elements no longer assigned to a label
}
    34  
// DeltaSetElements is a replacement of elements assigned to a label.
type DeltaSetElements struct {
	Set []ElementPos // full replacement set of elements for a label
}
    39  
// Annotation change event identifiers, published to datastore subscribers
// (e.g., labelsz) when label-element assignments change.
const (
	ModifyElementsEvent = "ANNOTATION_MOD_ELEMENTS"
	SetElementsEvent    = "ANNOTATION_SET_ELEMENTS"
)

// Number of change messages we can buffer before blocking on sync channel.
const syncBufferSize = 1000
    48  
    49  type LabelElements map[uint64]ElementsNR
    50  
    51  func (le LabelElements) add(label uint64, elem ElementNR) {
    52  	if label == 0 {
    53  		return
    54  	}
    55  	elems, found := le[label]
    56  	if found {
    57  		elems = append(elems, elem)
    58  		le[label] = elems
    59  	} else {
    60  		le[label] = ElementsNR{elem}
    61  	}
    62  }
    63  
    64  type LabelPoints map[uint64][]dvid.Point3d
    65  
    66  func (lp LabelPoints) add(label uint64, pt dvid.Point3d) {
    67  	if label == 0 {
    68  		return
    69  	}
    70  	pts, found := lp[label]
    71  	if found {
    72  		pts = append(pts, pt)
    73  		lp[label] = pts
    74  	} else {
    75  		lp[label] = []dvid.Point3d{pt}
    76  	}
    77  }
    78  
// InitDataHandlers launches a goroutine to handle this instance's sync events.
// It is idempotent: if the sync channels already exist, it does nothing.
func (d *Data) InitDataHandlers() error {
	if d.syncCh != nil || d.syncDone != nil {
		return nil
	}
	d.syncCh = make(chan datastore.SyncMessage, syncBufferSize)
	d.syncDone = make(chan *sync.WaitGroup)

	// Launch handlers of sync events.
	dvid.Infof("Launching sync event handler for data %q...\n", d.DataName())
	go d.processEvents()
	return nil
}
    92  
// Shutdown blocks until queued sync events are processed, then terminates the
// background goroutine handling this instance's syncs, marking the given
// WaitGroup done when complete.
func (d *Data) Shutdown(wg *sync.WaitGroup) {
	if d.syncDone != nil {
		// Hand processEvents a WaitGroup it signals after draining syncs.
		dwg := new(sync.WaitGroup)
		dwg.Add(1)
		d.syncDone <- dwg
		dwg.Wait() // Block until we are done.
	}
	wg.Done()
}
   103  
   104  // GetSyncSubs implements the datastore.Syncer interface.  Returns a list of subscriptions
   105  // to the sync data instance that will notify the receiver.
   106  func (d *Data) GetSyncSubs(synced dvid.Data) (subs datastore.SyncSubs, err error) {
   107  	if d.syncCh == nil {
   108  		if err = d.InitDataHandlers(); err != nil {
   109  			err = fmt.Errorf("unable to initialize handlers for data %q: %v", d.DataName(), err)
   110  			return
   111  		}
   112  	}
   113  
   114  	// Our syncing depends on the datatype we are syncing.
   115  	switch synced.TypeName() {
   116  	case "labelblk":
   117  		subs = datastore.SyncSubs{
   118  			{
   119  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.IngestBlockEvent},
   120  				Notify: d.DataUUID(),
   121  				Ch:     d.syncCh,
   122  			},
   123  			{
   124  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MutateBlockEvent},
   125  				Notify: d.DataUUID(),
   126  				Ch:     d.syncCh,
   127  			},
   128  			{
   129  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.DeleteBlockEvent},
   130  				Notify: d.DataUUID(),
   131  				Ch:     d.syncCh,
   132  			},
   133  		}
   134  	case "labelvol":
   135  		subs = datastore.SyncSubs{
   136  			datastore.SyncSub{
   137  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MergeBlockEvent},
   138  				Notify: d.DataUUID(),
   139  				Ch:     d.syncCh,
   140  			},
   141  			datastore.SyncSub{
   142  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.SplitLabelEvent},
   143  				Notify: d.DataUUID(),
   144  				Ch:     d.syncCh,
   145  			},
   146  		}
   147  	case "labelarray":
   148  		subs = datastore.SyncSubs{
   149  			{
   150  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.IngestBlockEvent},
   151  				Notify: d.DataUUID(),
   152  				Ch:     d.syncCh,
   153  			},
   154  			{
   155  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MutateBlockEvent},
   156  				Notify: d.DataUUID(),
   157  				Ch:     d.syncCh,
   158  			},
   159  			{
   160  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.DeleteBlockEvent},
   161  				Notify: d.DataUUID(),
   162  				Ch:     d.syncCh,
   163  			},
   164  			datastore.SyncSub{
   165  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MergeBlockEvent},
   166  				Notify: d.DataUUID(),
   167  				Ch:     d.syncCh,
   168  			},
   169  			datastore.SyncSub{
   170  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.SplitLabelEvent},
   171  				Notify: d.DataUUID(),
   172  				Ch:     d.syncCh,
   173  			},
   174  		}
   175  	case "labelmap":
   176  		subs = datastore.SyncSubs{
   177  			{
   178  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.IngestBlockEvent},
   179  				Notify: d.DataUUID(),
   180  				Ch:     d.syncCh,
   181  			},
   182  			{
   183  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MutateBlockEvent},
   184  				Notify: d.DataUUID(),
   185  				Ch:     d.syncCh,
   186  			},
   187  			{
   188  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.DeleteBlockEvent},
   189  				Notify: d.DataUUID(),
   190  				Ch:     d.syncCh,
   191  			},
   192  			datastore.SyncSub{
   193  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.MergeBlockEvent},
   194  				Notify: d.DataUUID(),
   195  				Ch:     d.syncCh,
   196  			},
   197  			datastore.SyncSub{
   198  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.SplitLabelEvent},
   199  				Notify: d.DataUUID(),
   200  				Ch:     d.syncCh,
   201  			},
   202  			// --- supervoxel split does not change labels of a point, so ignored
   203  			// datastore.SyncSub{
   204  			// 	Event:  datastore.SyncEvent{synced.DataUUID(), labels.SupervoxelSplitEvent},
   205  			// 	Notify: d.DataUUID(),
   206  			// 	Ch:     d.syncCh,
   207  			// },
   208  			datastore.SyncSub{
   209  				Event:  datastore.SyncEvent{synced.DataUUID(), labels.CleaveLabelEvent},
   210  				Notify: d.DataUUID(),
   211  				Ch:     d.syncCh,
   212  			},
   213  		}
   214  	default:
   215  		err = fmt.Errorf("unable to sync %s with %s since datatype %q is not supported", d.DataName(), synced.DataName(), synced.TypeName())
   216  	}
   217  	return
   218  }
   219  
   220  // Processes each labelblk change as we get it.
   221  func (d *Data) processEvents() {
   222  	defer func() {
   223  		if e := recover(); e != nil {
   224  			msg := fmt.Sprintf("Panic detected on annotation sync thread: %+v\n", e)
   225  			dvid.ReportPanic(msg, server.WebServer())
   226  		}
   227  	}()
   228  	batcher, err := datastore.GetKeyValueBatcher(d)
   229  	if err != nil {
   230  		dvid.Errorf("handleBlockEvent %v\n", err)
   231  		return
   232  	}
   233  	var stop bool
   234  	var wg *sync.WaitGroup
   235  	for {
   236  		select {
   237  		case wg = <-d.syncDone:
   238  			queued := len(d.syncCh)
   239  			if queued > 0 {
   240  				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
   241  				stop = true
   242  			} else {
   243  				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
   244  				wg.Done()
   245  				return
   246  			}
   247  		case msg := <-d.syncCh:
   248  			ctx := datastore.NewVersionedCtx(d, msg.Version)
   249  			d.handleSyncMessage(ctx, msg, batcher)
   250  
   251  			if stop && len(d.syncCh) == 0 {
   252  				dvid.Infof("Shutting down sync even handler for instance %q after draining sync events.\n", d.DataName())
   253  				wg.Done()
   254  				return
   255  			}
   256  		}
   257  	}
   258  }
   259  
// handleSyncMessage dispatches a single sync event from a synced label
// instance to the appropriate handler based on the delta's concrete type,
// then records mutation activity (timing, outcome) to Kafka if available.
// Called serially from the processEvents goroutine.
func (d *Data) handleSyncMessage(ctx *datastore.VersionedCtx, msg datastore.SyncMessage, batcher storage.KeyValueBatcher) {
	d.StartUpdate()
	defer d.StopUpdate()

	t0 := time.Now()
	mutation := fmt.Sprintf("sync of data %s: event %s", d.DataName(), msg.Event)
	var diagnostic string // non-empty if a handler reported an error
	var mutID uint64      // mutation id, when the delta carries one
	successful := true

	switch delta := msg.Delta.(type) {

	case imageblk.Block:
		// Ingest of an uncompressed labelblk block.
		chunkPt := dvid.ChunkPoint3d(*delta.Index)
		d.ingestBlock(ctx, chunkPt, delta.Data, batcher)
		mutID = delta.MutID

	case imageblk.MutatedBlock:
		// Mutation of a labelblk block; Prev holds the prior voxel labels.
		chunkPt := dvid.ChunkPoint3d(*delta.Index)
		d.mutateBlock(ctx, delta.MutID, chunkPt, delta.Prev, delta.Data, batcher)
		mutID = delta.MutID

	case labelarray.IngestedBlock:
		// NOTE(review): errors from ToChunkPoint3d and MakeLabelVolume are
		// ignored in these compressed-block cases — confirm the inputs are
		// guaranteed well-formed upstream.
		chunkPt, _ := delta.BCoord.ToChunkPoint3d()
		data, _ := delta.Data.MakeLabelVolume()
		d.ingestBlock(ctx, chunkPt, data, batcher)
		mutID = delta.MutID

	case labelmap.IngestedBlock:
		chunkPt, _ := delta.BCoord.ToChunkPoint3d()
		data, _ := delta.Data.MakeLabelVolume()
		d.ingestBlock(ctx, chunkPt, data, batcher)
		mutID = delta.MutID

	case labelarray.MutatedBlock:
		chunkPt, _ := delta.BCoord.ToChunkPoint3d()
		prev, _ := delta.Prev.MakeLabelVolume()
		data, _ := delta.Data.MakeLabelVolume()
		d.mutateBlock(ctx, delta.MutID, chunkPt, prev, data, batcher)
		mutID = delta.MutID

	case labelmap.MutatedBlock:
		chunkPt, _ := delta.BCoord.ToChunkPoint3d()
		prev, _ := delta.Prev.MakeLabelVolume()
		data, _ := delta.Data.MakeLabelVolume()
		d.mutateBlock(ctx, delta.MutID, chunkPt, prev, data, batcher)
		mutID = delta.MutID

	case labels.DeltaMergeStart:
		// ignore
	case labels.DeltaMerge:
		// process annotation type
		err := d.mergeLabels(batcher, msg.Version, delta.MergeOp)
		if err != nil {
			diagnostic = fmt.Sprintf("error on merging labels for data %s: %v", d.DataName(), err)
			successful = false
		}

	case labels.DeltaSplitStart:
		// ignore for now
	case labels.DeltaSplit:
		if delta.Split == nil {
			// This is a coarse split so can't be mapped data.
			err := d.splitLabelsCoarse(batcher, msg.Version, delta)
			if err != nil {
				diagnostic = fmt.Sprintf("error on splitting labels for data %s: %v", d.DataName(), err)
				successful = false
			}
		} else {
			// Fine-grained (RLE-specified) split.
			err := d.splitLabels(batcher, msg.Version, delta)
			if err != nil {
				diagnostic = fmt.Sprintf("error on splitting labels for data %s: %v", d.DataName(), err)
				successful = false
			}
		}

	case labels.CleaveOp:
		err := d.cleaveLabels(batcher, msg.Version, delta)
		if err != nil {
			diagnostic = fmt.Sprintf("error on cleaving label for data %s: %v", d.DataName(), err)
			successful = false
		}
		mutID = delta.MutID

	default:
		diagnostic = fmt.Sprintf("critical error - unexpected delta: %v\n", msg)
		successful = false
	}

	// Log the activity record (duration in ms, outcome, diagnostics).
	if server.KafkaAvailable() {
		t := time.Since(t0)
		activity := map[string]interface{}{
			"time":       t0.Unix(),
			"duration":   t.Seconds() * 1000.0,
			"mutation":   mutation,
			"successful": successful,
		}
		if diagnostic != "" {
			activity["diagnostic"] = diagnostic
		}
		if mutID != 0 {
			activity["mutation_id"] = mutID
		}
		storage.LogActivityToKafka(activity)
	}
}
   366  
   367  // If a block of labels is ingested, adjust each label's synaptic element list.
   368  func (d *Data) ingestBlock(ctx *datastore.VersionedCtx, chunkPt dvid.ChunkPoint3d, data []byte, batcher storage.KeyValueBatcher) {
   369  	blockSize := d.blockSize()
   370  	expectedDataBytes := blockSize.Prod() * 8
   371  	if int64(len(data)) != expectedDataBytes {
   372  		dvid.Errorf("ingested block %s during sync of annotation %q is not appropriate block size %s (block data bytes = %d)... skipping\n", chunkPt, d.DataName(), blockSize, len(data))
   373  		return
   374  	}
   375  
   376  	// Get the synaptic elements for this block
   377  	tk := NewBlockTKey(chunkPt)
   378  	elems, err := getElements(ctx, tk)
   379  	if err != nil {
   380  		dvid.Errorf("err getting elements for block %s: %v\n", chunkPt, err)
   381  		return
   382  	}
   383  	if len(elems) == 0 {
   384  		return
   385  	}
   386  	batch := batcher.NewBatch(ctx)
   387  
   388  	// Iterate through all element positions, finding corresponding label and storing elements.
   389  	added := 0
   390  	toAdd := LabelElements{}
   391  	for n := range elems {
   392  		pt := elems[n].Pos.Point3dInChunk(blockSize)
   393  		i := (pt[2]*blockSize[1]+pt[1])*blockSize[0]*8 + pt[0]*8
   394  		label := binary.LittleEndian.Uint64(data[i : i+8])
   395  		if label != 0 {
   396  			toAdd.add(label, elems[n].ElementNR)
   397  			added++
   398  		}
   399  	}
   400  
   401  	// Add any non-zero label elements to their respective label k/v.
   402  	var delta DeltaModifyElements
   403  	delta.Add = make([]ElementPos, added)
   404  	i := 0
   405  	for label, addElems := range toAdd {
   406  		tk := NewLabelTKey(label)
   407  		labelElems, err := getElementsNR(ctx, tk)
   408  		if err != nil {
   409  			dvid.Errorf("err getting elements for label %d: %v\n", label, err)
   410  			return
   411  		}
   412  		labelElems.add(addElems)
   413  		val, err := json.Marshal(labelElems)
   414  		if err != nil {
   415  			dvid.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
   416  			return
   417  		}
   418  		batch.Put(tk, val)
   419  
   420  		for _, addElem := range addElems {
   421  			delta.Add[i] = ElementPos{Label: label, Kind: addElem.Kind, Pos: addElem.Pos}
   422  			i++
   423  		}
   424  	}
   425  
   426  	if err := batch.Commit(); err != nil {
   427  		dvid.Criticalf("bad commit in annotations %q after delete block: %v\n", d.DataName(), err)
   428  		return
   429  	}
   430  
   431  	// Notify any subscribers of label annotation changes.
   432  	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
   433  	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
   434  	if err := datastore.NotifySubscribers(evt, msg); err != nil {
   435  		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
   436  	}
   437  
   438  	// send kafka merge event to instance-uuid topic
   439  	versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
   440  	msginfo := map[string]interface{}{
   441  		"Action": "ingest-block",
   442  		// "MutationID": op.MutID,
   443  		"UUID":      string(versionuuid),
   444  		"Timestamp": time.Now().String(),
   445  		"Delta":     delta,
   446  	}
   447  	jsonmsg, _ := json.Marshal(msginfo)
   448  	if err := d.PublishKafkaMsg(jsonmsg); err != nil {
   449  		dvid.Errorf("unable to write ingest-block to kafka for data %q: %v\n", d.DataName(), err)
   450  	}
   451  }
   452  
   453  // If a block of labels is mutated, adjust any label that was either removed or added.
   454  func (d *Data) mutateBlock(ctx *datastore.VersionedCtx, mutID uint64, chunkPt dvid.ChunkPoint3d, prev, data []byte, batcher storage.KeyValueBatcher) {
   455  	// Get the synaptic elements for this block
   456  	tk := NewBlockTKey(chunkPt)
   457  	elems, err := getElements(ctx, tk)
   458  	if err != nil {
   459  		dvid.Errorf("err getting elements for block %s: %v\n", chunkPt, err)
   460  		return
   461  	}
   462  	if len(elems) == 0 {
   463  		return
   464  	}
   465  	blockSize := d.blockSize()
   466  	batch := batcher.NewBatch(ctx)
   467  
   468  	// Compute the strides (in bytes)
   469  	bX := blockSize[0] * 8
   470  	bY := blockSize[1] * bX
   471  
   472  	// Iterate through all element positions, finding corresponding label and storing elements.
   473  	var delta DeltaModifyElements
   474  	labels := make(map[uint64]struct{})
   475  	toAdd := LabelElements{}
   476  	toDel := LabelPoints{}
   477  	for n := range elems {
   478  		pt := elems[n].Pos.Point3dInChunk(blockSize)
   479  		i := pt[2]*bY + pt[1]*bX + pt[0]*8
   480  		label := binary.LittleEndian.Uint64(data[i : i+8])
   481  		var old uint64
   482  		if len(prev) != 0 {
   483  			old = binary.LittleEndian.Uint64(prev[i : i+8])
   484  		}
   485  		if label != 0 {
   486  			toAdd.add(label, elems[n].ElementNR)
   487  			labels[label] = struct{}{}
   488  			delta.Add = append(delta.Add, ElementPos{Label: label, Kind: elems[n].Kind, Pos: elems[n].Pos})
   489  		}
   490  		if old != 0 {
   491  			toDel.add(old, elems[n].Pos)
   492  			labels[old] = struct{}{}
   493  			delta.Del = append(delta.Del, ElementPos{Label: old, Kind: elems[n].Kind, Pos: elems[n].Pos})
   494  		}
   495  	}
   496  
   497  	// Modify any modified label k/v.
   498  	for label := range labels {
   499  		tk := NewLabelTKey(label)
   500  		labelElems, err := getElementsNR(ctx, tk)
   501  		if err != nil {
   502  			dvid.Errorf("err getting elements for label %d: %v\n", label, err)
   503  			return
   504  		}
   505  		deletions, found := toDel[label]
   506  		if found {
   507  			for _, pt := range deletions {
   508  				labelElems.delete(pt)
   509  			}
   510  		}
   511  		additions, found := toAdd[label]
   512  		if found {
   513  			labelElems.add(additions)
   514  		}
   515  		val, err := json.Marshal(labelElems)
   516  		if err != nil {
   517  			dvid.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
   518  			return
   519  		}
   520  		batch.Put(tk, val)
   521  	}
   522  	if err := batch.Commit(); err != nil {
   523  		dvid.Criticalf("bad commit in annotations %q after delete block: %v\n", d.DataName(), err)
   524  		return
   525  	}
   526  
   527  	// Notify any subscribers of label annotation changes.
   528  	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
   529  	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
   530  	if err := datastore.NotifySubscribers(evt, msg); err != nil {
   531  		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
   532  	}
   533  
   534  	// send kafka merge event to instance-uuid topic
   535  	versionuuid, _ := datastore.UUIDFromVersion(ctx.VersionID())
   536  	msginfo := map[string]interface{}{
   537  		"Action":     "mutate-block",
   538  		"MutationID": mutID,
   539  		"UUID":       string(versionuuid),
   540  		"Timestamp":  time.Now().String(),
   541  		"Delta":      delta,
   542  	}
   543  	jsonmsg, _ := json.Marshal(msginfo)
   544  	if err := d.PublishKafkaMsg(jsonmsg); err != nil {
   545  		dvid.Errorf("unable to write mutate-block to kafka for data %q: %v\n", d.DataName(), err)
   546  	}
   547  }
   548  
   549  func (d *Data) mergeLabels(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.MergeOp) error {
   550  	d.StartUpdate()
   551  	defer d.StopUpdate()
   552  
   553  	ctx := datastore.NewVersionedCtx(d, v)
   554  	batch := batcher.NewBatch(ctx)
   555  
   556  	// Get the target label
   557  	targetTk := NewLabelTKey(op.Target)
   558  	targetElems, err := getElementsNR(ctx, targetTk)
   559  	if err != nil {
   560  		return fmt.Errorf("get annotations for instance %q, target %d, in syncMerge: %v", d.DataName(), op.Target, err)
   561  	}
   562  
   563  	// Iterate through each merged label, read old elements, delete that k/v, then add it to the current target elements.
   564  	var delta DeltaModifyElements
   565  	elemsAdded := 0
   566  	for label := range op.Merged {
   567  		tk := NewLabelTKey(label)
   568  		elems, err := getElementsNR(ctx, tk)
   569  		if err != nil {
   570  			return fmt.Errorf("unable to get annotation elements for instance %q, label %d in syncMerge: %v", d.DataName(), label, err)
   571  		}
   572  		if elems == nil || len(elems) == 0 {
   573  			continue
   574  		}
   575  		batch.Delete(tk)
   576  		elemsAdded += len(elems)
   577  		targetElems = append(targetElems, elems...)
   578  
   579  		// for labelsz.  TODO, only do this computation if really subscribed.
   580  		for _, elem := range elems {
   581  			delta.Add = append(delta.Add, ElementPos{Label: op.Target, Kind: elem.Kind, Pos: elem.Pos})
   582  			delta.Del = append(delta.Del, ElementPos{Label: label, Kind: elem.Kind, Pos: elem.Pos})
   583  		}
   584  	}
   585  	if elemsAdded > 0 {
   586  		val, err := json.Marshal(targetElems)
   587  		if err != nil {
   588  			return fmt.Errorf("couldn't serialize annotation elements in instance %q: %v", d.DataName(), err)
   589  		}
   590  		batch.Put(targetTk, val)
   591  		if err := batch.Commit(); err != nil {
   592  			return fmt.Errorf("unable to commit merge for instance %q: %v", d.DataName(), err)
   593  		}
   594  
   595  		// send kafka merge event to instance-uuid topic
   596  		versionuuid, _ := datastore.UUIDFromVersion(v)
   597  		msginfo := map[string]interface{}{
   598  			"Action":     "merge",
   599  			"MutationID": op.MutID,
   600  			"UUID":       string(versionuuid),
   601  			"Timestamp":  time.Now().String(),
   602  			"Delta":      delta,
   603  		}
   604  		jsonBytes, _ := json.Marshal(msginfo)
   605  		if len(jsonBytes) > storage.KafkaMaxMessageSize {
   606  			var postRef string
   607  			if postRef, err = d.PutBlob(jsonBytes); err != nil {
   608  				dvid.Errorf("couldn't post large payload for merge annotations %q: %v", d.DataName(), err)
   609  			}
   610  			delete(msginfo, "Delta")
   611  			msginfo["DataRef"] = postRef
   612  			jsonBytes, _ = json.Marshal(msginfo)
   613  		}
   614  		if err := d.PublishKafkaMsg(jsonBytes); err != nil {
   615  			dvid.Errorf("unable to write merge to kafka for data %q: %v\n", d.DataName(), err)
   616  		}
   617  	}
   618  
   619  	// Notify any subscribers of label annotation changes.
   620  	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
   621  	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
   622  	if err := datastore.NotifySubscribers(evt, msg); err != nil {
   623  		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
   624  	}
   625  	return nil
   626  }
   627  
// cleaveLabels handles a cleave event from synced label data: elements on the
// target label whose positions fall within the cleaved supervoxels are moved
// to the new cleaved label.  Subscribers are notified and a cleave event is
// published to Kafka when any elements moved.
func (d *Data) cleaveLabels(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.CleaveOp) error {
	// d.Lock()
	// defer d.Unlock()
	dvid.Infof("Starting cleave sync on instance %q to target %d with resulting label %d using %d cleaved svs\n", d.DataName(), op.Target, op.CleavedLabel, len(op.CleavedSupervoxels))
	timedLog := dvid.NewTimeLog()

	labelData := d.getSyncedLabels()
	if labelData == nil {
		return fmt.Errorf("no synced labels for annotation %q, skipping label-aware denormalization", d.DataName())
	}

	d.StartUpdate()
	defer d.StopUpdate()

	ctx := datastore.NewVersionedCtx(d, v)
	targetTk := NewLabelTKey(op.Target)
	targetElems, err := getElementsNR(ctx, targetTk)
	if err != nil {
		return err
	}
	// No elements on the target label means nothing can move.
	if len(targetElems) == 0 {
		return nil
	}

	// Cleaves only apply to label data that supports supervoxels.
	supervoxelData, ok := labelData.(supervoxelType)
	if !ok {
		return fmt.Errorf("annotation instance %q is synced with label data %q that doesn't support supervoxels yet had cleave", d.DataName(), labelData.DataName())
	}

	// Partition the target's elements by whether each position lies within a
	// cleaved supervoxel.
	var delta DeltaModifyElements
	labelElems := LabelElements{}
	pts := make([]dvid.Point3d, len(targetElems))
	for i, elem := range targetElems {
		pts[i] = elem.Pos
	}
	inCleaved, err := supervoxelData.GetPointsInSupervoxels(v, pts, op.CleavedSupervoxels)
	if err != nil {
		return err
	}
	for i, cleaved := range inCleaved {
		elem := targetElems[i]
		if cleaved {
			labelElems.add(op.CleavedLabel, elem)
			delta.Del = append(delta.Del, ElementPos{Label: op.Target, Kind: elem.Kind, Pos: elem.Pos})
			delta.Add = append(delta.Add, ElementPos{Label: op.CleavedLabel, Kind: elem.Kind, Pos: elem.Pos})
		} else {
			labelElems.add(op.Target, elem)
		}
	}

	// Write the new label-indexed denormalizations
	batch := batcher.NewBatch(ctx)
	for label, elems := range labelElems {
		labelTKey := NewLabelTKey(label)
		val, err := json.Marshal(elems)
		if err != nil {
			return fmt.Errorf("couldn't serialize annotation elements for label %d in instance %q: %v", label, d.DataName(), err)
		}
		batch.Put(labelTKey, val)
	}

	// Handle case of a completely removed label
	if _, found := labelElems[op.Target]; !found {
		batch.Delete(NewLabelTKey(op.Target))
	}

	if err := batch.Commit(); err != nil {
		return fmt.Errorf("bad commit in annotations %q after split: %v", d.DataName(), err)
	}

	// Notify any subscribers of label annotation changes.
	if len(delta.Add) != 0 || len(delta.Del) != 0 {
		evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
		msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
		}

		// send kafka cleave event to instance-uuid topic
		versionuuid, _ := datastore.UUIDFromVersion(v)
		msginfo := map[string]interface{}{
			"Action":     "cleave",
			"MutationID": op.MutID,
			"UUID":       string(versionuuid),
			"Timestamp":  time.Now().String(),
			"Delta":      delta,
		}
		jsonBytes, _ := json.Marshal(msginfo)
		// Oversized payloads are stored as a blob and referenced from the message.
		if len(jsonBytes) > storage.KafkaMaxMessageSize {
			var postRef string
			if postRef, err = d.PutBlob(jsonBytes); err != nil {
				dvid.Errorf("couldn't post large payload for cleave annotations %q: %v", d.DataName(), err)
			}
			delete(msginfo, "Delta")
			msginfo["DataRef"] = postRef
			jsonBytes, _ = json.Marshal(msginfo)
		}
		if err := d.PublishKafkaMsg(jsonBytes); err != nil {
			dvid.Errorf("unable to write cleave to kafka for data %q: %v\n", d.DataName(), err)
		}
	}
	timedLog.Infof("Finished cleave sync to annotation %q: mutation id %d", d.DataName(), op.MutID)
	return nil
}
   732  
// splitLabelsCoarse handles a coarse (block-aligned) label split: all elements
// of the old label whose positions fall within the split blocks are moved to
// the new label.  Elements are moved without altering their relationships.
func (d *Data) splitLabelsCoarse(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.DeltaSplit) error {
	// d.Lock()
	// defer d.Unlock()

	d.StartUpdate()
	defer d.StopUpdate()

	ctx := datastore.NewVersionedCtx(d, v)
	batch := batcher.NewBatch(ctx)

	// Get the elements for the old label.
	oldTk := NewLabelTKey(op.OldLabel)
	oldElems, err := getElementsNR(ctx, oldTk)
	if err != nil {
		return fmt.Errorf("unable to get annotations for instance %q, label %d in syncSplit: %v", d.DataName(), op.OldLabel, err)
	}

	// Create a map to test each point.
	splitBlocks := make(map[dvid.IZYXString]struct{})
	for _, zyxStr := range op.SortedBlocks {
		splitBlocks[zyxStr] = struct{}{}
	}

	// Move any elements that are within the split blocks.
	var delta DeltaModifyElements
	toDel := make(map[int]struct{}) // indices into oldElems slated for removal
	toAdd := ElementsNR{}
	blockSize := d.blockSize()
	for i, elem := range oldElems {
		zyxStr := elem.Pos.ToBlockIZYXString(blockSize)
		if _, found := splitBlocks[zyxStr]; found {
			toDel[i] = struct{}{}
			toAdd = append(toAdd, elem)

			// for downstream annotation syncs like labelsz.  TODO: only perform if subscribed.  Better: do ROI filtering here.
			delta.Del = append(delta.Del, ElementPos{Label: op.OldLabel, Kind: elem.Kind, Pos: elem.Pos})
			delta.Add = append(delta.Add, ElementPos{Label: op.NewLabel, Kind: elem.Kind, Pos: elem.Pos})
		}
	}
	// Nothing fell within the split blocks, so nothing to persist or publish.
	if len(toDel) == 0 {
		return nil
	}

	// Store split elements into new label elements.
	newTk := NewLabelTKey(op.NewLabel)
	newElems, err := getElementsNR(ctx, newTk)
	if err != nil {
		return fmt.Errorf("unable to get annotations for instance %q, label %d in syncSplit: %v", d.DataName(), op.NewLabel, err)
	}
	newElems.add(toAdd)
	val, err := json.Marshal(newElems)
	if err != nil {
		return fmt.Errorf("couldn't serialize annotation elements in instance %q: %v", d.DataName(), err)
	}
	batch.Put(newTk, val)

	// Delete any split from old label elements without removing the relationships.
	// This filters without allocating, using fact that a slice shares the same backing array and
	// capacity as the original, so storage is reused.
	filtered := oldElems[:0]
	for i, elem := range oldElems {
		if _, found := toDel[i]; !found {
			filtered = append(filtered, elem)
		}
	}

	// Delete or store k/v depending on what remains.
	if len(filtered) == 0 {
		batch.Delete(oldTk)
	} else {
		val, err := json.Marshal(filtered)
		if err != nil {
			return fmt.Errorf("couldn't serialize annotation elements in instance %q: %v", d.DataName(), err)
		}
		batch.Put(oldTk, val)
	}

	if err := batch.Commit(); err != nil {
		return fmt.Errorf("bad commit in annotations %q after split: %v", d.DataName(), err)
	}

	// Notify any subscribers of label annotation changes.
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}

	// send kafka coarse split event to instance-uuid topic
	versionuuid, _ := datastore.UUIDFromVersion(v)
	msginfo := map[string]interface{}{
		"Action":     "split-coarse",
		"MutationID": op.MutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
		"Delta":      delta,
	}
	jsonBytes, _ := json.Marshal(msginfo)
	// Oversized payloads are stored as a blob and referenced from the message.
	if len(jsonBytes) > storage.KafkaMaxMessageSize {
		var postRef string
		if postRef, err = d.PutBlob(jsonBytes); err != nil {
			dvid.Errorf("couldn't post large payload for coarse split annotations %q: %v", d.DataName(), err)
		}
		delete(msginfo, "Delta")
		msginfo["DataRef"] = postRef
		jsonBytes, _ = json.Marshal(msginfo)
	}
	if err := d.PublishKafkaMsg(jsonBytes); err != nil {
		dvid.Errorf("unable to write coarse split to kafka for data %q: %v\n", d.DataName(), err)
	}
	return nil
}
   845  
   846  func (d *Data) splitLabels(batcher storage.KeyValueBatcher, v dvid.VersionID, op labels.DeltaSplit) error {
   847  	// d.Lock()
   848  	// defer d.Unlock()
   849  
   850  	d.StartUpdate()
   851  	defer d.StopUpdate()
   852  
   853  	ctx := datastore.NewVersionedCtx(d, v)
   854  	batch := batcher.NewBatch(ctx)
   855  
   856  	var delta DeltaModifyElements
   857  	toAdd := ElementsNR{}
   858  	toDel := make(map[string]struct{})
   859  
   860  	// Iterate through each split block, get the elements, and then modify the previous and new label k/v.
   861  	for izyx, rles := range op.Split {
   862  		// Get the elements for this block.
   863  		blockPt, err := izyx.ToChunkPoint3d()
   864  		if err != nil {
   865  			return err
   866  		}
   867  		tk := NewBlockTKey(blockPt)
   868  		elems, err := getElements(ctx, tk)
   869  		if err != nil {
   870  			dvid.Errorf("getting annotations for block %s on split of %d from %d: %v\n", blockPt, op.NewLabel, op.OldLabel, err)
   871  			continue
   872  		}
   873  
   874  		// For any element within the split RLEs, add to the delete and addition lists.
   875  		for n, elem := range elems {
   876  			for _, rle := range rles {
   877  				if rle.Within(elem.Pos) {
   878  					toAdd = append(toAdd, elems[n].ElementNR)
   879  					toDel[elem.Pos.String()] = struct{}{}
   880  
   881  					// for downstream annotation syncs like labelsz.  TODO: only perform if subscribed.  Better: do ROI filtering here.
   882  					delta.Del = append(delta.Del, ElementPos{Label: op.OldLabel, Kind: elem.Kind, Pos: elem.Pos})
   883  					delta.Add = append(delta.Add, ElementPos{Label: op.NewLabel, Kind: elem.Kind, Pos: elem.Pos})
   884  					break
   885  				}
   886  			}
   887  		}
   888  	}
   889  
   890  	// Modify the old label k/v
   891  	if len(toDel) != 0 {
   892  		tk := NewLabelTKey(op.OldLabel)
   893  		elems, err := getElementsNR(ctx, tk)
   894  		if err != nil {
   895  			dvid.Errorf("unable to get annotations for instance %q, old label %d in syncSplit: %v\n", d.DataName(), op.OldLabel, err)
   896  		} else {
   897  			filtered := elems[:0]
   898  			for _, elem := range elems {
   899  				if _, found := toDel[elem.Pos.String()]; !found {
   900  					filtered = append(filtered, elem)
   901  				}
   902  			}
   903  			if len(filtered) == 0 {
   904  				batch.Delete(tk)
   905  			} else {
   906  				val, err := json.Marshal(filtered)
   907  				if err != nil {
   908  					dvid.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
   909  				} else {
   910  					batch.Put(tk, val)
   911  				}
   912  			}
   913  		}
   914  	}
   915  
   916  	// Modify the new label k/v
   917  	if len(toAdd) != 0 {
   918  		tk := NewLabelTKey(op.NewLabel)
   919  		elems, err := getElementsNR(ctx, tk)
   920  		if err != nil {
   921  			dvid.Errorf("unable to get annotations for instance %q, label %d in syncSplit: %v\n", d.DataName(), op.NewLabel, err)
   922  		} else {
   923  			elems.add(toAdd)
   924  			val, err := json.Marshal(elems)
   925  			if err != nil {
   926  				dvid.Errorf("couldn't serialize annotation elements in instance %q: %v\n", d.DataName(), err)
   927  			} else {
   928  				batch.Put(tk, val)
   929  			}
   930  		}
   931  	}
   932  
   933  	if err := batch.Commit(); err != nil {
   934  		return fmt.Errorf("bad commit in annotations %q after split: %v", d.DataName(), err)
   935  	}
   936  
   937  	// Notify any subscribers of label annotation changes.
   938  	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: ModifyElementsEvent}
   939  	msg := datastore.SyncMessage{Event: ModifyElementsEvent, Version: ctx.VersionID(), Delta: delta}
   940  	if err := datastore.NotifySubscribers(evt, msg); err != nil {
   941  		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
   942  	}
   943  
   944  	// send kafka merge event to instance-uuid topic
   945  	versionuuid, _ := datastore.UUIDFromVersion(v)
   946  	msginfo := map[string]interface{}{
   947  		"Action":     "split",
   948  		"MutationID": op.MutID,
   949  		"UUID":       string(versionuuid),
   950  		"Timestamp":  time.Now().String(),
   951  		"Delta":      delta,
   952  	}
   953  	jsonBytes, _ := json.Marshal(msginfo)
   954  	if len(jsonBytes) > storage.KafkaMaxMessageSize {
   955  		var err error
   956  		var postRef string
   957  		if postRef, err = d.PutBlob(jsonBytes); err != nil {
   958  			dvid.Errorf("couldn't post large payload for aplit annotations %q: %v", d.DataName(), err)
   959  		}
   960  		delete(msginfo, "Delta")
   961  		msginfo["DataRef"] = postRef
   962  		jsonBytes, _ = json.Marshal(msginfo)
   963  	}
   964  	if err := d.PublishKafkaMsg(jsonBytes); err != nil {
   965  		dvid.Errorf("unable to write split to kafka for data %q: %v\n", d.DataName(), err)
   966  	}
   967  	return nil
   968  }