github.com/janelia-flyem/dvid@v1.0.0/datatype/labelarray/mutate.go

/*
	This file contains code that manages labelarray mutations at a low level, sharding
	work to specific goroutines based on the block coordinate being mutated.
	TODO: Move ingest/mutate/delete block ops in write.go into the same system.  Currently,
	we assume that merge/split ops in a version do not overlap the raw block label mutations.
*/
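// A rough sketch of the sharding described above (illustrative only: the
// actual channel setup lives elsewhere in this package, and the buffer size
// here is made up).  Each of numMutateHandlers goroutines owns one channel,
// and a block is always routed by hashing its coordinate:
//
//	for i := 0; i < numMutateHandlers; i++ {
//		d.mutateCh[i] = make(chan procMsg, 100)
//		go d.mutateBlock(d.mutateCh[i])
//	}
//	...
//	n := bcoord.Hash(numMutateHandlers) // same bcoord -> same handler
//	d.mutateCh[n] <- procMsg{op: op, v: v}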

package labelarray

import (
	"encoding/json"
	"fmt"
	"io"
	"sort"
	"time"

	"github.com/janelia-flyem/dvid/datastore"
	"github.com/janelia-flyem/dvid/datatype/common/downres"
	"github.com/janelia-flyem/dvid/datatype/common/labels"
	"github.com/janelia-flyem/dvid/dvid"
	"github.com/janelia-flyem/dvid/server"
)

type sizeChange struct {
	oldSize, newSize uint64
}

// MergeLabels handles merging of any number of labels throughout the various label data
// structures.  It assumes that the merges aren't cascading, e.g., there is no attempt
// to merge label 3 into 4 and also 4 into 5.  The caller should have flattened the merges.
// TODO: Provide some indication that a subset of labels is under evolution, returning
//   an "unavailable" status or 203 for a non-authoritative response.  This might not be
//   feasible for clustered DVID front-ends due to coordination issues.
//
// EVENTS
//
// labels.MergeStartEvent occurs at the very start of a merge and transmits a labels.DeltaMergeStart struct.
//
// labels.MergeBlockEvent occurs for every block of a merged label and transmits a labels.DeltaMerge struct.
//
// labels.MergeEndEvent occurs at the end of a merge and transmits a labels.DeltaMergeEnd struct.
//
func (d *Data) MergeLabels(v dvid.VersionID, op labels.MergeOp) error {
	dvid.Debugf("Merging %s into label %d ...\n", op.Merged, op.Target)

	// Only do one large mutation at a time, although each request can start many goroutines.
	server.LargeMutationMutex.Lock()
	defer server.LargeMutationMutex.Unlock()

	// Get all the affected blocks in the merge.
	targetMeta, _, err := GetMappedLabelIndex(d, v, op.Target, 0, dvid.Bounds{})
	if err != nil {
		return fmt.Errorf("can't get block indices of merge target label %d: %v", op.Target, err)
	}
	mergedMeta, _, err := GetMappedLabelSetIndex(d, v, op.Merged, 0, dvid.Bounds{})
	if err != nil {
		return fmt.Errorf("can't get block indices of labels to merge %s: %v", op.Merged, err)
	}

	// Asynchronously perform merge and handle any concurrent requests using the cache map until
	// labelarray is updated and consistent.  Mark these labels as dirty until done.
	d.StartUpdate()
	iv := dvid.InstanceVersion{Data: d.DataUUID(), Version: v}
	if err := labels.MergeStart(iv, op); err != nil {
		d.StopUpdate()
		return err
	}
	mutID := d.NewMutationID()

	// send kafka merge event to instance-uuid topic
	// msg: {"Action": "merge", "Target": <target label>, "Labels": [<merged labels>], ...}
	lbls := make([]uint64, 0, len(op.Merged))
	for label := range op.Merged {
		lbls = append(lbls, label)
	}

	versionuuid, _ := datastore.UUIDFromVersion(v)
	msginfo := map[string]interface{}{
		"Action":     "merge",
		"Target":     op.Target,
		"Labels":     lbls,
		"UUID":       string(versionuuid),
		"MutationID": mutID,
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ := json.Marshal(msginfo)
	if err := d.PublishKafkaMsg(jsonmsg); err != nil {
		dvid.Errorf("can't send merge op for %q to kafka: %v\n", d.DataName(), err)
	}

	// Signal that we are starting a merge.
	evt := datastore.SyncEvent{d.DataUUID(), labels.MergeStartEvent}
	msg := datastore.SyncMessage{labels.MergeStartEvent, v, labels.DeltaMergeStart{op}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		d.StopUpdate()
		return err
	}

	go func() {
		delta := labels.DeltaMerge{
			MergeOp:      op,
			Blocks:       targetMeta.Blocks.MergeCopy(mergedMeta.Blocks),
			TargetVoxels: targetMeta.Voxels,
			MergedVoxels: mergedMeta.Voxels,
		}
		if err := d.processMerge(v, mutID, delta); err != nil {
			dvid.Criticalf("unable to process merge: %v\n", err)
		}
		d.StopUpdate()
		labels.MergeStop(iv, op)

		dvid.Infof("processed merge for %q in goroutine\n", d.DataName())
	}()
	return nil
}
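// A hypothetical caller-side sketch (label IDs illustrative), assuming
// labels.Set is the map[uint64]struct{} set type ranged over above: to merge
// labels 3 and 4 into label 5, the caller submits a single flattened op
// rather than chaining 3 -> 4 -> 5.
//
//	op := labels.MergeOp{
//		Target: 5,
//		Merged: labels.Set{3: struct{}{}, 4: struct{}{}},
//	}
//	if err := d.MergeLabels(v, op); err != nil {
//		// merge was rejected, e.g., due to conflicting in-progress ops
//	}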

// processMerge handles block and label index mods for a merge.
func (d *Data) processMerge(v dvid.VersionID, mutID uint64, delta labels.DeltaMerge) error {
	timedLog := dvid.NewTimeLog()

	evt := datastore.SyncEvent{d.DataUUID(), labels.MergeBlockEvent}
	msg := datastore.SyncMessage{labels.MergeBlockEvent, v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return fmt.Errorf("can't notify subscribers for event %v: %v", evt, err)
	}

	downresMut := downres.NewMutation(d, v, mutID)
	for _, izyx := range delta.Blocks {
		n := izyx.Hash(numMutateHandlers)
		d.MutAdd(mutID)
		op := mergeOp{mutID: mutID, MergeOp: delta.MergeOp, bcoord: izyx, downresMut: downresMut}
		d.mutateCh[n] <- procMsg{op: op, v: v}
	}

	// When we've processed all the delta blocks, we can remove this merge op
	// from the merge cache since all labels will have completed.
	d.MutWait(mutID)
	d.MutDelete(mutID)
	timedLog.Debugf("labelarray block-level merge (%d blocks) of %s -> %d", len(delta.Blocks), delta.MergeOp.Merged, delta.MergeOp.Target)

	// Merge the new blocks into the target label block index.
	mergebdm := make(blockDiffMap, len(delta.Blocks))
	for _, izyx := range delta.Blocks {
		mergebdm[izyx] = labelDiff{delta: int32(delta.MergedVoxels), present: true}
	}
	ChangeLabelIndex(d, v, delta.Target, mergebdm)

	// Delete all the merged label indices.
	for merged := range delta.Merged {
		DeleteLabelIndex(d, v, merged)
	}

	deltaRep := labels.DeltaReplaceSize{
		Label:   delta.Target,
		OldSize: delta.TargetVoxels,
		NewSize: delta.TargetVoxels + delta.MergedVoxels,
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
	msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, deltaRep}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("can't notify subscribers for event %v: %v\n", evt, err)
	}

	evt = datastore.SyncEvent{d.DataUUID(), labels.MergeEndEvent}
	msg = datastore.SyncMessage{labels.MergeEndEvent, v, labels.DeltaMergeEnd{delta.MergeOp}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("can't notify subscribers for event %v: %v\n", evt, err)
	}

	if err := downresMut.Execute(); err != nil {
		return err
	}

	dvid.Infof("Merged %s -> %d, data %q, resulting in %d blocks\n", delta.Merged, delta.Target, d.DataName(), len(delta.Blocks))

	// send kafka merge complete event to instance-uuid topic
	versionuuid, _ := datastore.UUIDFromVersion(v)
	msginfo := map[string]interface{}{
		"Action":     "merge-complete",
		"MutationID": mutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ := json.Marshal(msginfo)
	return d.PublishKafkaMsg(jsonmsg)
}
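// For reference, the two kafka messages above marshal to JSON shaped like
// the following (field values illustrative; json.Marshal orders map keys
// alphabetically):
//
//	{"Action":"merge","Labels":[3,4],"MutationID":7,"Target":5,"Timestamp":"...","UUID":"..."}
//	{"Action":"merge-complete","MutationID":7,"Timestamp":"...","UUID":"..."}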

// SplitLabels splits a portion of a label's voxels into a given split label or, if the given split
// label is 0, a new label, which is returned.  The input is a binary sparse volume and should
// preferably be the smaller portion of a labeled region.  In other words, the caller should choose
// to submit for relabeling the smaller portion of any split.  The given split voxels are assumed
// to be within the fromLabel set of voxels; behavior is unspecified if this is not the case.
//
// EVENTS
//
// labels.SplitStartEvent occurs at the very start of a split and transmits a labels.DeltaSplitStart struct.
//
// labels.SplitBlockEvent occurs for every block of a split label and transmits a labels.DeltaSplit struct.
//
// labels.SplitEndEvent occurs at the end of a split and transmits a labels.DeltaSplitEnd struct.
//
func (d *Data) SplitLabels(v dvid.VersionID, fromLabel, splitLabel uint64, r io.ReadCloser) (toLabel uint64, err error) {
	// Create a new label id for this version that will persist to store.
	if splitLabel != 0 {
		toLabel = splitLabel
		dvid.Debugf("Splitting subset of label %d into given label %d ...\n", fromLabel, splitLabel)
		if err = d.updateMaxLabel(v, splitLabel); err != nil {
			return
		}
	} else {
		toLabel, err = d.NewLabel(v)
		if err != nil {
			return
		}
		dvid.Debugf("Splitting subset of label %d into new label %d ...\n", fromLabel, toLabel)
	}

	// Read the sparse volume from reader.
	var split dvid.RLEs
	split, err = dvid.ReadRLEs(r)
	if err != nil {
		return
	}
	toLabelSize, _ := split.Stats()

	// Only do one large mutation at a time, although each request can start many goroutines.
	server.LargeMutationMutex.Lock()
	defer server.LargeMutationMutex.Unlock()

	// Store split info into separate data.
	var splitData []byte
	if splitData, err = split.MarshalBinary(); err != nil {
		return
	}
	var splitRef string
	if splitRef, err = d.PutBlob(splitData); err != nil {
		dvid.Errorf("error storing split data: %v\n", err)
	}

	// send kafka split event to instance-uuid topic
	mutID := d.NewMutationID()
	versionuuid, _ := datastore.UUIDFromVersion(v)
	msginfo := map[string]interface{}{
		"Action":     "split",
		"Target":     fromLabel,
		"NewLabel":   toLabel,
		"Split":      splitRef,
		"MutationID": mutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ := json.Marshal(msginfo)
	if err = d.PublishKafkaMsg(jsonmsg); err != nil {
		dvid.Errorf("error sending split op to kafka: %v\n", err)
	}

	evt := datastore.SyncEvent{d.DataUUID(), labels.SplitStartEvent}
	splitOpStart := labels.DeltaSplitStart{fromLabel, toLabel}
	splitOpEnd := labels.DeltaSplitEnd{fromLabel, toLabel}

	// Make sure we can split given current merges in progress.
	d.StartUpdate()
	iv := dvid.InstanceVersion{Data: d.DataUUID(), Version: v}
	if err = labels.SplitStart(iv, splitOpStart); err != nil {
		d.StopUpdate()
		return
	}
	defer func() {
		labels.SplitStop(iv, splitOpEnd)
		d.StopUpdate()
	}()

	// Signal that we are starting a split.
	msg := datastore.SyncMessage{labels.SplitStartEvent, v, splitOpStart}
	if err = datastore.NotifySubscribers(evt, msg); err != nil {
		return
	}

	// Partition the split spans into blocks.
	var splitmap dvid.BlockRLEs
	blockSize, ok := d.BlockSize().(dvid.Point3d)
	if !ok {
		err = fmt.Errorf("can't do split because block size for instance %s is not 3d: %v", d.DataName(), d.BlockSize())
		return
	}
	splitmap, err = split.Partition(blockSize)
	if err != nil {
		return
	}

	// Get a sorted list of blocks that cover split.
	splitblks := splitmap.SortedKeys()

	// Do the split.
	deltaSplit := labels.DeltaSplit{
		MutID:        mutID,
		OldLabel:     fromLabel,
		NewLabel:     toLabel,
		Split:        splitmap,
		SortedBlocks: splitblks,
		SplitVoxels:  toLabelSize,
	}
	if err = d.processSplit(v, mutID, deltaSplit); err != nil {
		return
	}

	msginfo = map[string]interface{}{
		"Action":     "split-complete",
		"MutationID": mutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ = json.Marshal(msginfo)
	if err = d.PublishKafkaMsg(jsonmsg); err != nil {
		dvid.Errorf("error sending split complete op to kafka: %v\n", err)
	}

	return toLabel, nil
}
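// A hypothetical caller sketch (label ID illustrative): stream an RLE-encoded
// sparse volume, e.g. from an HTTP request body, to split voxels out of label
// 81 into a server-assigned new label:
//
//	toLabel, err := d.SplitLabels(v, 81, 0, req.Body)
//	if err != nil {
//		// handle failed split
//	}
//	dvid.Infof("split voxels from label 81 now belong to label %d\n", toLabel)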

// SplitCoarseLabels splits a portion of a label's voxels into a given split label or, if the given split
// label is 0, a new label, which is returned.  The input is a binary sparse volume defined by block
// coordinates and should be the smaller portion of a labeled region-to-be-split.
//
// EVENTS
//
// labels.SplitStartEvent occurs at the very start of a split and transmits a labels.DeltaSplitStart struct.
//
// labels.SplitBlockEvent occurs for every block of a split label and transmits a labels.DeltaSplit struct.
//
// labels.SplitEndEvent occurs at the end of a split and transmits a labels.DeltaSplitEnd struct.
//
func (d *Data) SplitCoarseLabels(v dvid.VersionID, fromLabel, splitLabel uint64, r io.ReadCloser) (toLabel uint64, err error) {
	// Create a new label id for this version that will persist to store.
	if splitLabel != 0 {
		toLabel = splitLabel
		dvid.Debugf("Splitting coarse subset of label %d into given label %d ...\n", fromLabel, splitLabel)
		if err = d.updateMaxLabel(v, splitLabel); err != nil {
			return
		}
	} else {
		toLabel, err = d.NewLabel(v)
		if err != nil {
			return
		}
		dvid.Debugf("Splitting coarse subset of label %d into new label %d ...\n", fromLabel, toLabel)
	}

	// Read the sparse volume from reader.
	var splits dvid.RLEs
	splits, err = dvid.ReadRLEs(r)
	if err != nil {
		return
	}
	numBlocks, _ := splits.Stats()

	// Only do one request at a time, although each request can start many goroutines.
	server.LargeMutationMutex.Lock()
	defer server.LargeMutationMutex.Unlock()

	// Store split info into separate data.
	var splitData []byte
	if splitData, err = splits.MarshalBinary(); err != nil {
		return
	}
	var splitRef string
	if splitRef, err = d.PutBlob(splitData); err != nil {
		err = fmt.Errorf("error storing coarse split data: %v", err)
		return
	}

	// send kafka coarse split event to instance-uuid topic
	mutID := d.NewMutationID()
	versionuuid, _ := datastore.UUIDFromVersion(v)
	msginfo := map[string]interface{}{
		"Action":     "splitcoarse",
		"Target":     fromLabel,
		"NewLabel":   toLabel,
		"Split":      splitRef,
		"MutationID": mutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ := json.Marshal(msginfo)
	if err = d.PublishKafkaMsg(jsonmsg); err != nil {
		dvid.Errorf("error sending coarse split op to kafka: %v\n", err)
	}

	evt := datastore.SyncEvent{d.DataUUID(), labels.SplitStartEvent}
	splitOpStart := labels.DeltaSplitStart{fromLabel, toLabel}
	splitOpEnd := labels.DeltaSplitEnd{fromLabel, toLabel}

	// Make sure we can split given current merges in progress.
	iv := dvid.InstanceVersion{Data: d.DataUUID(), Version: v}
	if err := labels.SplitStart(iv, splitOpStart); err != nil {
		return toLabel, err
	}
	defer labels.SplitStop(iv, splitOpEnd)

	// Signal that we are starting a split.
	msg := datastore.SyncMessage{labels.SplitStartEvent, v, splitOpStart}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	// Order the split blocks.
	splitblks := make(dvid.IZYXSlice, numBlocks)
	n := 0
	for _, rle := range splits {
		p := rle.StartPt()
		run := rle.Length()
		for i := int32(0); i < run; i++ {
			izyx := dvid.IndexZYX{p[0] + i, p[1], p[2]}
			splitblks[n] = izyx.ToIZYXString()
			n++
		}
	}
	sort.Sort(splitblks)

	// Publish split event.
	deltaSplit := labels.DeltaSplit{
		MutID:        mutID,
		OldLabel:     fromLabel,
		NewLabel:     toLabel,
		Split:        nil,
		SortedBlocks: splitblks,
	}
	if err = d.processSplit(v, mutID, deltaSplit); err != nil {
		return
	}
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitLabelEvent}
	msg = datastore.SyncMessage{labels.SplitLabelEvent, v, deltaSplit}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return 0, err
	}

	msginfo = map[string]interface{}{
		"Action":     "splitcoarse-complete",
		"MutationID": mutID,
		"UUID":       string(versionuuid),
		"Timestamp":  time.Now().String(),
	}
	jsonmsg, _ = json.Marshal(msginfo)
	if err = d.PublishKafkaMsg(jsonmsg); err != nil {
		dvid.Errorf("error sending coarse split complete op to kafka: %v\n", err)
	}

	dvid.Infof("Coarsely split %d blocks from label %d to label %d\n", numBlocks, fromLabel, toLabel)
	return toLabel, nil
}
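// Note on the block ordering above: for a coarse split, each incoming RLE is
// in block coordinates with the run along X.  So a run starting at block
// (10, 2, 3) with length 3 expands to blocks (10,2,3), (11,2,3), and (12,2,3)
// in splitblks before sorting.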

func (d *Data) processSplit(v dvid.VersionID, mutID uint64, delta labels.DeltaSplit) error {
	timedLog := dvid.NewTimeLog()

	downresMut := downres.NewMutation(d, v, mutID)

	var doneCh chan struct{}
	var deleteBlks dvid.IZYXSlice
	if delta.Split == nil {
		// Coarse split, so block indexing is simple because all split blocks are removed from the old label.
		deleteBlks = delta.SortedBlocks
		for _, izyx := range delta.SortedBlocks {
			n := izyx.Hash(numMutateHandlers)
			d.MutAdd(mutID)
			op := splitOp{
				mutID: mutID,
				SplitOp: labels.SplitOp{
					Target:   delta.OldLabel,
					NewLabel: delta.NewLabel,
				},
				bcoord:     izyx,
				downresMut: downresMut,
			}
			d.mutateCh[n] <- procMsg{op: op, v: v}
		}
	} else {
		// A fine split can partially split within a block, so both old and new labels may share a valid block.
		doneCh = make(chan struct{})
		deleteBlkCh := make(chan dvid.IZYXString) // blocks that should be fully deleted from old label.
		go func() {
			for {
				select {
				case blk := <-deleteBlkCh:
					deleteBlks = append(deleteBlks, blk)
				case <-doneCh:
					return
				}
			}
		}()

		for izyx, blockRLEs := range delta.Split {
			n := izyx.Hash(numMutateHandlers)
			d.MutAdd(mutID)
			op := splitOp{
				mutID: mutID,
				SplitOp: labels.SplitOp{
					Target:   delta.OldLabel,
					NewLabel: delta.NewLabel,
					RLEs:     blockRLEs,
				},
				bcoord:      izyx,
				deleteBlkCh: deleteBlkCh,
				downresMut:  downresMut,
			}
			d.mutateCh[n] <- procMsg{op: op, v: v}
		}

		// Publish change in label sizes.
		deltaNewSize := labels.DeltaNewSize{
			Label: delta.NewLabel,
			Size:  delta.SplitVoxels,
		}
		evt := datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
		msg := datastore.SyncMessage{labels.ChangeSizeEvent, v, deltaNewSize}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
		}

		deltaModSize := labels.DeltaModSize{
			Label:      delta.OldLabel,
			SizeChange: int64(-delta.SplitVoxels),
		}
		evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
		msg = datastore.SyncMessage{labels.ChangeSizeEvent, v, deltaModSize}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
		}
	}

	// Wait for all blocks to be split, then modify label indices and mark end of split op.
	d.MutWait(mutID)
	d.MutDelete(mutID)
	if doneCh != nil {
		close(doneCh)
	}
	if err := d.splitIndices(v, delta, deleteBlks); err != nil {
		return err
	}
	if delta.Split == nil {
		timedLog.Debugf("labelarray sync complete for coarse split (%d blocks) of %d -> %d", len(delta.SortedBlocks), delta.OldLabel, delta.NewLabel)
	} else {
		timedLog.Debugf("labelarray sync complete for split (%d blocks) of %d -> %d", len(delta.Split), delta.OldLabel, delta.NewLabel)
	}

	// Publish split event.
	evt := datastore.SyncEvent{d.DataUUID(), labels.SplitLabelEvent}
	msg := datastore.SyncMessage{labels.SplitLabelEvent, v, delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Errorf("can't notify subscribers for event %v: %v\n", evt, err)
	}

	// Publish split end.
	evt = datastore.SyncEvent{d.DataUUID(), labels.SplitEndEvent}
	msg = datastore.SyncMessage{labels.SplitEndEvent, v, labels.DeltaSplitEnd{delta.OldLabel, delta.NewLabel}}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		return fmt.Errorf("unable to notify subscribers to data %q for event %v: %v", d.DataName(), evt, err)
	}

	return downresMut.Execute()
}
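// The deleteBlkCh/doneCh pair above is a collector pattern: a single goroutine
// owns the deleteBlks slice, so the sharded block handlers can report
// fully-emptied blocks without a mutex.  A minimal standalone sketch of the
// same pattern (names hypothetical):
//
//	var results dvid.IZYXSlice
//	resultCh := make(chan dvid.IZYXString)
//	done := make(chan struct{})
//	go func() {
//		for {
//			select {
//			case r := <-resultCh:
//				results = append(results, r)
//			case <-done:
//				return
//			}
//		}
//	}()
//	// ... senders write to resultCh ...
//	close(done) // only after all senders finish (cf. d.MutWait above)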

// splitIndices handles modification of the old and new labels' block indices on split.
func (d *Data) splitIndices(v dvid.VersionID, delta labels.DeltaSplit, deleteBlks dvid.IZYXSlice) error {
	// Note that the blocks to be deleted from the old label != split blocks, since a block
	// may be only partially split.
	deletebdm := make(blockDiffMap, len(deleteBlks))
	for _, izyx := range deleteBlks {
		deletebdm[izyx] = labelDiff{present: false}
	}
	ChangeLabelIndex(d, v, delta.OldLabel, deletebdm)

	var splitbdm blockDiffMap
	if delta.Split == nil {
		splitbdm = make(blockDiffMap, len(delta.SortedBlocks))
		for _, izyx := range delta.SortedBlocks {
			splitbdm[izyx] = labelDiff{present: true}
		}
	} else {
		splitbdm = make(blockDiffMap, len(delta.Split))
		for izyx := range delta.Split {
			splitbdm[izyx] = labelDiff{present: true}
		}
	}
	ChangeLabelIndex(d, v, delta.NewLabel, splitbdm)
	return nil
}
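// As a worked example with hypothetical blocks B1 and B2: in a fine split
// where B1 is fully absorbed by the new label and B2 is only partially split,
// the diffs above would be
//
//	deletebdm: {B1: labelDiff{present: false}}  // B1 leaves the old label
//	splitbdm:  {B1: labelDiff{present: true},   // both blocks now hold
//	            B2: labelDiff{present: true}}   // the new label
//
// so the old label keeps B2 in its index while the new label gains both.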

// mutateBlock serializes block operations so that, despite concurrent merge/split label requests,
// any particular block isn't concurrently GET/POSTed.  If more throughput is required
// and the backend is distributed, we can spawn many mutateBlock() goroutines as long as we uniquely
// shard blocks across them, so the same block will always be directed to the same goroutine.
func (d *Data) mutateBlock(ch <-chan procMsg) {
	defer func() {
		if e := recover(); e != nil {
			msg := fmt.Sprintf("Panic detected on labelarray block mutation thread: %+v\n", e)
			dvid.ReportPanic(msg, server.WebServer())
		}
	}()
	for {
		msg, more := <-ch
		if !more {
			return
		}

		ctx := datastore.NewVersionedCtx(d, msg.v)
		switch op := msg.op.(type) {
		case mergeOp:
			d.mergeBlock(ctx, op)

		case splitOp:
			d.splitBlock(ctx, op)

		// TODO
		// case ingestOp:
		// 	d.ingestBlock(ctx, op)

		// case mutateOp:
		// 	d.mutateBlock(ctx, op)

		// case deleteOp:
		// 	d.deleteBlock(ctx, op)

		default:
			dvid.Criticalf("Received unknown processing msg in mutateBlock: %v\n", msg)
		}
	}
}
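// Because Hash deterministically maps a block coordinate to a channel index,
// two ops on the same block always land on the same handler and are applied
// in channel (FIFO) order:
//
//	n1 := bcoord.Hash(numMutateHandlers)
//	n2 := bcoord.Hash(numMutateHandlers)
//	// n1 == n2, so both ops queue on d.mutateCh[n1] and never race.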

// mergeBlock relabels a single block during a merge operation.
func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) {
	defer d.MutDone(op.mutID)

	var scale uint8
	pb, err := d.getLabelBlock(ctx, scale, op.bcoord)
	if err != nil {
		dvid.Errorf("error in merge block %s: %v\n", op.bcoord, err)
		return
	}

	block, err := pb.MergeLabels(op.MergeOp)
	if err != nil {
		dvid.Errorf("error merging labels %s for data %q: %v\n", op.Merged, d.DataName(), err)
		return
	}
	pb.Block = *block

	if err := d.putLabelBlock(ctx, scale, pb); err != nil {
		dvid.Errorf("error putting label block %s for data %q: %v\n", op.bcoord, d.DataName(), err)
		return
	}

	if err := op.downresMut.BlockMutated(op.bcoord, &(pb.Block)); err != nil {
		dvid.Errorf("data %q publishing downres: %v\n", d.DataName(), err)
	}
}

// splitBlock splits a set of voxels to a specified label within a block.
func (d *Data) splitBlock(ctx *datastore.VersionedCtx, op splitOp) {
	defer d.MutDone(op.mutID)

	var scale uint8
	pb, err := d.getLabelBlock(ctx, scale, op.bcoord)
	if err != nil {
		dvid.Errorf("error in split block %s: %v\n", op.bcoord, err)
		return
	}
	if pb == nil {
		dvid.Infof("split on block %s attempted but block doesn't exist\n", op.bcoord)
		return
	}

	// Modify the block using either voxel-level changes or coarser block-level mods.
	// If we are doing a coarse block split, we can only get the change in # voxels after
	// going through block-level splits, unlike when provided the RLEs for the split itself.
	// Also, we don't know whether block indices can be maintained for a fine split until we
	// do the split and see if any old label remains.
	var toLabelSize uint64
	var splitBlock *labels.Block
	if op.RLEs != nil {
		var keptSize uint64
		splitBlock, keptSize, toLabelSize, err = pb.Split(op.SplitOp)
		if err != nil {
			dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.NewLabel, op.bcoord, err)
			return
		}
		if splitBlock == nil {
			dvid.Infof("Attempt to split missing label %d in block %s!\n", op.SplitOp.Target, pb)
			return
		}
		if keptSize == 0 {
			op.deleteBlkCh <- op.bcoord
		}
	} else {
		// We are doing a coarse split and will replace the entire label within this block.
		splitBlock, toLabelSize, err = pb.ReplaceLabel(op.Target, op.NewLabel)
		if err != nil {
			dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.Target, op.NewLabel, op.bcoord, err)
			return
		}
		delta := labels.DeltaNewSize{
			Label: op.NewLabel,
			Size:  toLabelSize,
		}
		evt := datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
		msg := datastore.SyncMessage{labels.ChangeSizeEvent, ctx.VersionID(), delta}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Criticalf("unable to notify subscribers to data %q for event %v\n", d.DataName(), evt)
		}

		delta2 := labels.DeltaModSize{
			Label:      op.Target,
			SizeChange: int64(-toLabelSize),
		}
		evt = datastore.SyncEvent{d.DataUUID(), labels.ChangeSizeEvent}
		msg = datastore.SyncMessage{labels.ChangeSizeEvent, ctx.VersionID(), delta2}
		if err := datastore.NotifySubscribers(evt, msg); err != nil {
			dvid.Criticalf("unable to notify subscribers to data %q for event %v\n", d.DataName(), evt)
		}
	}

	splitpb := labels.PositionedBlock{*splitBlock, op.bcoord}
	if err := d.putLabelBlock(ctx, scale, &splitpb); err != nil {
		dvid.Errorf("unable to put block %s in split of label %d, data %q: %v\n", op.bcoord, op.Target, d.DataName(), err)
		return
	}

	if err := op.downresMut.BlockMutated(op.bcoord, splitBlock); err != nil {
		dvid.Errorf("data %q publishing downres: %v\n", d.DataName(), err)
	}
}
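
// Both mergeBlock and splitBlock follow the same read-modify-write cycle:
// getLabelBlock -> mutate in memory -> putLabelBlock -> downresMut.BlockMutated.
// This is safe without per-block locks only because the sharding above
// guarantees a single writer per block coordinate.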