github.com/janelia-flyem/dvid@v1.0.0/datatype/labelarray/labelarray.go (about)

     1  /*
     2  Package labelarray handles both volumes of label data as well as indexing to
     3  quickly find and generate sparse volumes of any particular label.
     4  */
     5  package labelarray
     6  
     7  import (
     8  	"bytes"
     9  	"crypto/md5"
    10  	"encoding/binary"
    11  	"encoding/gob"
    12  	"encoding/json"
    13  	"fmt"
    14  	"image"
    15  	"io"
    16  	"io/ioutil"
    17  	"math"
    18  	"net/http"
    19  	"net/url"
    20  	"strconv"
    21  	"strings"
    22  	"sync"
    23  
    24  	"compress/gzip"
    25  
    26  	"github.com/janelia-flyem/dvid/datastore"
    27  	"github.com/janelia-flyem/dvid/datatype/common/downres"
    28  	"github.com/janelia-flyem/dvid/datatype/common/labels"
    29  	"github.com/janelia-flyem/dvid/datatype/imageblk"
    30  	"github.com/janelia-flyem/dvid/dvid"
    31  	"github.com/janelia-flyem/dvid/server"
    32  	"github.com/janelia-flyem/dvid/storage"
    33  
    34  	lz4 "github.com/janelia-flyem/go/golz4-updated"
    35  )
    36  
    37  const (
    38  	Version  = "0.1"
    39  	RepoURL  = "github.com/janelia-flyem/dvid/datatype/labelarray"
    40  	TypeName = "labelarray"
    41  )
    42  
    43  const helpMessage = `
    44  API for label block data type (github.com/janelia-flyem/dvid/datatype/labelarray)
    45  ===============================================================================
    46  
    47  Note: UUIDs referenced below are strings that may either be a unique prefix of a
    48  hexadecimal UUID string (e.g., 3FA22) or a branch leaf specification that adds
    49  a colon (":") followed by the case-dependent branch name.  In the case of a
    50  branch leaf specification, the unique UUID prefix just identifies the repo of
    51  the branch, and the UUID referenced is really the leaf of the branch name.
    52  For example, if we have a DAG with root A -> B -> C where C is the current
    53  HEAD or leaf of the "master" (default) branch, then asking for "B:master" is
    54  the same as asking for "C".  If we add another version so A -> B -> C -> D, then
    55  references to "B:master" now return the data from "D".
    56  
    57  ---
    58  
    59  Denormalizations like sparse volumes are *not* performed for the "0" label, which is
    60  considered a special label useful for designating background.  This allows users to define
    61  sparse labeled structures in a large volume without requiring processing of entire volume.
    62  
    63  
    64  Command-line:
    65  
    66  $ dvid repo <UUID> new labelarray <data name> <settings...>
    67  
    68  	Adds newly named data of the 'type name' to repo with specified UUID.
    69  
    70  	Example (note anisotropic resolution specified instead of default 8 nm isotropic):
    71  
    72  	$ dvid repo 3f8c new labelarray superpixels VoxelSize=3.2,3.2,40.0
    73  
    74      Arguments:
    75  
    76      UUID            Hexadecimal string with enough characters to uniquely identify a version node.
    77      data name       Name of data to create, e.g., "superpixels"
    78      settings        Configuration settings in "key=value" format separated by spaces.
    79  
    80      Configuration Settings (case-insensitive keys)
    81  
    82      BlockSize       Size in pixels  (default: %s)
    83      VoxelSize       Resolution of voxels (default: 8.0, 8.0, 8.0)
    84      VoxelUnits      Resolution units (default: "nanometers")
    85  	IndexedLabels   "false" if no sparse volume support is required (default "true")
    86  	CountLabels     "false" if no voxel counts per label is required (default "true")
    87  	MaxDownresLevel  The maximum down-res level supported.  Each down-res is factor of 2.
    88  
    89  $ dvid node <UUID> <data name> load <offset> <image glob> <settings...>
    90  
    91      Initializes version node to a set of XY label images described by glob of filenames.
    92      The DVID server must have access to the named files.  Currently, XY images are required.
    93  
    94      Example: 
    95  
    96      $ dvid node 3f8c superpixels load 0,0,100 "data/*.png" proc=noindex
    97  
    98      Arguments:
    99  
   100      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   101      data name     Name of data to add.
   102      offset        3d coordinate in the format "x,y,z".  Gives coordinate of top upper left voxel.
   103      image glob    Filenames of label images, preferably in quotes, e.g., "foo-xy-*.png"
   104  
   105  $ dvid node <UUID> <data name> composite <uint8 data name> <new rgba8 data name>
   106  
   107      Creates a RGBA8 image where the RGB is a hash of the labels and the A is the
   108      grayscale intensity.
   109  
   110      Example: 
   111  
   112      $ dvid node 3f8c bodies composite grayscale bodyview
   113  
   114      Arguments:
   115  
   116      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   117      data name     Name of data to add.
   118  	
   119  	
   120      ------------------
   121  
   122  HTTP API (Level 2 REST):
   123  
   124   POST /api/repo/{uuid}/instance
   125  
   126  	Creates a new instance of the labelarray data type.  Expects configuration data in JSON
   127  	as the body of the POST.  Configuration data is a JSON object with each property
   128  	corresponding to a configuration keyword for the particular data type.  
   129  
   130  	JSON name/value pairs:
   131  
   132  	REQUIRED "typename"         Must be "labelarray"
   133  	REQUIRED "dataname"         Name of the new instance
   134  	OPTIONAL "versioned"        If "false" or "0", the data is unversioned and acts as if 
   135  	                             all UUIDs within a repo become the root repo UUID.  (True by default.)
   136      OPTIONAL "BlockSize"        Size in pixels  (default: 64,64,64)
   137      OPTIONAL "VoxelSize"        Resolution of voxels (default: 8.0,8.0,8.0)
   138      OPTIONAL "VoxelUnits"       Resolution units (default: "nanometers")
   139  	OPTIONAL "IndexedLabels"    "false" if no sparse volume support is required (default "true")
   140  	OPTIONAL "CountLabels"      "false" if no voxel counts per label is required (default "true")
   141  	OPTIONAL "MaxDownresLevel"  The maximum down-res level supported.  Each down-res is factor of 2.
   142  	
   143  
   144  GET  <api URL>/node/<UUID>/<data name>/help
   145  
   146  	Returns data-specific help message.
   147  
   148  
   149  GET  <api URL>/node/<UUID>/<data name>/info
   150  POST <api URL>/node/<UUID>/<data name>/info
   151  
   152      Retrieves or puts DVID-specific data properties for these voxels.
   153  
   154      Example: 
   155  
   156      GET <api URL>/node/3f8c/segmentation/info
   157  
   158      Returns JSON with configuration settings that include location in DVID space and
   159      min/max block indices.
   160  
   161      Arguments:
   162  
   163      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   164  	data name     Name of voxels data.
   165  
   166  POST  <api URL>/node/<UUID>/<data name>/resolution
   167    
   168    	Sets the resolution for the image volume. 
   169    
   170    	Extents should be in JSON in the following format:
   171    	[8,8,8]
   172  
   173  POST <api URL>/node/<UUID>/<data name>/sync?<options>
   174  
   175      Establishes labelvol data instances with which the annotations are synced.  Expects JSON to be POSTed
   176      with the following format:
   177  
   178      { "sync": "bodies" }
   179  
   180  	To delete syncs, pass an empty string of names with query string "replace=true":
   181  
   182  	{ "sync": "" }
   183  
   184      The "sync" property should be followed by a comma-delimited list of data instances that MUST
   185      already exist.  Currently, syncs should be created before any annotations are pushed to
   186      the server.  If annotations already exist, these are currently not synced.
   187  
   188      The labelarray data type accepts syncs to labelvol data instances.  It also accepts syncs to
   189  	labelarray instances for multiscale.
   190  
   191      GET Query-string Options:
   192  
   193      replace    Set to "true" if you want passed syncs to replace and not be appended to current syncs.
   194  			   Default operation is false.
   195  
   196  
   197  GET  <api URL>/node/<UUID>/<data name>/metadata
   198  
   199  	Retrieves a JSON schema (application/vnd.dvid-nd-data+json) that describes the layout
   200  	of bytes returned for n-d images.
   201  
   202  
   203  GET  <api URL>/node/<UUID>/<data name>/specificblocks[?queryopts]
   204  
   205      Retrieves blocks corresponding to those specified in the query string.  This interface
    206      is useful if the blocks retrieved are not consecutive or if the backend is non-ordered.
   207  
   208      TODO: enable arbitrary compression to be specified
   209  
   210      Example: 
   211  
    212      GET <api URL>/node/3f8c/grayscale/specificblocks?blocks=x1,y1,z1,x2,y2,z2,x3,y3,z3
   213  	
   214  	This will fetch blocks at position (x1,y1,z1), (x2,y2,z2), and (x3,y3,z3).
   215  	The returned byte stream has a list of blocks with a leading block 
   216  	coordinate (3 x int32) plus int32 giving the # of bytes in this block, and  then the 
   217  	bytes for the value.  If blocks are unset within the span, they will not appear in the stream,
   218  	so the returned data will be equal to or less than spanX blocks worth of data.  
   219  
   220      The returned data format has the following format where int32 is in little endian and the bytes of
   221      block data have been compressed in JPEG format.
   222  
   223          int32  Block 1 coordinate X (Note that this may not be starting block coordinate if it is unset.)
   224          int32  Block 1 coordinate Y
   225          int32  Block 1 coordinate Z
   226          int32  # bytes for first block (N1)
   227          byte0  Bytes of block data in jpeg-compressed format.
   228          byte1
   229          ...
   230          byteN1
   231  
   232          int32  Block 2 coordinate X
   233          int32  Block 2 coordinate Y
   234          int32  Block 2 coordinate Z
   235          int32  # bytes for second block (N2)
   236          byte0  Bytes of block data in jpeg-compressed format.
   237          byte1
   238          ...
   239          byteN2
   240  
   241          ...
   242  
   243      If no data is available for given block span, nothing is returned.
   244  
   245      Arguments:
   246  
   247      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   248      data name     Name of data to add.
   249  
   250      Query-string Options:
   251  
   252      blocks	  x,y,z... block string
   253      scale         A number from 0 up to MaxDownresLevel where each level has 1/2 resolution of
   254  	              previous level.  Level 0 (default) is the highest resolution.
   255  
   256  
   257  GET  <api URL>/node/<UUID>/<data name>/isotropic/<dims>/<size>/<offset>[/<format>][?queryopts]
   258  
   259      Retrieves either 2d images (PNG by default) or 3d binary data, depending on the dims parameter.  
   260      The 3d binary data response has "Content-type" set to "application/octet-stream" and is an array of 
   261      voxel values in ZYX order (X iterates most rapidly).
   262  
   263      Example: 
   264  
   265      GET <api URL>/node/3f8c/segmentation/isotropic/0_1/512_256/0_0_100/jpg:80
   266  
   267      Returns an isotropic XY slice (0th and 1st dimensions) with width (x) of 512 voxels and
   268      height (y) of 256 voxels with offset (0,0,100) in JPG format with quality 80.
   269      Additional processing is applied based on voxel resolutions to make sure the retrieved image 
   270      has isotropic pixels.  For example, if an XZ image is requested and the image volume has 
   271      X resolution 3 nm and Z resolution 40 nm, the returned image's height will be magnified 40/3
   272      relative to the raw data.
   273      The example offset assumes the "grayscale" data in version node "3f8c" is 3d.
   274      The "Content-type" of the HTTP response should agree with the requested format.
   275      For example, returned PNGs will have "Content-type" of "image/png", and returned
   276      nD data will be "application/octet-stream".
   277  
   278      Arguments:
   279  
   280      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   281      data name     Name of data to add.
   282      dims          The axes of data extraction in form "i_j_k,..."  Example: "0_2" can be XZ.
   283                      Slice strings ("xy", "xz", or "yz") are also accepted.
   284      size          Size in voxels along each dimension specified in <dims>.
   285      offset        Gives coordinate of first voxel using dimensionality of data.
   286      format        Valid formats depend on the dimensionality of the request and formats
   287                      available in server implementation.
   288                    2D: "png", "jpg" (default: "png")
   289                      jpg allows lossy quality setting, e.g., "jpg:80"
   290                    nD: uses default "octet-stream".
   291  
   292      Query-string Options:
   293  
   294      roi       	  Name of roi data instance used to mask the requested data.
   295      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   296  	                of previous level.  Level 0 is the highest resolution.
   297      compression   Allows retrieval or submission of 3d data in "lz4" and "gzip"
   298                      compressed format.  The 2d data will ignore this and use
   299                      the image-based codec.
   300      throttle      Only works for 3d data requests.  If "true", makes sure only N compute-intense operation 
   301      				(all API calls that can be throttled) are handled.  If the server can't initiate the API 
   302      				call right away, a 503 (Service Unavailable) status code is returned.
   303  
   304  
   305  GET  <api URL>/node/<UUID>/<data name>/raw/<dims>/<size>/<offset>[/<format>][?queryopts]
   306  
   307      Retrieves either 2d images (PNG by default) or 3d binary data, depending on the dims parameter.  
   308      The 3d binary data response has "Content-type" set to "application/octet-stream" and is an array of 
   309      voxel values in ZYX order (X iterates most rapidly).
   310  
   311      Example: 
   312  
   313      GET <api URL>/node/3f8c/segmentation/raw/0_1/512_256/0_0_100/jpg:80
   314  
   315      Returns a raw XY slice (0th and 1st dimensions) with width (x) of 512 voxels and
   316      height (y) of 256 voxels with offset (0,0,100) in JPG format with quality 80.
   317      By "raw", we mean that no additional processing is applied based on voxel
   318      resolutions to make sure the retrieved image has isotropic pixels.
   319      The example offset assumes the "grayscale" data in version node "3f8c" is 3d.
   320      The "Content-type" of the HTTP response should agree with the requested format.
   321      For example, returned PNGs will have "Content-type" of "image/png", and returned
   322      nD data will be "application/octet-stream". 
   323  
   324      Arguments:
   325  
   326      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   327      data name     Name of data to add.
   328      dims          The axes of data extraction in form "i_j_k,..."  
   329                      Slice strings ("xy", "xz", or "yz") are also accepted.
   330                      Example: "0_2" is XZ, and "0_1_2" is a 3d subvolume.
   331      size          Size in voxels along each dimension specified in <dims>.
   332      offset        Gives coordinate of first voxel using dimensionality of data.
   333      format        Valid formats depend on the dimensionality of the request and formats
   334                      available in server implementation.
   335                      2D: "png", "jpg" (default: "png")
   336                          jpg allows lossy quality setting, e.g., "jpg:80"
   337                      nD: uses default "octet-stream".
   338  
   339      Query-string Options:
   340  
   341      roi           Name of roi data instance used to mask the requested data.
   342      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   343  	                of previous level.  Level 0 is the highest resolution.
   344      compression   Allows retrieval or submission of 3d data in "lz4","gzip", "google"
   345                      (neuroglancer compression format), "googlegzip" (google + gzip)
   346                      compressed format.  The 2d data will ignore this and use
   347                      the image-based codec.
   348      throttle      Only works for 3d data requests.  If "true", makes sure only N compute-intense operation 
   349      				(all API calls that can be throttled) are handled.  If the server can't initiate the API 
   350      				call right away, a 503 (Service Unavailable) status code is returned.
   351  
   352  
   353  POST <api URL>/node/<UUID>/<data name>/raw/0_1_2/<size>/<offset>[?queryopts]
   354  
   355      Puts block-aligned voxel data using the block sizes defined for  this data instance.  
   356      For example, if the BlockSize = 32, offset and size must be multiples of 32.
   357  
   358      Example: 
   359  
   360      POST <api URL>/node/3f8c/segmentation/raw/0_1_2/512_256_128/0_0_32
   361  
   362      Arguments:
   363  
   364      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   365      data name     Name of data to add.
   366      size          Size in voxels along each dimension specified in <dims>.
   367      offset        Gives coordinate of first voxel using dimensionality of data.
   368  
   369      Query-string Options:
   370  
   371      roi           Name of roi data instance used to mask the requested data.
   372      mutate        Default "false" corresponds to ingestion, i.e., the first write of the given block.
   373                      Use "true" to indicate the POST is a mutation of prior data, which allows any
   374                      synced data instance to cleanup prior denormalizations.  If "mutate=true", the
   375                      POST operations will be slower due to a required GET to retrieve past data.
   376      compression   Allows retrieval or submission of 3d data in "lz4" and "gzip"
   377                      compressed format.
   378      throttle      If "true", makes sure only N compute-intense operation (all API calls that can be throttled) 
   379                      are handled.  If the server can't initiate the API call right away, a 503 (Service Unavailable) 
   380                      status code is returned.
   381  
   382  GET  <api URL>/node/<UUID>/<data name>/pseudocolor/<dims>/<size>/<offset>[?queryopts]
   383  
   384      Retrieves label data as pseudocolored 2D PNG color images where each label hashed to a different RGB.
   385  
   386      Example: 
   387  
   388      GET <api URL>/node/3f8c/segmentation/pseudocolor/0_1/512_256/0_0_100
   389  
   390      Returns an XY slice (0th and 1st dimensions) with width (x) of 512 voxels and
   391      height (y) of 256 voxels with offset (0,0,100) in PNG format.
   392  
   393      Arguments:
   394  
   395      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   396      data name     Name of data to add.
   397      dims          The axes of data extraction.  Example: "0_2" can be XZ.
   398                      Slice strings ("xy", "xz", or "yz") are also accepted.
   399      size          Size in voxels along each dimension specified in <dims>.
   400      offset        Gives coordinate of first voxel using dimensionality of data.
   401  
   402      Query-string Options:
   403  
   404      roi       	  Name of roi data instance used to mask the requested data.
   405      compression   Allows retrieval or submission of 3d data in "lz4" and "gzip"
   406                      compressed format.
   407      throttle      If "true", makes sure only N compute-intense operation (all API calls that can be throttled) 
   408                      are handled.  If the server can't initiate the API call right away, a 503 (Service Unavailable) 
   409                      status code is returned.
   410  
   411  GET <api URL>/node/<UUID>/<data name>/label/<coord>[?queryopts]
   412  
   413  	Returns JSON for the label at the given coordinate:
   414  	{ "Label": 23 }
   415  	
   416      Arguments:
   417      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   418      data name     Name of label data.
   419      coord     	  Coordinate of voxel with underscore as separator, e.g., 10_20_30
   420  
   421      Query-string Options:
   422  
   423      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   424  	                of previous level.  Level 0 is the highest resolution.
   425  
   426  GET <api URL>/node/<UUID>/<data name>/labels[?queryopts]
   427  
   428  	Returns JSON for the labels at a list of coordinates.  Expects JSON in GET body:
   429  
   430  	[ [x0, y0, z0], [x1, y1, z1], ...]
   431  
   432  	Returns for each POSTed coordinate the corresponding label:
   433  
   434  	[ 23, 911, ...]
   435  	
   436      Arguments:
   437      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   438      data name     Name of label data.
   439  
   440      Query-string Options:
   441  
   442      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   443  	                of previous level.  Level 0 is the highest resolution.
    444      hash          MD5 hash of request body content in hexadecimal string format.
   445  
   446  GET <api URL>/node/<UUID>/<data name>/blocks/<size>/<offset>[?queryopts]
   447  
   448      Gets blocks corresponding to the extents specified by the size and offset.  The
   449      subvolume request must be block aligned.  This is the most server-efficient way of
   450      retrieving the labelarray data, where data read from the underlying storage engine is 
   451  	written directly to the HTTP connection possibly after recompression to match the given 
   452  	query-string compression option.  The default labelarray compression 
   453  	is gzip on compressed DVID label Block serialization ("blocks" option).
   454  
   455      Example: 
   456  
   457      GET <api URL>/node/3f8c/segmentation/blocks/64_64_64/0_0_0
   458  
   459  	If block size is 32x32x32, this call retrieves up to 8 blocks where the first potential
   460  	block is at 0, 0, 0.  The returned byte stream has a list of blocks with a leading block 
   461  	coordinate (3 x int32) plus int32 giving the # of bytes in this block, and  then the 
   462  	bytes for the value.  If blocks are unset within the span, they will not appear in the stream,
   463  	so the returned data will be equal to or less than spanX blocks worth of data.  
   464  
   465      The returned data format has the following format where int32 is in little endian and the 
   466  	bytes of block data have been compressed in the desired output format.
   467  
   468          int32  Block 1 coordinate X (Note that this may not be starting block coordinate if it is unset.)
   469          int32  Block 1 coordinate Y
   470          int32  Block 1 coordinate Z
   471          int32  # bytes for first block (N1)
   472          byte0  Block N1 serialization using chosen compression format (see "compression" option below)
   473          byte1
   474          ...
   475          byteN1
   476  
   477          int32  Block 2 coordinate X
   478          int32  Block 2 coordinate Y
   479          int32  Block 2 coordinate Z
   480          int32  # bytes for second block (N2)
   481          byte0  Block N2 serialization using chosen compression format (see "compression" option below)
   482          byte1
   483          ...
   484          byteN2
   485  
   486          ...
   487  
   488      If no data is available for given block span, nothing is returned.
   489  
   490      Arguments:
   491  
   492      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   493      data name     Name of data to add.
   494      size          Size in voxels along each dimension specified in <dims>.
   495      offset        Gives coordinate of first voxel using dimensionality of data.
   496  
   497      Query-string Options:
   498  
   499      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   500  	                of previous level.  Level 0 is the highest resolution.
    501      compression   Allows retrieval of block data in "lz4" (default), "gzip", "blocks" (native DVID
	502  	              label blocks) or "uncompressed" (uint64 labels).
   503      throttle      If "true", makes sure only N compute-intense operation (all API calls that can be 
   504  	              throttled) are handled.  If the server can't initiate the API call right away, a 503 
   505                    (Service Unavailable) status code is returned.
   506  
   507  
   508  POST <api URL>/node/<UUID>/<data name>/blocks[?queryopts]
   509  
   510      Puts properly-sized blocks for this data instance.  This is the most server-efficient way of
   511      storing labelarray data, where data read from the HTTP stream is written directly to the 
   512  	underlying storage.  The default (and currently only supported) compression is gzip on compressed 
   513  	DVID label Block serialization.
   514  
   515  	Note that maximum label and extents are automatically handled during these calls.
   516  
   517      Example: 
   518  
   519      POST <api URL>/node/3f8c/segmentation/blocks
   520  
   521      The posted data format should be in the following format where int32 is in little endian and 
   522  	the bytes of block data have been compressed in the desired output format.
   523  
   524          int32  Block 1 coordinate X (Note that this may not be starting block coordinate if it is unset.)
   525          int32  Block 1 coordinate Y
   526          int32  Block 1 coordinate Z
   527          int32  # bytes for first block (N1)
   528          byte0  Block N1 serialization using chosen compression format (see "compression" option below)
   529          byte1
   530          ...
   531          byteN1
   532  
   533          int32  Block 2 coordinate X
   534          int32  Block 2 coordinate Y
   535          int32  Block 2 coordinate Z
   536          int32  # bytes for second block (N2)
   537          byte0  Block N2 serialization using chosen compression format (see "compression" option below)
   538          byte1
   539          ...
   540          byteN2
   541  
   542          ...
   543  
   544  	The Block serialization format is as follows:
   545  
   546        3 * uint32      values of gx, gy, and gz
   547        uint32          # of labels (N), cannot exceed uint32.
   548        N * uint64      packed labels in little-endian format.  Label 0 can be used to represent
   549                            deleted labels, e.g., after a merge operation to avoid changing all
   550                            sub-block indices.
   551  
   552        ----- Data below is only included if N > 1, otherwise it is a solid block.
   553              Nsb = # sub-blocks = gx * gy * gz
   554  
   555        Nsb * uint16        # of labels for sub-blocks.  Each uint16 Ns[i] = # labels for sub-block i.
   556                                If Ns[i] == 0, the sub-block has no data (uninitialized), which
   557                                is useful for constructing Blocks with sparse data.
   558  
   559        Nsb * Ns * uint32   label indices for sub-blocks where Ns = sum of Ns[i] over all sub-blocks.
   560                                For each sub-block i, we have Ns[i] label indices of lBits.
   561  
   562        Nsb * values        sub-block indices for each voxel.
   563                                Data encompasses 512 * ceil(log2(Ns[i])) bits, padded so no two
   564                                sub-blocks have indices in the same byte.
   565                                At most we use 9 bits per voxel for up to the 512 labels in sub-block.
   566                                A value gives the sub-block index which points to the index into
   567                                the N labels.  If Ns[i] <= 1, there are no values.  If Ns[i] = 0,
   568                                the 8x8x8 voxels are set to label 0.  If Ns[i] = 1, all voxels
   569                                are the given label index.
   570  
   571      Arguments:
   572  
   573      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   574      data name     Name of data to add.
   575  
   576      Query-string Options:
   577  
   578      scale         A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 resolution
   579  	                of previous level.  Level 0 is the highest resolution.
   580  	downres       "false" (default) or "true", specifies whether the given blocks should be
   581  	                down-sampled to lower resolution.  If "true", scale must be "0" or absent.
   582      compression   Specifies compression format of block data: default and only option currently is
   583                      "blocks" (native DVID label blocks).
   584      throttle      If "true", makes sure only N compute-intense operation (all API calls that can be 
   585  	                throttled) are handled.  If the server can't initiate the API call right away, a 503 
   586                      (Service Unavailable) status code is returned.
   587  
   588  
   589  GET <api URL>/node/<UUID>/<data name>/maxlabel
   590  
   591  	GET returns the maximum label for the version of data in JSON form:
   592  
   593  		{ "maxlabel": <label #> }
   594  
   595  
   596  -------------------------------------------------------------------------------------------------------
   597  --- The following endpoints require the labelarray data instance to have IndexedLabels set to true. ---
   598  -------------------------------------------------------------------------------------------------------
   599  
   600  GET  <api URL>/node/<UUID>/<data name>/sparsevol-size/<label>?<options>
   601  
   602  	Returns JSON giving the number of native blocks and the coarse bounding box in DVID
   603  	coordinates (voxel space).
   604  
   605  	Example return:
   606  
   607  	{ "numblocks": 1081, "minvoxel": [886, 513, 744], "maxvoxel": [1723, 1279, 4855]}
   608  
   609  	Note that the minvoxel and maxvoxel coordinates are voxel coordinates that are
   610  	accurate to the block, not the voxel.
   611  
   612  GET  <api URL>/node/<UUID>/<data name>/sparsevol/<label>?<options>
   613  
   614  	Returns a sparse volume with voxels of the given label in encoded RLE format.  The returned
   615  	data can be optionally compressed using the "compression" option below.
   616  
   617  	The encoding has the following possible format where integers are little endian and the order
   618  	of data is exactly as specified below:
   619  
   620  	Legacy RLEs ("rles") :
   621  
   622  	    byte     Payload descriptor:
   623  	               Bit 0 (LSB) - 8-bit grayscale
   624  	               Bit 1 - 16-bit grayscale
   625  	               Bit 2 - 16-bit normal
   626  	               If set to all 0, there is no payload and it's a binary sparse volume.
   627  	    uint8    Number of dimensions
   628  	    uint8    Dimension of run (typically 0 = X)
   629  	    byte     Reserved (to be used later)
   630  	    uint32    # Voxels [TODO.  0 for now]
   631  	    uint32    # Spans
   632  	    Repeating unit of:
   633  	        int32   Coordinate of run start (dimension 0)
   634  	        int32   Coordinate of run start (dimension 1)
   635  	        int32   Coordinate of run start (dimension 2)
   636  	        int32   Length of run
   637  	        bytes   Optional payload dependent on first byte descriptor
   638  			  ...
   639  	
   640  	Streaming RLEs ("srles"):
   641  
   642  	    Repeating unit of:
   643  	        int32   Coordinate of run start (dimension 0)
   644  	        int32   Coordinate of run start (dimension 1)
   645  	        int32   Coordinate of run start (dimension 2)
   646  	        int32   Length of run
   647  
   648  	Streaming Binary Blocks ("blocks"):
   649  
   650        3 * uint32      values of gx, gy, and gz -- the # of sub-blocks along each dimension in a Block.
   651        uint64          foreground label
   652  
   653        Stream of blocks.  Each block has the following data:
   654  
   655  		3 * int32       offset of first voxel of Block in DVID space (x, y, z)
   656  		byte            content flag:
   657  						0 = background ONLY  (no more data for this block)
   658  						1 = foreground ONLY  (no more data for this block)
   659  						2 = both background and foreground so stream of sub-blocks required.
   660  
   661  		If content is both background and foreground, stream of gx * gy * gz sub-blocks with the following data:
   662  
   663  		byte            content flag:
   664  						0 = background ONLY  (no more data for this sub-block)
   665  						1 = foreground ONLY  (no more data for this sub-block)
   666  						2 = both background and foreground so mask data required.
   667  		mask            64 byte bitmask where each voxel is 0 (background) or 1 (foreground)
   668  
   669      GET Query-string Options:
   670  
   671  	format  One of the following:
	          "rles" (default) - legacy RLEs with header including # spans
   673  			  "srles" - streaming RLEs with each RLE composed of 4 int32 (16 bytes) for x, y, z, run 
   674  			  "blocks" - binary Block stream
   675  
   676      minx    Spans must be equal to or larger than this minimum x voxel coordinate.
   677      maxx    Spans must be equal to or smaller than this maximum x voxel coordinate.
   678      miny    Spans must be equal to or larger than this minimum y voxel coordinate.
   679      maxy    Spans must be equal to or smaller than this maximum y voxel coordinate.
   680      minz    Spans must be equal to or larger than this minimum z voxel coordinate.
   681      maxz    Spans must be equal to or smaller than this maximum z voxel coordinate.
   682      exact   "false" if RLEs can extend a bit outside voxel bounds within border blocks.
   683               This will give slightly faster responses. 
   684  
   685      compression   "lz4" and "gzip" compressed format; only applies to "rles" format for now.
   686  	scale   A number from 0 up to MaxDownresLevel where each level beyond 0 has 1/2 
   687  	         resolution of previous level.  Level 0 is the highest resolution.
   688  
   689  
   690  HEAD <api URL>/node/<UUID>/<data name>/sparsevol/<label>?<options>
   691  
   692  	Returns:
   693  		200 (OK) if a sparse volume of the given label exists within any optional bounds.
   694  		204 (No Content) if there is no sparse volume for the given label within any optional bounds.
   695  
   696  	Note that for speed, the optional bounds are always expanded to the block-aligned containing
   697  	subvolume, i.e., it's as if exact=false for the corresponding GET.
   698  
   699      GET Query-string Options:
   700  
   701      minx    Spans must be equal to or larger than this minimum x voxel coordinate.
   702      maxx    Spans must be equal to or smaller than this maximum x voxel coordinate.
   703      miny    Spans must be equal to or larger than this minimum y voxel coordinate.
   704      maxy    Spans must be equal to or smaller than this maximum y voxel coordinate.
   705      minz    Spans must be equal to or larger than this minimum z voxel coordinate.
   706      maxz    Spans must be equal to or smaller than this maximum z voxel coordinate.
   707  
   708  
   709  GET <api URL>/node/<UUID>/<data name>/sparsevol-by-point/<coord>
   710  
   711  	Returns a sparse volume with voxels that pass through a given voxel.
   712  	The encoding is described in the "sparsevol" request above.
   713  	
   714      Arguments:
   715  
   716      UUID          Hexadecimal string with enough characters to uniquely identify a version node.
   717      data name     Name of mapping data.
   718      coord     	  Coordinate of voxel with underscore as separator, e.g., 10_20_30
   719  
   720  
   721  GET <api URL>/node/<UUID>/<data name>/sparsevol-coarse/<label>?<options>
   722  
   723  	Returns a sparse volume with blocks of the given label in encoded RLE format.
   724  	The encoding has the following format where integers are little endian and the order
   725  	of data is exactly as specified below:
   726  
   727  	    byte     Set to 0
   728  	    uint8    Number of dimensions
   729  	    uint8    Dimension of run (typically 0 = X)
   730  	    byte     Reserved (to be used later)
   731  	    uint32    # Blocks [TODO.  0 for now]
   732  	    uint32    # Spans
   733  	    Repeating unit of:
   734  	        int32   Block coordinate of run start (dimension 0)
   735  	        int32   Block coordinate of run start (dimension 1)
   736  	        int32   Block coordinate of run start (dimension 2)
   737  			  ...
   738  	        int32   Length of run
   739  
   740  	Note that the above format is the RLE encoding of sparsevol, where voxel coordinates
   741  	have been replaced by block coordinates.
   742  
   743      GET Query-string Options:
   744  
   745      minx    Spans must be equal to or larger than this minimum x voxel coordinate.
   746      maxx    Spans must be equal to or smaller than this maximum x voxel coordinate.
   747      miny    Spans must be equal to or larger than this minimum y voxel coordinate.
   748      maxy    Spans must be equal to or smaller than this maximum y voxel coordinate.
   749      minz    Spans must be equal to or larger than this minimum z voxel coordinate.
   750      maxz    Spans must be equal to or smaller than this maximum z voxel coordinate.
   751  
   752  
   753  GET <api URL>/node/<UUID>/<data name>/sparsevols-coarse/<start label>/<end label>
   754  
   755  	Note: this request does not reflect ongoing merges/splits but is meant to be used
   756  	for various batch operations on a static node.
   757  
   758  	Returns a stream of sparse volumes with blocks of the given label in encoded RLE format:
   759  
   760  		uint64   label
   761  		<coarse sparse vol as given below>
   762  
   763  		uint64   label
   764  		<coarse sparse vol as given below>
   765  
   766  		...
   767  
   768  	The coarse sparse vol has the following format where integers are little endian and the order
   769  	of data is exactly as specified below:
   770  
   771  		int32    # Spans
   772  		Repeating unit of:
   773  			int32   Block coordinate of run start (dimension 0)
   774  			int32   Block coordinate of run start (dimension 1)
   775  			int32   Block coordinate of run start (dimension 2)
   776  			int32   Length of run
   777  
   778  
   779  GET <api URL>/node/<UUID>/<data name>/nextlabel
   780  POST <api URL>/node/<UUID>/<data name>/nextlabel
   781  
   782  	GET returns the next label for the version of data in JSON form:
   783  
   784  		{ "nextlabel": <label #> }
   785  
   786  	POST allows the client to request some # of labels that will be reserved.
   787  	This is used if the client wants to introduce new labels.
   788  
   789  	The request:
   790  
   791  		{ "needed": <# of labels> }
   792  
   793  	Response:
   794  
   795  		{ "start": <starting label #>, "end": <ending label #> }
   796  
   797  
   798  POST <api URL>/node/<UUID>/<data name>/merge
   799  
   800  	Merges labels.  Requires JSON in request body using the following format:
   801  
   802  	[toLabel1, fromLabel1, fromLabel2, fromLabel3, ...]
   803  
   804  	The first element of the JSON array specifies the label to be used as the merge result.
   805  	Note that it's computationally more efficient to group a number of merges into the
   806  	same toLabel as a single merge request instead of multiple merge requests.
   807  
   808  	Kafka JSON message generated by this request:
   809  		{ 
   810  			"Action": "merge",
   811  			"Target": <to label>,
   812  			"Labels": [<to merge label 1>, <to merge label2>, ...],
   813  			"UUID": <UUID on which merge was done>,
   814  			"MutationID": <unique id for mutation>
   815  		}
   816  
	After completion of the merge op, the following JSON message is published:
		{ 
			"Action": "merge-complete",
			"MutationID": <unique id for mutation>
			"UUID": <UUID on which merge was done>
		}
   823  
   824  POST <api URL>/node/<UUID>/<data name>/split/<label>[?splitlabel=X]
   825  
   826  	Splits a portion of a label's voxels into a new label or, if "splitlabel" is specified
   827  	as an optional query string, the given split label.  Returns the following JSON:
   828  
   829  		{ "label": <new label> }
   830  
   831  	This request requires a binary sparse volume in the POSTed body with the following 
   832  	encoded RLE format, which is compatible with the format returned by a GET on the 
   833  	"sparsevol" endpoint described above:
   834  
   835  		All integers are in little-endian format.
   836  
   837  	    byte     Payload descriptor:
   838  	               Set to 0 to indicate it's a binary sparse volume.
   839  	    uint8    Number of dimensions
   840  	    uint8    Dimension of run (typically 0 = X)
   841  	    byte     Reserved (to be used later)
   842  	    uint32    # Voxels [TODO.  0 for now]
   843  	    uint32    # Spans
   844  	    Repeating unit of:
   845  	        int32   Coordinate of run start (dimension 0)
   846  	        int32   Coordinate of run start (dimension 1)
   847  	        int32   Coordinate of run start (dimension 2)
   848  			  ...
   849  	        int32   Length of run
   850  
   851  	NOTE 1: The POSTed split sparse volume must be a subset of the given label's voxels.  You cannot
   852  	give an arbitrary sparse volume that may span multiple labels.
   853  
   854  	NOTE 2: If a split label is specified, it is the client's responsibility to make sure the given
   855  	label will not create conflict with labels in other versions.  It should primarily be used in
   856  	chain operations like "split-coarse" followed by "split" using voxels, where the new label
   857  	created by the split coarse is used as the split label for the smaller, higher-res "split".
   858  
   859  	Kafka JSON message generated by this request:
   860  		{ 
   861  			"Action": "split",
   862  			"Target": <from label>,
   863  			"NewLabel": <to label>,
   864  			"Split": <string for reference to split data in serialized RLE format>,
   865  			"MutationID": <unique id for mutation>
   866  			"UUID": <UUID on which split was done>
   867  		}
   868  	
   869  	The split reference above can be used to download the split binary data by calling
   870  	this data instance's BlobStore API.  See the node-level HTTP API documentation.
   871  
   872  		GET /api/node/{uuid}/{data name}/blobstore/{reference}
   873  	
   874  	After completion of the split op, the following JSON message is published:
   875  		{ 
   876  			"Action": "split-complete",
   877  			"MutationID": <unique id for mutation>
   878  			"UUID": <UUID on which split was done>
   879  		}
   880  
   881  
   882  POST <api URL>/node/<UUID>/<data name>/split-coarse/<label>[?splitlabel=X]
   883  
   884  	Splits a portion of a label's blocks into a new label or, if "splitlabel" is specified
   885  	as an optional query string, the given split label.  Returns the following JSON:
   886  
   887  		{ "label": <new label> }
   888  
   889  	This request requires a binary sparse volume in the POSTed body with the following 
   890  	encoded RLE format, which is similar to the "split" request format but uses block
   891  	instead of voxel coordinates:
   892  
   893  		All integers are in little-endian format.
   894  
   895  	    byte     Payload descriptor:
   896  	               Set to 0 to indicate it's a binary sparse volume.
   897  	    uint8    Number of dimensions
   898  	    uint8    Dimension of run (typically 0 = X)
   899  	    byte     Reserved (to be used later)
   900  	    uint32    # Blocks [TODO.  0 for now]
   901  	    uint32    # Spans
   902  	    Repeating unit of:
   903  	        int32   Coordinate of run start (dimension 0)
   904  	        int32   Coordinate of run start (dimension 1)
   905  	        int32   Coordinate of run start (dimension 2)
   906  			  ...
   907  	        int32   Length of run
   908  
   909  	The Notes for "split" endpoint above are applicable to this "split-coarse" endpoint.
   910  `
   911  
   912  var (
   913  	dtype        Type
   914  	encodeFormat dvid.DataValues
   915  
   916  	zeroLabelBytes = make([]byte, 8, 8)
   917  
   918  	DefaultBlockSize int32   = 64
   919  	DefaultRes       float32 = imageblk.DefaultRes
   920  	DefaultUnits             = imageblk.DefaultUnits
   921  )
   922  
   923  // SparseVolFormat indicates the type of encoding used for sparse volume representation.
   924  type SparseVolFormat uint8
   925  
   926  const (
   927  	// FormatLegacyRLE is Legacy RLE encoding with header that gives # spans.
   928  	FormatLegacyRLE SparseVolFormat = iota
   929  
   930  	// FormatStreamingRLE specifies Streaming RLE encoding
   931  	FormatStreamingRLE
   932  
   933  	// FormatBinaryBlocks specifies a streaming set of binary Blocks
   934  	FormatBinaryBlocks
   935  )
   936  
   937  // returns default legacy RLE if no options set.
   938  func svformatFromQueryString(r *http.Request) SparseVolFormat {
   939  	switch r.URL.Query().Get("format") {
   940  	case "srles":
   941  		return FormatStreamingRLE
   942  	case "blocks":
   943  		return FormatBinaryBlocks
   944  	default:
   945  		return FormatLegacyRLE
   946  	}
   947  }
   948  
   949  func init() {
   950  	encodeFormat = dvid.DataValues{
   951  		{
   952  			T:     dvid.T_uint64,
   953  			Label: TypeName,
   954  		},
   955  	}
   956  	interpolable := false
   957  	dtype = Type{imageblk.NewType(encodeFormat, interpolable)}
   958  	dtype.Type.Name = TypeName
   959  	dtype.Type.URL = RepoURL
   960  	dtype.Type.Version = Version
   961  
   962  	// See doc for package on why channels are segregated instead of interleaved.
   963  	// Data types must be registered with the datastore to be used.
   964  	datastore.Register(&dtype)
   965  
   966  	// Need to register types that will be used to fulfill interfaces.
   967  	gob.Register(&Type{})
   968  	gob.Register(&Data{})
   969  }
   970  
// bulkLoadInfo holds state for a bulk load of label data from files.
type bulkLoadInfo struct {
	filenames     []string       // paths of files to load
	versionID     dvid.VersionID // version into which the data is loaded
	offset        dvid.Point     // offset for the loaded data; presumably voxel coords — confirm at use site
	extentChanged dvid.Bool      // set if the load changed the instance's extents
}
   977  
// ZeroBytes returns a slice of bytes that represents the zero label.
// The same package-level slice is returned to every caller, so the
// returned bytes must not be mutated.
func ZeroBytes() []byte {
	return zeroLabelBytes
}
   982  
// EncodeFormat returns the data values encoding for this datatype:
// a single uint64 label per voxel (see init).
func EncodeFormat() dvid.DataValues {
	return encodeFormat
}
   986  
   987  // --- Labels64 Datatype -----
   988  
// Type uses imageblk data type by composition.
// It overrides NewDataService and Help (below) for labelarray behavior.
type Type struct {
	imageblk.Type
}
   993  
   994  // --- TypeService interface ---
   995  
// NewDataService returns a new labelarray data instance, fulfilling part of
// the datastore.TypeService interface.
func (dtype *Type) NewDataService(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (datastore.DataService, error) {
	return NewData(uuid, id, name, c)
}
   999  
// Help returns the HTTP API documentation for the labelarray datatype.
func (dtype *Type) Help() string {
	return helpMessage
}
  1003  
  1004  // -------
  1005  
  1006  // GetByDataUUID returns a pointer to labelarray data given a data UUID.  Returns error if not found.
  1007  func GetByDataUUID(dataUUID dvid.UUID) (*Data, error) {
  1008  	source, err := datastore.GetDataByDataUUID(dataUUID)
  1009  	if err != nil {
  1010  		return nil, err
  1011  	}
  1012  	data, ok := source.(*Data)
  1013  	if !ok {
  1014  		return nil, fmt.Errorf("instance '%s' is not a labelarray datatype", source.DataName())
  1015  	}
  1016  	return data, nil
  1017  }
  1018  
  1019  // GetByUUIDName returns a pointer to labelarray data given a UUID and data name.
  1020  func GetByUUIDName(uuid dvid.UUID, name dvid.InstanceName) (*Data, error) {
  1021  	source, err := datastore.GetDataByUUIDName(uuid, name)
  1022  	if err != nil {
  1023  		return nil, err
  1024  	}
  1025  	data, ok := source.(*Data)
  1026  	if !ok {
  1027  		return nil, fmt.Errorf("instance '%s' is not a labelarray datatype", name)
  1028  	}
  1029  	return data, nil
  1030  }
  1031  
  1032  // GetByVersionName returns a pointer to labelarray data given a version and data name.
  1033  func GetByVersionName(v dvid.VersionID, name dvid.InstanceName) (*Data, error) {
  1034  	source, err := datastore.GetDataByVersionName(v, name)
  1035  	if err != nil {
  1036  		return nil, err
  1037  	}
  1038  	data, ok := source.(*Data)
  1039  	if !ok {
  1040  		return nil, fmt.Errorf("instance '%s' is not a labelarray datatype", name)
  1041  	}
  1042  	return data, nil
  1043  }
  1044  
  1045  // -------  ExtData interface implementation -------------
  1046  
// Labels are voxels that have uint64 labels.
// It wraps imageblk.Voxels as part of the ExtData interface implementation.
type Labels struct {
	*imageblk.Voxels
}
  1051  
// String returns a brief description of the label volume's size and offset.
func (l *Labels) String() string {
	return fmt.Sprintf("Labels of size %s @ offset %s", l.Size(), l.StartPoint())
}
  1055  
// Interpolable returns false: label values are identifiers and must not be
// interpolated when resampling.
func (l *Labels) Interpolable() bool {
	return false
}
  1059  
// Data of labelarray type is an extended form of imageblk Data.
// It adds per-version max-label tracking, optional label indexing and
// counting, and multi-scale (down-res) support.
type Data struct {
	*imageblk.Data
	datastore.Updater

	// The maximum label id found in each version of this instance.
	// Can be unset if no new label was added at that version, in which case
	// you must traverse DAG to find max label of parent.
	MaxLabel map[dvid.VersionID]uint64

	// The maximum label for this instance in the entire repo.  This allows us to do
	// conflict-free merges without any relabeling.  Note that relabeling (rebasing)
	// is required if we move data between repos, e.g., when pushing remote nodes,
	// since we have no control over which labels were created remotely and there
	// could be conflicts between the local and remote repos.  When mutations only
	// occur within a single repo, however, this atomic label allows us to prevent
	// conflict across all versions within this repo.
	MaxRepoLabel uint64

	// True if sparse volumes (split, merge, sparse volume optimized GET) are supported
	// for this data instance.  (Default true)
	IndexedLabels bool

	// True if we keep track of # voxels per label.  (Default true)
	CountLabels bool

	// Maximum down-resolution level supported.  Each down-res level is 2x scope of
	// the higher level.
	MaxDownresLevel uint8

	updates  []uint32 // tracks updating to each scale of labelarray [0:MaxDownresLevel+1]
	updateMu sync.RWMutex

	mlMu sync.RWMutex // For atomic access of MaxLabel and MaxRepoLabel

	// unpersisted data: channels for mutations
	mutateCh [numMutateHandlers]chan procMsg // channels into mutate (merge/split) ops.
}
  1098  
// GetMaxDownresLevel returns the maximum down-res level, where level 0 = high-resolution
// and each subsequent level has one-half the resolution.
func (d *Data) GetMaxDownresLevel() uint8 {
	return d.MaxDownresLevel
}
  1104  
  1105  func (d *Data) StartScaleUpdate(scale uint8) {
  1106  	d.updateMu.Lock()
  1107  	d.updates[scale]++
  1108  	d.updateMu.Unlock()
  1109  }
  1110  
  1111  func (d *Data) StopScaleUpdate(scale uint8) {
  1112  	d.updateMu.Lock()
  1113  	d.updates[scale]--
  1114  	if d.updates[scale] < 0 {
  1115  		dvid.Criticalf("StopScaleUpdate(%d) called more than StartScaleUpdate.", scale)
  1116  	}
  1117  	d.updateMu.Unlock()
  1118  }
  1119  
  1120  func (d *Data) ScaleUpdating(scale uint8) bool {
  1121  	d.updateMu.RLock()
  1122  	updating := d.updates[scale] > 0
  1123  	d.updateMu.RUnlock()
  1124  	return updating
  1125  }
  1126  
  1127  func (d *Data) AnyScaleUpdating() bool {
  1128  	d.updateMu.RLock()
  1129  	for scale := uint8(0); scale < d.MaxDownresLevel; scale++ {
  1130  		if d.updates[scale] > 0 {
  1131  			d.updateMu.RUnlock()
  1132  			return true
  1133  		}
  1134  	}
  1135  	d.updateMu.RUnlock()
  1136  	return false
  1137  }
  1138  
  1139  // CopyPropertiesFrom copies the data instance-specific properties from a given
  1140  // data instance into the receiver's properties. Fulfills the datastore.PropertyCopier interface.
  1141  func (d *Data) CopyPropertiesFrom(src datastore.DataService, fs storage.FilterSpec) error {
  1142  	d2, ok := src.(*Data)
  1143  	if !ok {
  1144  		return fmt.Errorf("unable to copy properties from non-labelarray data %q", src.DataName())
  1145  	}
  1146  
  1147  	// TODO -- Handle mutable data that could be potentially altered by filter.
  1148  	d.MaxLabel = make(map[dvid.VersionID]uint64, len(d2.MaxLabel))
  1149  	for k, v := range d2.MaxLabel {
  1150  		d.MaxLabel[k] = v
  1151  	}
  1152  	d.MaxRepoLabel = d2.MaxRepoLabel
  1153  
  1154  	d.IndexedLabels = d2.IndexedLabels
  1155  	d.CountLabels = d2.CountLabels
  1156  	d.MaxDownresLevel = d2.MaxDownresLevel
  1157  
  1158  	return d.Data.CopyPropertiesFrom(d2.Data, fs)
  1159  }
  1160  
  1161  // NewData returns a pointer to labelarray data.
  1162  func NewData(uuid dvid.UUID, id dvid.InstanceID, name dvid.InstanceName, c dvid.Config) (*Data, error) {
  1163  	if _, found := c.Get("BlockSize"); !found {
  1164  		c.Set("BlockSize", fmt.Sprintf("%d,%d,%d", DefaultBlockSize, DefaultBlockSize, DefaultBlockSize))
  1165  	}
  1166  	if _, found := c.Get("Compression"); !found {
  1167  		c.Set("Compression", "gzip")
  1168  	}
  1169  	imgblkData, err := dtype.Type.NewData(uuid, id, name, c)
  1170  	if err != nil {
  1171  		return nil, err
  1172  	}
  1173  
  1174  	data := &Data{
  1175  		Data: imgblkData,
  1176  	}
  1177  	indexedLabels := true
  1178  	b, found, err := c.GetBool("IndexedLabels")
  1179  	if err != nil {
  1180  		return nil, err
  1181  	}
  1182  	if found {
  1183  		indexedLabels = b
  1184  	}
  1185  
  1186  	countLabels := true
  1187  	b, found, err = c.GetBool("CountLabels")
  1188  	if err != nil {
  1189  		return nil, err
  1190  	}
  1191  	if found {
  1192  		countLabels = b
  1193  	}
  1194  
  1195  	var downresLevels uint8
  1196  	levels, found, err := c.GetInt("MaxDownresLevel")
  1197  	if err != nil {
  1198  		return nil, err
  1199  	}
  1200  	if found {
  1201  		if levels < 0 || levels > 255 {
  1202  			return nil, fmt.Errorf("illegal number of down-res levels specified (%d): must be 0 <= n <= 255", levels)
  1203  		}
  1204  		downresLevels = uint8(levels)
  1205  	}
  1206  	data.updates = make([]uint32, downresLevels+1)
  1207  
  1208  	data.MaxLabel = make(map[dvid.VersionID]uint64)
  1209  	data.IndexedLabels = indexedLabels
  1210  	data.CountLabels = countLabels
  1211  	data.MaxDownresLevel = downresLevels
  1212  
  1213  	data.Initialize()
  1214  	return data, nil
  1215  }
  1216  
// propsJSON is the JSON-serializable form of labelarray-specific properties,
// layered on top of the embedded imageblk properties.
type propsJSON struct {
	imageblk.Properties
	MaxLabel        map[dvid.VersionID]uint64
	MaxRepoLabel    uint64
	IndexedLabels   bool
	CountLabels     bool
	MaxDownresLevel uint8
}
  1225  
  1226  func (d *Data) MarshalJSON() ([]byte, error) {
  1227  	return json.Marshal(struct {
  1228  		Base     *datastore.Data
  1229  		Extended propsJSON
  1230  	}{
  1231  		d.Data.Data,
  1232  		propsJSON{
  1233  			Properties:      d.Data.Properties,
  1234  			MaxLabel:        d.MaxLabel,
  1235  			MaxRepoLabel:    d.MaxRepoLabel,
  1236  			IndexedLabels:   d.IndexedLabels,
  1237  			CountLabels:     d.CountLabels,
  1238  			MaxDownresLevel: d.MaxDownresLevel,
  1239  		},
  1240  	})
  1241  }
  1242  
  1243  func (d *Data) MarshalJSONExtents(ctx *datastore.VersionedCtx) ([]byte, error) {
  1244  	// grab extent property and load
  1245  	extents, err := d.GetExtents(ctx)
  1246  	if err != nil {
  1247  		return nil, err
  1248  	}
  1249  
  1250  	var extentsJSON imageblk.ExtentsJSON
  1251  	extentsJSON.MinPoint = extents.MinPoint
  1252  	extentsJSON.MaxPoint = extents.MaxPoint
  1253  
  1254  	props, err := d.PropertiesWithExtents(ctx)
  1255  	if err != nil {
  1256  		return nil, err
  1257  	}
  1258  	return json.Marshal(struct {
  1259  		Base     *datastore.Data
  1260  		Extended propsJSON
  1261  		Extents  imageblk.ExtentsJSON
  1262  	}{
  1263  		d.Data.Data,
  1264  		propsJSON{
  1265  			Properties:      props,
  1266  			MaxLabel:        d.MaxLabel,
  1267  			MaxRepoLabel:    d.MaxRepoLabel,
  1268  			IndexedLabels:   d.IndexedLabels,
  1269  			CountLabels:     d.CountLabels,
  1270  			MaxDownresLevel: d.MaxDownresLevel,
  1271  		},
  1272  		extentsJSON,
  1273  	})
  1274  }
  1275  
// GobDecode deserializes this data instance from gob.  Decode order must
// match GobEncode exactly.  Fields added after the initial release
// (IndexedLabels, CountLabels, MaxDownresLevel) may be missing from older
// serializations; their decode errors are tolerated and defaults applied.
func (d *Data) GobDecode(b []byte) error {
	buf := bytes.NewBuffer(b)
	dec := gob.NewDecoder(buf)
	if err := dec.Decode(&(d.Data)); err != nil {
		return err
	}
	if err := dec.Decode(&(d.IndexedLabels)); err != nil {
		dvid.Errorf("Decoding labelarray %q: no IndexedLabels, setting to true", d.DataName())
		d.IndexedLabels = true
	}
	if err := dec.Decode(&(d.CountLabels)); err != nil {
		dvid.Errorf("Decoding labelarray %q: no CountLabels, setting to true", d.DataName())
		d.CountLabels = true
	}
	if err := dec.Decode(&(d.MaxDownresLevel)); err != nil {
		dvid.Errorf("Decoding labelarray %q: no MaxDownresLevel, setting to 7", d.DataName())
		d.MaxDownresLevel = 7
	}
	// One update counter per scale: [0:MaxDownresLevel+1].
	d.updates = make([]uint32, d.MaxDownresLevel+1)
	return nil
}
  1297  
// GobEncode serializes this data instance to gob.  Encode order must match
// the decode order in GobDecode.
func (d *Data) GobEncode() ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(d.Data); err != nil {
		return nil, err
	}
	if err := enc.Encode(d.IndexedLabels); err != nil {
		return nil, err
	}
	if err := enc.Encode(d.CountLabels); err != nil {
		return nil, err
	}
	if err := enc.Encode(d.MaxDownresLevel); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
  1315  
  1316  // makes database call for any update
  1317  func (d *Data) updateMaxLabel(v dvid.VersionID, label uint64) error {
  1318  	var changed bool
  1319  	d.mlMu.RLock()
  1320  	curMax, found := d.MaxLabel[v]
  1321  	d.mlMu.RUnlock()
  1322  	if !found || curMax < label {
  1323  		changed = true
  1324  	}
  1325  	if changed {
  1326  		d.mlMu.Lock()
  1327  		d.MaxLabel[v] = label
  1328  		if err := d.persistMaxLabel(v); err != nil {
  1329  			return fmt.Errorf("updateMaxLabel of data %q: %v\n", d.DataName(), err)
  1330  		}
  1331  		if label > d.MaxRepoLabel {
  1332  			d.MaxRepoLabel = label
  1333  			if err := d.persistMaxRepoLabel(); err != nil {
  1334  				return fmt.Errorf("updateMaxLabel of data %q: %v\n", d.DataName(), err)
  1335  			}
  1336  		}
  1337  		d.mlMu.Unlock()
  1338  	}
  1339  	return nil
  1340  }
  1341  
  1342  // makes database call for any update
  1343  func (d *Data) updateBlockMaxLabel(v dvid.VersionID, block *labels.Block) {
  1344  	var changed bool
  1345  	d.mlMu.RLock()
  1346  	curMax, found := d.MaxLabel[v]
  1347  	d.mlMu.RUnlock()
  1348  	if !found {
  1349  		curMax = 0
  1350  	}
  1351  	for _, label := range block.Labels {
  1352  		if label > curMax {
  1353  			curMax = label
  1354  			changed = true
  1355  		}
  1356  	}
  1357  	if changed {
  1358  		d.mlMu.Lock()
  1359  		d.MaxLabel[v] = curMax
  1360  		if err := d.persistMaxLabel(v); err != nil {
  1361  			dvid.Errorf("updateBlockMaxLabel of data %q: %v\n", d.DataName(), err)
  1362  		}
  1363  		if curMax > d.MaxRepoLabel {
  1364  			d.MaxRepoLabel = curMax
  1365  			if err := d.persistMaxRepoLabel(); err != nil {
  1366  				dvid.Errorf("updateBlockMaxLabel of data %q: %v\n", d.DataName(), err)
  1367  			}
  1368  		}
  1369  		d.mlMu.Unlock()
  1370  	}
  1371  }
  1372  
  1373  func (d *Data) Equals(d2 *Data) bool {
  1374  	if !d.Data.Equals(d2.Data) {
  1375  		return false
  1376  	}
  1377  	return true
  1378  }
  1379  
  1380  func (d *Data) persistMaxLabel(v dvid.VersionID) error {
  1381  	store, err := datastore.GetOrderedKeyValueDB(d)
  1382  	if err != nil {
  1383  		return err
  1384  	}
  1385  	if len(d.MaxLabel) == 0 {
  1386  		return fmt.Errorf("bad attempt to save non-existent max label for version %d\n", v)
  1387  	}
  1388  	buf := make([]byte, 8)
  1389  	binary.LittleEndian.PutUint64(buf, d.MaxLabel[v])
  1390  	ctx := datastore.NewVersionedCtx(d, v)
  1391  	return store.Put(ctx, maxLabelTKey, buf)
  1392  }
  1393  
  1394  func (d *Data) persistMaxRepoLabel() error {
  1395  	store, err := datastore.GetOrderedKeyValueDB(d)
  1396  	if err != nil {
  1397  		return err
  1398  	}
  1399  	buf := make([]byte, 8)
  1400  	binary.LittleEndian.PutUint64(buf, d.MaxRepoLabel)
  1401  	ctx := storage.NewDataContext(d, 0)
  1402  	return store.Put(ctx, maxRepoLabelTKey, buf)
  1403  }
  1404  
  1405  // NewLabel returns a new label for the given version.
  1406  func (d *Data) NewLabel(v dvid.VersionID) (uint64, error) {
  1407  	d.mlMu.Lock()
  1408  	defer d.mlMu.Unlock()
  1409  
  1410  	// Make sure we aren't trying to increment a label on a locked node.
  1411  	locked, err := datastore.LockedVersion(v)
  1412  	if err != nil {
  1413  		return 0, err
  1414  	}
  1415  	if locked {
  1416  		return 0, fmt.Errorf("can't ask for new label in a locked version id %d", v)
  1417  	}
  1418  
  1419  	// Increment and store.
  1420  	d.MaxRepoLabel++
  1421  	d.MaxLabel[v] = d.MaxRepoLabel
  1422  	if err := d.persistMaxLabel(v); err != nil {
  1423  		return d.MaxRepoLabel, err
  1424  	}
  1425  	if err := d.persistMaxRepoLabel(); err != nil {
  1426  		return d.MaxRepoLabel, err
  1427  	}
  1428  	return d.MaxRepoLabel, nil
  1429  }
  1430  
  1431  // --- datastore.InstanceMutator interface -----
  1432  
// LoadMutable loads mutable properties of label volumes like the maximum labels
// for each version.  Note that we load these max labels from key-value pairs
// rather than data instance properties persistence, because in the case of a crash,
// the actually stored repo data structure may be out-of-date compared to the guaranteed
// up-to-date key-value pairs for max labels.
// The returned bool indicates whether the repo data needs to be re-saved
// (true when a migration from storedVersion 0 was performed).
func (d *Data) LoadMutable(root dvid.VersionID, storedVersion, expectedVersion uint64) (bool, error) {
	ctx := storage.NewDataContext(d, 0)
	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return false, fmt.Errorf("Data type labelarray had error initializing store: %v\n", err)
	}

	// A consumer goroutine (migrate or load, below) reads key-values off ch
	// and signals completion through wg.
	wg := new(sync.WaitGroup)
	wg.Add(1)
	ch := make(chan *storage.KeyValue)

	// Start appropriate migration function if any.
	var saveRequired bool

	switch storedVersion {
	case 0:
		// Need to update all max labels and set repo-level max label.
		saveRequired = true
		dvid.Infof("Migrating old version of labelarray %q to new version\n", d.DataName())
		go d.migrateMaxLabels(root, wg, ch)
	default:
		// Load in each version max label without migration.
		go d.loadMaxLabels(wg, ch)
	}

	// Send the max label data per version
	minKey, err := ctx.MinVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	maxKey, err := ctx.MaxVersionKey(maxLabelTKey)
	if err != nil {
		return false, err
	}
	keysOnly := false
	// NOTE(review): the error returns above and below leave the consumer
	// goroutine blocked on ch since nothing closes it on these paths --
	// presumably RawRangeQuery closes ch on success; confirm upstream.
	if err = store.RawRangeQuery(minKey, maxKey, keysOnly, ch, nil); err != nil {
		return false, err
	}
	wg.Wait()

	dvid.Infof("Loaded max label values for labelarray %q with repo-wide max %d\n", d.DataName(), d.MaxRepoLabel)
	return saveRequired, nil
}
  1481  
  1482  const veryLargeLabel = 10000000000 // 10 billion
  1483  
  1484  func (d *Data) migrateMaxLabels(root dvid.VersionID, wg *sync.WaitGroup, ch chan *storage.KeyValue) {
  1485  	ctx := storage.NewDataContext(d, 0)
  1486  	store, err := datastore.GetOrderedKeyValueDB(d)
  1487  	if err != nil {
  1488  		dvid.Errorf("Can't initialize store for labelarray %q: %v\n", d.DataName(), err)
  1489  	}
  1490  
  1491  	var maxRepoLabel uint64
  1492  	d.MaxLabel = make(map[dvid.VersionID]uint64)
  1493  	for {
  1494  		kv := <-ch
  1495  		if kv == nil {
  1496  			break
  1497  		}
  1498  		v, err := ctx.VersionFromKey(kv.K)
  1499  		if err != nil {
  1500  			dvid.Errorf("Can't decode key when loading mutable data for %s", d.DataName())
  1501  			continue
  1502  		}
  1503  		if len(kv.V) != 8 {
  1504  			dvid.Errorf("Got bad value.  Expected 64-bit label, got %v", kv.V)
  1505  			continue
  1506  		}
  1507  		label := binary.LittleEndian.Uint64(kv.V)
  1508  		d.MaxLabel[v] = label
  1509  		if label > maxRepoLabel {
  1510  			maxRepoLabel = label
  1511  		}
  1512  	}
  1513  
  1514  	// Adjust the MaxLabel data to make sure we correct for any case of child max < parent max.
  1515  	d.adjustMaxLabels(store, root)
  1516  
  1517  	// Set the repo-wide max label.
  1518  	d.MaxRepoLabel = maxRepoLabel
  1519  
  1520  	buf := make([]byte, 8)
  1521  	binary.LittleEndian.PutUint64(buf, maxRepoLabel)
  1522  	store.Put(ctx, maxRepoLabelTKey, buf)
  1523  
  1524  	wg.Done()
  1525  	return
  1526  }
  1527  
// adjustMaxLabels walks the version DAG depth-first from root, ensuring each
// child version's max label is at least its parent's, persisting any changes.
//
// NOTE(review): when childMax < parentMax, the new value is parentMax +
// childMax + 1 rather than simply parentMax -- presumably to leave headroom
// for labels already allocated in the child; confirm intent before changing.
// Put errors are currently ignored.
func (d *Data) adjustMaxLabels(store storage.KeyValueSetter, root dvid.VersionID) error {
	buf := make([]byte, 8)

	parentMax, ok := d.MaxLabel[root]
	if !ok {
		return fmt.Errorf("can't adjust version id %d since none exists in metadata", root)
	}
	childIDs, err := datastore.GetChildrenByVersion(root)
	if err != nil {
		return err
	}
	for _, childID := range childIDs {
		var save bool
		childMax, ok := d.MaxLabel[childID]
		if !ok {
			// set to parent max
			d.MaxLabel[childID] = parentMax
			save = true
		} else if childMax < parentMax {
			d.MaxLabel[childID] = parentMax + childMax + 1
			save = true
		}

		// save the key-value
		if save {
			binary.LittleEndian.PutUint64(buf, d.MaxLabel[childID])
			ctx := datastore.NewVersionedCtx(d, childID)
			store.Put(ctx, maxLabelTKey, buf)
		}

		// recurse for depth-first
		if err := d.adjustMaxLabels(store, childID); err != nil {
			return err
		}
	}
	return nil
}
  1565  
  1566  func (d *Data) loadMaxLabels(wg *sync.WaitGroup, ch chan *storage.KeyValue) {
  1567  	ctx := storage.NewDataContext(d, 0)
  1568  	var repoMax uint64
  1569  	d.MaxLabel = make(map[dvid.VersionID]uint64)
  1570  	for {
  1571  		kv := <-ch
  1572  		if kv == nil {
  1573  			break
  1574  		}
  1575  		v, err := ctx.VersionFromKey(kv.K)
  1576  		if err != nil {
  1577  			dvid.Errorf("Can't decode key when loading mutable data for %s", d.DataName())
  1578  			continue
  1579  		}
  1580  		var label uint64 = veryLargeLabel
  1581  		if len(kv.V) != 8 {
  1582  			dvid.Errorf("Got bad value.  Expected 64-bit label, got %v", kv.V)
  1583  		} else {
  1584  			label = binary.LittleEndian.Uint64(kv.V)
  1585  		}
  1586  		d.MaxLabel[v] = label
  1587  		if label > repoMax {
  1588  			repoMax = label
  1589  		}
  1590  	}
  1591  
  1592  	// Load in the repo-wide max label.
  1593  	store, err := datastore.GetOrderedKeyValueDB(d)
  1594  	if err != nil {
  1595  		dvid.Errorf("Data type labelarray had error initializing store: %v\n", err)
  1596  		return
  1597  	}
  1598  	data, err := store.Get(ctx, maxRepoLabelTKey)
  1599  	if err != nil {
  1600  		dvid.Errorf("Error getting repo-wide max label: %v\n", err)
  1601  		return
  1602  	}
  1603  	if data == nil || len(data) != 8 {
  1604  		dvid.Errorf("Could not load repo-wide max label for instance %q.  Only got %d bytes, not 64-bit label.\n", d.DataName(), len(data))
  1605  		if repoMax == 0 {
  1606  			repoMax = veryLargeLabel
  1607  		}
  1608  		dvid.Errorf("Using max label across versions: %d\n", repoMax)
  1609  		d.MaxRepoLabel = repoMax
  1610  	} else {
  1611  		d.MaxRepoLabel = binary.LittleEndian.Uint64(data)
  1612  		if d.MaxRepoLabel < repoMax {
  1613  			dvid.Errorf("Saved repo-wide max for instance %q was %d, changed to largest version max %d\n", d.DataName(), d.MaxRepoLabel, repoMax)
  1614  			d.MaxRepoLabel = repoMax
  1615  		}
  1616  	}
  1617  	wg.Done()
  1618  }
  1619  
// --- imageblk.IntData interface -------------

// BlockSize returns the block (chunk) dimensions for this data instance.
func (d *Data) BlockSize() dvid.Point {
	return d.Properties.BlockSize
}
  1625  
// Extents returns a pointer to this data instance's current extents, so
// callers may read or adjust them in place.
func (d *Data) Extents() *dvid.Extents {
	return &(d.Properties.Extents)
}
  1629  
// NewLabels returns labelarray Labels, a representation of externally usable subvolume
// or slice data, given some geometry and optional image data.
// If img is passed in, the function will initialize Voxels with data from the image.
// Otherwise, it will allocate a zero buffer of appropriate size.
//
// img may be nil, an image.Image (converted to 64-bit labels if necessary),
// or a []byte whose length must already match the geometry.
func (d *Data) NewLabels(geom dvid.Geometry, img interface{}) (*Labels, error) {
	bytesPerVoxel := d.Properties.Values.BytesPerElement()
	stride := geom.Size().Value(0) * bytesPerVoxel
	var data []byte

	if img == nil {
		// Allocate a zeroed buffer, guarding against oversized requests.
		numVoxels := geom.NumVoxels()
		if numVoxels <= 0 {
			return nil, fmt.Errorf("Illegal geometry requested: %s", geom)
		}
		requestSize := int64(bytesPerVoxel) * numVoxels
		if requestSize > server.MaxDataRequest {
			return nil, fmt.Errorf("Requested payload (%d bytes) exceeds this DVID server's set limit (%d)",
				requestSize, server.MaxDataRequest)
		}
		data = make([]byte, requestSize)
	} else {
		switch t := img.(type) {
		case image.Image:
			var inputBytesPerVoxel, actualStride int32
			var err error
			data, inputBytesPerVoxel, actualStride, err = dvid.ImageData(t)
			if err != nil {
				return nil, err
			}
			if actualStride != stride {
				// Source isn't 64-bit labels; widen each voxel to uint64.
				data, err = d.convertTo64bit(geom, data, int(inputBytesPerVoxel), int(actualStride))
				if err != nil {
					return nil, err
				}
			}
		case []byte:
			data = t
			actualLen := int64(len(data))
			expectedLen := int64(bytesPerVoxel) * geom.NumVoxels()
			if actualLen != expectedLen {
				return nil, fmt.Errorf("labels data was %d bytes, expected %d bytes for %s",
					actualLen, expectedLen, geom)
			}
		default:
			return nil, fmt.Errorf("unexpected image type given to NewVoxels(): %T", t)
		}
	}

	labels := &Labels{
		imageblk.NewVoxels(geom, d.Properties.Values, data, stride),
	}
	return labels, nil
}
  1683  
  1684  // Convert raw image data into a 2d array of 64-bit labels
  1685  func (d *Data) convertTo64bit(geom dvid.Geometry, data []uint8, bytesPerVoxel, stride int) ([]byte, error) {
  1686  	nx := int(geom.Size().Value(0))
  1687  	ny := int(geom.Size().Value(1))
  1688  	numBytes := nx * ny * 8
  1689  	data64 := make([]byte, numBytes, numBytes)
  1690  
  1691  	var byteOrder binary.ByteOrder
  1692  	if geom.DataShape().ShapeDimensions() == 2 {
  1693  		byteOrder = binary.BigEndian // This is the default for PNG
  1694  	} else {
  1695  		byteOrder = binary.LittleEndian
  1696  	}
  1697  
  1698  	switch bytesPerVoxel {
  1699  	case 1:
  1700  		dstI := 0
  1701  		for y := 0; y < ny; y++ {
  1702  			srcI := y * stride
  1703  			for x := 0; x < nx; x++ {
  1704  				binary.LittleEndian.PutUint64(data64[dstI:dstI+8], uint64(data[srcI]))
  1705  				srcI++
  1706  				dstI += 8
  1707  			}
  1708  		}
  1709  	case 2:
  1710  		dstI := 0
  1711  		for y := 0; y < ny; y++ {
  1712  			srcI := y * stride
  1713  			for x := 0; x < nx; x++ {
  1714  				value := byteOrder.Uint16(data[srcI : srcI+2])
  1715  				binary.LittleEndian.PutUint64(data64[dstI:dstI+8], uint64(value))
  1716  				srcI += 2
  1717  				dstI += 8
  1718  			}
  1719  		}
  1720  	case 4:
  1721  		dstI := 0
  1722  		for y := 0; y < ny; y++ {
  1723  			srcI := y * stride
  1724  			for x := 0; x < nx; x++ {
  1725  				value := byteOrder.Uint32(data[srcI : srcI+4])
  1726  				binary.LittleEndian.PutUint64(data64[dstI:dstI+8], uint64(value))
  1727  				srcI += 4
  1728  				dstI += 8
  1729  			}
  1730  		}
  1731  	case 8:
  1732  		dstI := 0
  1733  		for y := 0; y < ny; y++ {
  1734  			srcI := y * stride
  1735  			for x := 0; x < nx; x++ {
  1736  				value := byteOrder.Uint64(data[srcI : srcI+8])
  1737  				binary.LittleEndian.PutUint64(data64[dstI:dstI+8], uint64(value))
  1738  				srcI += 8
  1739  				dstI += 8
  1740  			}
  1741  		}
  1742  	default:
  1743  		return nil, fmt.Errorf("could not convert to 64-bit label given %d bytes/voxel", bytesPerVoxel)
  1744  	}
  1745  	return data64, nil
  1746  }
  1747  
// sendBlocksSpecific writes data to the blocks specified -- best for non-ordered backend.
// blockstring is a comma-separated list of coordinates, three ints per block.
// Blocks are fetched concurrently (one goroutine each) and streamed to w as
// they arrive, with a mutex serializing writes to the response.  Returns the
// number of blocks requested (not necessarily found).
func (d *Data) sendBlocksSpecific(ctx *datastore.VersionedCtx, w http.ResponseWriter, blockstring string, scale uint8) (numBlocks int, err error) {
	w.Header().Set("Content-type", "application/octet-stream")
	// extract query string
	if blockstring == "" {
		return
	}
	coordarray := strings.Split(blockstring, ",")
	if len(coordarray)%3 != 0 {
		err = fmt.Errorf("block query string should be three coordinates per block")
		return
	}
	numBlocks = len(coordarray) / 3

	// make a finished queue (buffered so workers never block sending results)
	finishedRequests := make(chan error, len(coordarray)/3)
	var mutex sync.Mutex

	// get store
	var store storage.KeyValueDB
	store, err = datastore.GetKeyValueDB(d)
	if err != nil {
		return
	}

	// iterate through each block and query
	for i := 0; i < len(coordarray); i += 3 {
		var xloc, yloc, zloc int
		xloc, err = strconv.Atoi(coordarray[i])
		if err != nil {
			return
		}
		yloc, err = strconv.Atoi(coordarray[i+1])
		if err != nil {
			return
		}
		zloc, err = strconv.Atoi(coordarray[i+2])
		if err != nil {
			return
		}

		// The deferred send reports this worker's final err (nil on success
		// or missing block) to the collector loop below.
		go func(xloc, yloc, zloc int32, finishedRequests chan error, store storage.KeyValueDB) {
			var err error
			defer func() {
				finishedRequests <- err
			}()
			indexBeg := dvid.IndexZYX(dvid.ChunkPoint3d{xloc, yloc, zloc})
			keyBeg := NewBlockTKey(scale, &indexBeg)

			value, err := store.Get(ctx, keyBeg)
			if err != nil {
				return
			}
			if len(value) > 0 {
				// lock shared resource: one writer to w at a time
				mutex.Lock()
				d.SendSerializedBlock(w, xloc, yloc, zloc, value, "")
				mutex.Unlock()
			}
		}(int32(xloc), int32(yloc), int32(zloc), finishedRequests, store)
	}

	// wait for everything to finish, keeping the last error encountered
	for i := 0; i < len(coordarray); i += 3 {
		errjob := <-finishedRequests
		if errjob != nil {
			err = errjob
		}
	}
	return
}
  1819  
  1820  // returns nil block if no block is at the given block coordinate
  1821  func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
  1822  	store, err := datastore.GetKeyValueDB(d)
  1823  	if err != nil {
  1824  		return nil, fmt.Errorf("labelarray getLabelBlock() had error initializing store: %v\n", err)
  1825  	}
  1826  	tk := NewBlockTKeyByCoord(scale, bcoord)
  1827  	val, err := store.Get(ctx, tk)
  1828  	if err != nil {
  1829  		return nil, fmt.Errorf("Error on GET of labelarray %q label block @ %s\n", d.DataName(), bcoord)
  1830  	}
  1831  	if val == nil {
  1832  		return nil, nil
  1833  	}
  1834  	data, _, err := dvid.DeserializeData(val, true)
  1835  	if err != nil {
  1836  		return nil, fmt.Errorf("unable to deserialize label block in %q: %v\n", d.DataName(), err)
  1837  	}
  1838  	var block labels.Block
  1839  	if err := block.UnmarshalBinary(data); err != nil {
  1840  		return nil, err
  1841  	}
  1842  	return &labels.PositionedBlock{block, bcoord}, nil
  1843  }
  1844  
  1845  func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pb *labels.PositionedBlock) error {
  1846  	store, err := datastore.GetKeyValueDB(d)
  1847  	if err != nil {
  1848  		return fmt.Errorf("labelarray putLabelBlock() had error initializing store: %v\n", err)
  1849  	}
  1850  	tk := NewBlockTKeyByCoord(scale, pb.BCoord)
  1851  
  1852  	data, err := pb.MarshalBinary()
  1853  	if err != nil {
  1854  		return err
  1855  	}
  1856  
  1857  	val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
  1858  	if err != nil {
  1859  		return fmt.Errorf("Unable to serialize block %s in %q: %v\n", pb.BCoord, d.DataName(), err)
  1860  	}
  1861  	return store.Put(ctx, tk, val)
  1862  }
  1863  
// sendBlock writes a single serialized label block to w, preceded by its
// block coordinate (3 x int32) and payload size (uint32), all little-endian.
// The stored value v begins with a dvid serialization format byte (plus an
// optional 4-byte CRC32) followed by the compressed payload.  compression
// selects the wire format: "" or "lz4" (default), "gzip", "uncompressed",
// or "blocks" (pass through the stored compression untouched).
func (d *Data) sendBlock(w http.ResponseWriter, x, y, z int32, v []byte, compression string) error {
	formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(v[0]))

	// Skip the format byte, and the 4-byte CRC32 if present.
	var start int
	if checksum == dvid.CRC32 {
		start = 5
	} else {
		start = 1
	}

	var outsize uint32
	var out []byte

	switch formatIn {
	case dvid.LZ4:
		// LZ4 payloads are prefixed with their uncompressed size.
		outsize = binary.LittleEndian.Uint32(v[start : start+4])
		out = v[start+4:]
		if len(out) != int(outsize) {
			return fmt.Errorf("block (%d,%d,%d) was corrupted lz4: supposed size %d but had %d bytes", x, y, z, outsize, len(out))
		}
	case dvid.Uncompressed, dvid.Gzip:
		outsize = uint32(len(v[start:]))
		out = v[start:]
	default:
		return fmt.Errorf("labelarray data was stored in unknown compressed format: %s\n", formatIn)
	}

	var formatOut dvid.CompressionFormat
	switch compression {
	case "", "lz4":
		formatOut = dvid.LZ4
	case "blocks":
		formatOut = formatIn
	case "gzip":
		formatOut = dvid.Gzip
	case "uncompressed":
		formatOut = dvid.Uncompressed
	default:
		return fmt.Errorf("unknown compression %q requested for blocks", compression)
	}

	// Need to do uncompression/recompression if we are changing compression.
	// NOTE(review): the "gzip" case recompresses even when formatIn is
	// already gzip -- presumably because stored gzip wraps the compact block
	// encoding while the wire gzip wraps the flat uint64 label volume;
	// confirm before changing.
	var err error
	var uncompressed, recompressed []byte
	if formatIn != formatOut || compression == "gzip" {
		// First recover the uncompressed compact block encoding.
		switch formatIn {
		case dvid.LZ4:
			uncompressed = make([]byte, outsize)
			if err := lz4.Uncompress(out, uncompressed); err != nil {
				return err
			}
		case dvid.Uncompressed:
			uncompressed = out
		case dvid.Gzip:
			gzipIn := bytes.NewBuffer(out)
			zr, err := gzip.NewReader(gzipIn)
			if err != nil {
				return err
			}
			uncompressed, err = ioutil.ReadAll(zr)
			if err != nil {
				return err
			}
			zr.Close()
		}

		// Expand the compact encoding into a full uint64 label volume and
		// sanity-check its dimensions against this instance's block size.
		var block labels.Block
		if err = block.UnmarshalBinary(uncompressed); err != nil {
			return fmt.Errorf("unable to deserialize label block (%d, %d, %d): %v\n", x, y, z, err)
		}
		uint64array, size := block.MakeLabelVolume()
		expectedSize := d.BlockSize().(dvid.Point3d)
		if !size.Equals(expectedSize) {
			return fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
		}

		// Recompress the label volume into the requested output format.
		switch formatOut {
		case dvid.LZ4:
			recompressed = make([]byte, lz4.CompressBound(uint64array))
			var size int
			if size, err = lz4.Compress(uint64array, recompressed); err != nil {
				return err
			}
			outsize = uint32(size)
			out = recompressed[:outsize]
		case dvid.Uncompressed:
			out = uint64array
			outsize = uint32(len(uint64array))
		case dvid.Gzip:
			var gzipOut bytes.Buffer
			zw := gzip.NewWriter(&gzipOut)
			if _, err = zw.Write(uint64array); err != nil {
				return err
			}
			zw.Flush()
			zw.Close()
			out = gzipOut.Bytes()
			outsize = uint32(len(out))
		}
	}

	// Send block coordinate, size of data, then data
	if err := binary.Write(w, binary.LittleEndian, x); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, y); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, z); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, outsize); err != nil {
		return err
	}
	if written, err := w.Write(out); err != nil || written != int(outsize) {
		if err != nil {
			return err
		}
		return fmt.Errorf("could not write %d bytes of block (%d,%d,%d): only %d bytes written", outsize, x, y, z, written)
	}
	return nil
}
  1986  
// SendBlocks returns a series of blocks covering the given block-aligned subvolume.
// Each stored block in the subvolume is streamed to w via sendBlock as
// (x, y, z int32, size uint32, payload) in the requested compression format.
func (d *Data) SendBlocks(ctx *datastore.VersionedCtx, w http.ResponseWriter, scale uint8, subvol *dvid.Subvolume, compression string) error {
	w.Header().Set("Content-type", "application/octet-stream")

	switch compression {
	case "", "lz4", "gzip", "blocks", "uncompressed":
	default:
		return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
	}

	// convert x,y,z coordinates to block coordinates for this scale
	blocksdims := subvol.Size().Div(d.BlockSize())
	blocksoff := subvol.StartPoint().Div(d.BlockSize())

	timedLog := dvid.NewTimeLog()
	defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))

	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		return fmt.Errorf("Data type labelarray had error initializing store: %v\n", err)
	}

	// only do one request at a time, although each request can start many goroutines.
	if subvol.NumVoxels() > 256*256*256 {
		server.LargeMutationMutex.Lock()
		defer server.LargeMutationMutex.Unlock()
	}

	// NOTE(review): this assertion panics if the store doesn't implement
	// storage.BufferableOps -- presumably all ordered key-value backends do.
	okv := store.(storage.BufferableOps)
	// extract buffer interface
	req, hasbuffer := okv.(storage.KeyValueRequester)
	if hasbuffer {
		okv = req.NewBuffer(ctx)
	}

	// Scan one x-run of blocks (fixed y, z) at a time in key order.
	for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
		for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
			beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
			endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}

			indexBeg := dvid.IndexZYX(beginPoint)
			sx, sy, sz := indexBeg.Unpack()
			begTKey := NewBlockTKey(scale, &indexBeg)
			indexEnd := dvid.IndexZYX(endPoint)
			endTKey := NewBlockTKey(scale, &indexEnd)

			// Send the entire range of key-value pairs to chunk processor
			err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
				if c == nil || c.TKeyValue == nil {
					return nil
				}
				kv := c.TKeyValue
				if kv.V == nil {
					return nil
				}

				// Determine which block this is.
				_, indexZYX, err := DecodeBlockTKey(kv.K)
				if err != nil {
					return err
				}
				x, y, z := indexZYX.Unpack()
				// Skip any key the range scan returned outside the requested run.
				if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
					return nil
				}
				if err := d.sendBlock(w, x, y, z, kv.V, compression); err != nil {
					return err
				}
				return nil
			})

			if err != nil {
				return fmt.Errorf("Unable to GET data %s: %v", ctx, err)
			}
		}
	}

	if hasbuffer {
		// submit the entire buffer to the DB
		err = okv.(storage.RequestBuffer).Flush()

		if err != nil {
			return fmt.Errorf("Unable to GET data %s: %v", ctx, err)

		}
	}

	return err
}
  2076  
  2077  func (d *Data) blockChangesExtents(extents *dvid.Extents, bx, by, bz int32) bool {
  2078  	blockSize := d.BlockSize().(dvid.Point3d)
  2079  	start := dvid.Point3d{bx * blockSize[0], by * blockSize[1], bz * blockSize[2]}
  2080  	end := dvid.Point3d{start[0] + blockSize[0] - 1, start[1] + blockSize[1] - 1, start[2] + blockSize[2] - 1}
  2081  	return extents.AdjustPoints(start, end)
  2082  }
  2083  
  2084  // ReceiveBlocks stores a slice of bytes corresponding to specified blocks
  2085  func (d *Data) ReceiveBlocks(ctx *datastore.VersionedCtx, r io.ReadCloser, scale uint8, downscale bool, compression string) error {
  2086  	if downscale && scale != 0 {
  2087  		return fmt.Errorf("cannot downscale blocks of scale > 0")
  2088  	}
  2089  
  2090  	switch compression {
  2091  	case "", "blocks":
  2092  	default:
  2093  		return fmt.Errorf(`compression must be "blocks" (default) at this time`)
  2094  	}
  2095  
  2096  	timedLog := dvid.NewTimeLog()
  2097  	store, err := datastore.GetOrderedKeyValueDB(d)
  2098  	if err != nil {
  2099  		return fmt.Errorf("Data type labelarray had error initializing store: %v\n", err)
  2100  	}
  2101  
  2102  	// extract buffer interface if it exists
  2103  	var putbuffer storage.RequestBuffer
  2104  	if req, ok := store.(storage.KeyValueRequester); ok {
  2105  		putbuffer = req.NewBuffer(ctx)
  2106  	}
  2107  
  2108  	mutID := d.NewMutationID()
  2109  	var downresMut *downres.Mutation
  2110  	if downscale {
  2111  		downresMut = downres.NewMutation(d, ctx.VersionID(), mutID)
  2112  	}
  2113  
  2114  	blockCh := make(chan blockChange, 100)
  2115  	go d.aggregateBlockChanges(ctx.VersionID(), blockCh)
  2116  	var wg sync.WaitGroup
  2117  
  2118  	callback := func(bcoord dvid.IZYXString, block *labels.Block, ready chan error) {
  2119  		if ready != nil {
  2120  			if resperr := <-ready; resperr != nil {
  2121  				dvid.Errorf("Unable to PUT voxel data for block %v: %v\n", bcoord, resperr)
  2122  				return
  2123  			}
  2124  		}
  2125  		event := labels.IngestBlockEvent
  2126  		ingestBlock := IngestedBlock{mutID, bcoord, block}
  2127  		if scale == 0 {
  2128  			d.handleBlockIndexing(ctx.VersionID(), blockCh, ingestBlock)
  2129  		}
  2130  		if downscale {
  2131  			if err := downresMut.BlockMutated(bcoord, block); err != nil {
  2132  				dvid.Errorf("data %q publishing downres: %v\n", d.DataName(), err)
  2133  			}
  2134  		}
  2135  
  2136  		evt := datastore.SyncEvent{d.DataUUID(), event}
  2137  		msg := datastore.SyncMessage{event, ctx.VersionID(), ingestBlock}
  2138  		if err := datastore.NotifySubscribers(evt, msg); err != nil {
  2139  			dvid.Errorf("Unable to notify subscribers of event %s in %s\n", event, d.DataName())
  2140  		}
  2141  
  2142  		wg.Done()
  2143  	}
  2144  
  2145  	if d.Compression().Format() != dvid.Gzip {
  2146  		return fmt.Errorf("labelarray %q cannot accept GZIP /blocks POST since it internally uses %s", d.DataName(), d.Compression().Format())
  2147  	}
  2148  	var extentsChanged bool
  2149  	extents, err := d.GetExtents(ctx)
  2150  	if err != nil {
  2151  		return err
  2152  	}
  2153  	var numBlocks, pos int
  2154  	hdrBytes := make([]byte, 16)
  2155  	for {
  2156  		n, readErr := io.ReadFull(r, hdrBytes)
  2157  		if n != 0 {
  2158  			pos += n
  2159  			if n != 16 {
  2160  				return fmt.Errorf("error reading header bytes at byte %d: %v", pos, err)
  2161  			}
  2162  			bx := int32(binary.LittleEndian.Uint32(hdrBytes[0:4]))
  2163  			by := int32(binary.LittleEndian.Uint32(hdrBytes[4:8]))
  2164  			bz := int32(binary.LittleEndian.Uint32(hdrBytes[8:12]))
  2165  			numBytes := int(binary.LittleEndian.Uint32(hdrBytes[12:16]))
  2166  			bcoord := dvid.ChunkPoint3d{bx, by, bz}.ToIZYXString()
  2167  			tk := NewBlockTKeyByCoord(scale, bcoord)
  2168  			compressed := make([]byte, numBytes)
  2169  			n, readErr = io.ReadFull(r, compressed)
  2170  			if n != numBytes || (readErr != nil && readErr != io.EOF) {
  2171  				return fmt.Errorf("error reading %d bytes for block %s: %d read (%v)", numBytes, bcoord, n, readErr)
  2172  			}
  2173  
  2174  			if scale == 0 {
  2175  				if mod := d.blockChangesExtents(&extents, bx, by, bz); mod {
  2176  					extentsChanged = true
  2177  				}
  2178  			}
  2179  
  2180  			serialization, err := dvid.SerializePrecompressedData(compressed, d.Compression(), d.Checksum())
  2181  			if err != nil {
  2182  				return fmt.Errorf("can't serialize received block %s data: %v", bcoord, err)
  2183  			}
  2184  			pos += n
  2185  
  2186  			gzipIn := bytes.NewBuffer(compressed)
  2187  			zr, err := gzip.NewReader(gzipIn)
  2188  			if err != nil {
  2189  				return fmt.Errorf("can't initiate gzip reader: %v", err)
  2190  			}
  2191  			uncompressed, err := ioutil.ReadAll(zr)
  2192  			if err != nil {
  2193  				return fmt.Errorf("can't read all %d bytes from gzipped block %s: %v", numBytes, bcoord, err)
  2194  			}
  2195  			if err := zr.Close(); err != nil {
  2196  				return fmt.Errorf("error on closing gzip on block read of data %q: %v", d.DataName(), err)
  2197  			}
  2198  
  2199  			var block labels.Block
  2200  			if err = block.UnmarshalBinary(uncompressed); err != nil {
  2201  				return fmt.Errorf("unable to deserialize label block %s: %v", bcoord, err)
  2202  			}
  2203  			if scale == 0 {
  2204  				go d.updateBlockMaxLabel(ctx.VersionID(), &block)
  2205  			}
  2206  
  2207  			if err != nil {
  2208  				return fmt.Errorf("Unable to deserialize %d bytes corresponding to block %s: %v", n, bcoord, err)
  2209  			}
  2210  			wg.Add(1)
  2211  			if putbuffer != nil {
  2212  				ready := make(chan error, 1)
  2213  				go callback(bcoord, &block, ready)
  2214  				putbuffer.PutCallback(ctx, tk, serialization, ready)
  2215  			} else {
  2216  				if err := store.Put(ctx, tk, serialization); err != nil {
  2217  					return fmt.Errorf("Unable to PUT voxel data for block %s: %v", bcoord, err)
  2218  				}
  2219  				go callback(bcoord, &block, nil)
  2220  			}
  2221  			numBlocks++
  2222  		}
  2223  		if readErr == io.EOF {
  2224  			break
  2225  		}
  2226  	}
  2227  
  2228  	wg.Wait()
  2229  	close(blockCh)
  2230  
  2231  	if extentsChanged {
  2232  		if err := d.PostExtents(ctx, extents.StartPoint(), extents.EndPoint()); err != nil {
  2233  			dvid.Criticalf("could not modify extents for labelarray %q: %v\n", d.DataName(), err)
  2234  		}
  2235  	}
  2236  
  2237  	// if a bufferable op, flush
  2238  	if putbuffer != nil {
  2239  		putbuffer.Flush()
  2240  	}
  2241  	if downscale {
  2242  		if err := downresMut.Execute(); err != nil {
  2243  			return err
  2244  		}
  2245  	}
  2246  	timedLog.Infof("Received and stored %d blocks for labelarray %q", numBlocks, d.DataName())
  2247  	return nil
  2248  }
  2249  
// --- datastore.DataService interface ---------

// PushData pushes labelarray data to a remote DVID.
// The embedded imageblk Data handles transmission of the underlying key-values.
func (d *Data) PushData(p *datastore.PushSession) error {
	// Delegate to imageblk's implementation.
	return d.Data.PushData(p)
}
  2257  
// DoRPC acts as a switchboard for RPC commands.
// Supported commands: "load" (asynchronously ingest image files at an offset)
// and "composite" (create a composite visualization instance).
func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {
	switch req.TypeCommand() {
	case "load":
		if len(req.Command) < 5 {
			return fmt.Errorf("Poorly formatted load command.  See command-line help.")
		}
		// Parse the request
		var uuidStr, dataName, cmdStr, offsetStr string
		filenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)
		if err != nil {
			return err
		}
		if len(filenames) == 0 {
			return fmt.Errorf("Need to include at least one file to add: %s", req)
		}

		// offset is the voxel coordinate where the loaded images are placed.
		offset, err := dvid.StringToPoint(offsetStr, ",")
		if err != nil {
			return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
		}

		var addedFiles string
		if len(filenames) == 1 {
			addedFiles = filenames[0]
		} else {
			addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
		}
		dvid.Debugf(addedFiles + "\n")

		uuid, versionID, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}
		if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
			return err
		}
		// Loading happens asynchronously; errors are only logged.
		// NOTE(review): the goroutine reuses the outer err variable for
		// LoadImages -- harmless here since the enclosing call returns nil
		// immediately, but worth confirming if this is ever restructured.
		go func() {
			if err = d.LoadImages(versionID, offset, filenames); err != nil {
				dvid.Errorf("Cannot load images into data instance %q @ node %s: %v\n", dataName, uuidStr, err)
			}
			if err := datastore.SaveDataByUUID(uuid, d); err != nil {
				dvid.Errorf("Could not store metadata changes into data instance %q @ node %s: %v\n", dataName, uuidStr, err)
			}
		}()
		reply.Text = fmt.Sprintf("Asynchronously loading %d files into data instance %q @ node %s (errors will be printed in server log) ...\n", len(filenames), dataName, uuidStr)
		return nil

	case "composite":
		if len(req.Command) < 6 {
			return fmt.Errorf("Poorly formatted composite command.  See command-line help.")
		}
		return d.CreateComposite(req, reply)

	default:
		return fmt.Errorf("Unknown command.  Data type '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), req.TypeCommand())
	}
}
  2317  
  2318  func colorImage(labels *dvid.Image) (image.Image, error) {
  2319  	if labels == nil || labels.Which != 3 || labels.NRGBA64 == nil {
  2320  		return nil, fmt.Errorf("writePseudoColor can't use labels image with wrong format: %v\n", labels)
  2321  	}
  2322  	src := labels.NRGBA64
  2323  	srcRect := src.Bounds()
  2324  	srcW := srcRect.Dx()
  2325  	srcH := srcRect.Dy()
  2326  
  2327  	dst := image.NewNRGBA(image.Rect(0, 0, srcW, srcH))
  2328  
  2329  	for y := 0; y < srcH; y++ {
  2330  		srcI := src.PixOffset(0, y)
  2331  		dstI := dst.PixOffset(0, y)
  2332  		for x := 0; x < srcW; x++ {
  2333  			murmurhash3(src.Pix[srcI:srcI+8], dst.Pix[dstI:dstI+4])
  2334  			dst.Pix[dstI+3] = 255
  2335  
  2336  			srcI += 8
  2337  			dstI += 4
  2338  		}
  2339  	}
  2340  	return dst, nil
  2341  }
  2342  
// compressGoogle uses the neuroglancer compression format
// ("compressed segmentation"): the volume is partitioned into 8x8x8 blocks,
// and each block stores a lookup table of its unique uint64 labels plus
// bit-packed indices into that table.  Input data must be little-endian
// uint64 labels whose subvolume dimensions are multiples of the block size.
// The returned buffer layout is: a 4-byte global header, one 8-byte header
// per block (table offset, encoded bits, data offset), then per-block
// encoded data and lookup tables.
func compressGoogle(data []byte, subvol *dvid.Subvolume) ([]byte, error) {
	// TODO: share table between blocks
	subvolsizes := subvol.Size()

	// must <= 32
	BLKSIZE := int32(8)

	xsize := subvolsizes.Value(0)
	ysize := subvolsizes.Value(1)
	zsize := subvolsizes.Value(2)
	// Number of 8x8x8 blocks along each axis.
	gx := subvolsizes.Value(0) / BLKSIZE
	gy := subvolsizes.Value(1) / BLKSIZE
	gz := subvolsizes.Value(2) / BLKSIZE
	if xsize%BLKSIZE > 0 || ysize%BLKSIZE > 0 || zsize%BLKSIZE > 0 {
		return nil, fmt.Errorf("volume must be a multiple of the block size")
	}

	// add initial 4 byte to designate as a header for the compressed data
	// 64 bit headers for each 8x8x8 block and pre-allocate some data based on expected data size
	dword := 4
	globaloffset := dword

	// Capacity guess assumes roughly 10x compression of the 8-byte labels.
	datagoogle := make([]byte, gx*gy*gz*8+int32(globaloffset), xsize*ysize*zsize*8/10)
	datagoogle[0] = byte(globaloffset / dword) // compressed data starts after first 4 bytes

	// everything is written out little-endian
	for gziter := int32(0); gziter < gz; gziter++ {
		for gyiter := int32(0); gyiter < gy; gyiter++ {
			for gxiter := int32(0); gxiter < gx; gxiter++ {
				// unique_vals maps label -> index in the block's lookup table;
				// unique_list preserves first-seen order for table output.
				unique_vals := make(map[uint64]uint32)
				unique_list := make([]uint64, 0)

				// Byte offset of this block's first voxel in the source data.
				currpos := (gziter*BLKSIZE*(xsize*ysize) + gyiter*BLKSIZE*xsize + gxiter*BLKSIZE) * 8

				// extract unique values in the 8x8x8 block
				for z := int32(0); z < BLKSIZE; z++ {
					for y := int32(0); y < BLKSIZE; y++ {
						for x := int32(0); x < BLKSIZE; x++ {
							if _, ok := unique_vals[binary.LittleEndian.Uint64(data[currpos:currpos+8])]; !ok {
								unique_vals[binary.LittleEndian.Uint64(data[currpos:currpos+8])] = 0
								unique_list = append(unique_list, binary.LittleEndian.Uint64(data[currpos:currpos+8]))
							}
							currpos += 8
						}
						// Skip to the start of the block's next row.
						currpos += ((xsize - BLKSIZE) * 8)
					}
					// Skip to the start of the block's next z-slice.
					currpos += (xsize*ysize - (xsize * (BLKSIZE))) * 8
				}
				// write out mapping
				for pos, val := range unique_list {
					unique_vals[val] = uint32(pos)
				}

				// write-out compressed data
				// Bits needed per index, rounded up to 0, 1, 2, 4, 8, or 16
				// as required by the format.
				encodedBits := uint32(math.Ceil(math.Log2(float64(len(unique_vals)))))
				switch {
				case encodedBits == 0, encodedBits == 1, encodedBits == 2:
				case encodedBits <= 4:
					encodedBits = 4
				case encodedBits <= 8:
					encodedBits = 8
				case encodedBits <= 16:
					encodedBits = 16
				}

				// starting location for writing out data
				currpos2 := len(datagoogle)
				compressstart := len(datagoogle) / dword // in 4-byte units
				// number of bytes to add (encode bytes + table size of 8 byte numbers)
				addedBytes := uint32(encodedBits*uint32(BLKSIZE*BLKSIZE*BLKSIZE)/8) + uint32(len(unique_vals)*8) // will always be a multiple of 4 bytes
				datagoogle = append(datagoogle, make([]byte, addedBytes)...)

				// do not need to write-out anything if there is only one entry
				if encodedBits > 0 {
					currpos := (gziter*BLKSIZE*(xsize*ysize) + gyiter*BLKSIZE*xsize + gxiter*BLKSIZE) * 8

					for z := uint32(0); z < uint32(BLKSIZE); z++ {
						for y := uint32(0); y < uint32(BLKSIZE); y++ {
							for x := uint32(0); x < uint32(BLKSIZE); x++ {
								mappedval := unique_vals[binary.LittleEndian.Uint64(data[currpos:currpos+8])]
								currpos += 8

								// write out encoding
								// Bit position within the current output byte.
								startbit := uint32((encodedBits * x) % uint32(8))
								if encodedBits == 16 {
									// write two bytes worth of data
									datagoogle[currpos2] = byte(255 & mappedval)
									currpos2++
									datagoogle[currpos2] = byte(255 & (mappedval >> 8))
									currpos2++
								} else {
									// write bit-shifted data
									datagoogle[currpos2] |= byte(255 & (mappedval << startbit))
								}
								// Advance to the next byte once it is full.
								if int(startbit) == (8 - int(encodedBits)) {
									currpos2++
								}

							}
							currpos += ((xsize - BLKSIZE) * 8)
						}
						currpos += (xsize*ysize - (xsize * (BLKSIZE))) * 8
					}
				}
				tablestart := currpos2 / dword // in 4-byte units
				// write-out lookup table
				// Each unique label is written as 8 little-endian bytes.
				for _, val := range unique_list {
					for bytespot := uint32(0); bytespot < uint32(8); bytespot++ {
						datagoogle[currpos2] = byte(255 & (val >> (bytespot * 8)))
						currpos2++
					}
				}

				// write-out block header
				// 8 bytes per header entry
				headerpos := (gziter*(gy*gx)+gyiter*gx+gxiter)*8 + int32(globaloffset) // shift start by global offset

				// write out lookup table start
				tablestart -= (globaloffset / dword) // relative to the start of the compressed data
				// 24-bit little-endian table offset.
				datagoogle[headerpos] = byte(255 & tablestart)
				headerpos++
				datagoogle[headerpos] = byte(255 & (tablestart >> 8))
				headerpos++
				datagoogle[headerpos] = byte(255 & (tablestart >> 16))
				headerpos++

				// write out number of encoded bits
				datagoogle[headerpos] = byte(255 & encodedBits)
				headerpos++

				// write out block compress start
				compressstart -= (globaloffset / dword) // relative to the start of the compressed data
				// 32-bit little-endian offset of this block's encoded data.
				datagoogle[headerpos] = byte(255 & compressstart)
				headerpos++
				datagoogle[headerpos] = byte(255 & (compressstart >> 8))
				headerpos++
				datagoogle[headerpos] = byte(255 & (compressstart >> 16))
				headerpos++
				datagoogle[headerpos] = byte(255 & (compressstart >> 24))
			}
		}
	}

	return datagoogle, nil
}
  2489  
  2490  func sendBinaryData(compression string, data []byte, subvol *dvid.Subvolume, w http.ResponseWriter) error {
  2491  	var err error
  2492  	w.Header().Set("Content-type", "application/octet-stream")
  2493  	switch compression {
  2494  	case "":
  2495  		_, err = w.Write(data)
  2496  		if err != nil {
  2497  			return err
  2498  		}
  2499  	case "lz4":
  2500  		compressed := make([]byte, lz4.CompressBound(data))
  2501  		var n, outSize int
  2502  		if outSize, err = lz4.Compress(data, compressed); err != nil {
  2503  			return err
  2504  		}
  2505  		compressed = compressed[:outSize]
  2506  		if n, err = w.Write(compressed); err != nil {
  2507  			return err
  2508  		}
  2509  		if n != outSize {
  2510  			errmsg := fmt.Sprintf("Only able to write %d of %d lz4 compressed bytes\n", n, outSize)
  2511  			dvid.Errorf(errmsg)
  2512  			return err
  2513  		}
  2514  	case "gzip":
  2515  		gw := gzip.NewWriter(w)
  2516  		if _, err = gw.Write(data); err != nil {
  2517  			return err
  2518  		}
  2519  		if err = gw.Close(); err != nil {
  2520  			return err
  2521  		}
  2522  	case "google", "googlegzip": // see neuroglancer for details of compressed segmentation format
  2523  		datagoogle, err := compressGoogle(data, subvol)
  2524  		if err != nil {
  2525  			return err
  2526  		}
  2527  		if compression == "googlegzip" {
  2528  			w.Header().Set("Content-encoding", "gzip")
  2529  			gw := gzip.NewWriter(w)
  2530  			if _, err = gw.Write(datagoogle); err != nil {
  2531  				return err
  2532  			}
  2533  			if err = gw.Close(); err != nil {
  2534  				return err
  2535  			}
  2536  
  2537  		} else {
  2538  			_, err = w.Write(datagoogle)
  2539  			if err != nil {
  2540  				return err
  2541  			}
  2542  		}
  2543  	default:
  2544  		return fmt.Errorf("unknown compression type %q", compression)
  2545  	}
  2546  	return nil
  2547  }
  2548  
  2549  // GetBinaryData returns label data from a potentially compressed ("lz4", "gzip") reader.
  2550  func GetBinaryData(compression string, in io.ReadCloser, estsize int64) ([]byte, error) {
  2551  	var err error
  2552  	var data []byte
  2553  	switch compression {
  2554  	case "":
  2555  		tlog := dvid.NewTimeLog()
  2556  		data, err = ioutil.ReadAll(in)
  2557  		if err != nil {
  2558  			return nil, err
  2559  		}
  2560  		tlog.Debugf("read 3d uncompressed POST")
  2561  	case "lz4":
  2562  		tlog := dvid.NewTimeLog()
  2563  		data, err = ioutil.ReadAll(in)
  2564  		if err != nil {
  2565  			return nil, err
  2566  		}
  2567  		tlog.Debugf("read 3d lz4 POST: %d bytes", len(data))
  2568  		if len(data) == 0 {
  2569  			return nil, fmt.Errorf("received 0 LZ4 compressed bytes")
  2570  		}
  2571  		tlog = dvid.NewTimeLog()
  2572  		uncompressed := make([]byte, estsize)
  2573  		if err = lz4.Uncompress(data, uncompressed); err != nil {
  2574  			return nil, err
  2575  		}
  2576  		data = uncompressed
  2577  		tlog.Debugf("uncompressed 3d lz4 POST: %d bytes", len(data))
  2578  	case "gzip":
  2579  		tlog := dvid.NewTimeLog()
  2580  		gr, err := gzip.NewReader(in)
  2581  		if err != nil {
  2582  			return nil, err
  2583  		}
  2584  		data, err = ioutil.ReadAll(gr)
  2585  		if err != nil {
  2586  			return nil, err
  2587  		}
  2588  		if err = gr.Close(); err != nil {
  2589  			return nil, err
  2590  		}
  2591  		tlog.Debugf("read and uncompress 3d gzip POST: %d bytes", len(data))
  2592  	default:
  2593  		return nil, fmt.Errorf("unknown compression type %q", compression)
  2594  	}
  2595  	return data, nil
  2596  }
  2597  
  2598  // SetResolution loads JSON data giving Resolution.
  2599  func (d *Data) SetResolution(uuid dvid.UUID, jsonBytes []byte) error {
  2600  	config := make(dvid.NdFloat32, 3)
  2601  	if err := json.Unmarshal(jsonBytes, &config); err != nil {
  2602  		return err
  2603  	}
  2604  	d.Properties.VoxelSize = config
  2605  	if err := datastore.SaveDataByUUID(uuid, d); err != nil {
  2606  		return err
  2607  	}
  2608  	return nil
  2609  }
  2610  
  2611  // if hash is not empty, make sure it is hash of data.
  2612  func checkContentHash(hash string, data []byte) error {
  2613  	if hash == "" {
  2614  		return nil
  2615  	}
  2616  	hexHash := fmt.Sprintf("%x", md5.Sum(data))
  2617  	if hexHash != hash {
  2618  		return fmt.Errorf("content hash incorrect.  expected %s, got %s", hash, hexHash)
  2619  	}
  2620  	return nil
  2621  }
  2622  
  2623  func getScale(queryStrings url.Values) (scale uint8, err error) {
  2624  	scaleStr := queryStrings.Get("scale")
  2625  	if scaleStr != "" {
  2626  		var scaleInt int
  2627  		scaleInt, err = strconv.Atoi(scaleStr)
  2628  		if err != nil {
  2629  			return
  2630  		}
  2631  		scale = uint8(scaleInt)
  2632  	}
  2633  	return
  2634  }
  2635  
  2636  // ServeHTTP handles all incoming HTTP requests for this data.
  2637  func (d *Data) ServeHTTP(uuid dvid.UUID, ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) (activity map[string]interface{}) {
  2638  	// Get the action (GET, POST)
  2639  	action := strings.ToLower(r.Method)
  2640  
  2641  	// Break URL request into arguments
  2642  	url := r.URL.Path[len(server.WebAPIPath):]
  2643  	parts := strings.Split(url, "/")
  2644  	if len(parts[len(parts)-1]) == 0 {
  2645  		parts = parts[:len(parts)-1]
  2646  	}
  2647  
  2648  	// Handle POST on data -> setting of configuration
  2649  	if len(parts) == 3 && action == "post" {
  2650  		config, err := server.DecodeJSON(r)
  2651  		if err != nil {
  2652  			server.BadRequest(w, r, err)
  2653  			return
  2654  		}
  2655  		if err := d.ModifyConfig(config); err != nil {
  2656  			server.BadRequest(w, r, err)
  2657  			return
  2658  		}
  2659  		if err := datastore.SaveDataByUUID(uuid, d); err != nil {
  2660  			server.BadRequest(w, r, err)
  2661  			return
  2662  		}
  2663  		fmt.Fprintf(w, "Changed '%s' based on received configuration:\n%s\n", d.DataName(), config)
  2664  		return
  2665  	}
  2666  
  2667  	if len(parts) < 4 {
  2668  		server.BadRequest(w, r, "Incomplete API request")
  2669  		return
  2670  	}
  2671  
  2672  	// Prevent use of APIs that require IndexedLabels when it is not set.
  2673  	if !d.IndexedLabels {
  2674  		switch parts[3] {
  2675  		case "sparsevol", "sparsevol-by-point", "sparsevol-coarse", "maxlabel", "nextlabel", "split", "split-coarse", "merge":
  2676  			server.BadRequest(w, r, "data %q is not label indexed (IndexedLabels=false): %q endpoint is not supported", d.DataName(), parts[3])
  2677  			return
  2678  		}
  2679  	}
  2680  
  2681  	// Handle all requests
  2682  	switch parts[3] {
  2683  	case "help":
  2684  		w.Header().Set("Content-Type", "text/plain")
  2685  		fmt.Fprintln(w, dtype.Help())
  2686  
  2687  	case "metadata":
  2688  		jsonStr, err := d.NdDataMetadata(ctx)
  2689  		if err != nil {
  2690  			server.BadRequest(w, r, err)
  2691  			return
  2692  		}
  2693  		w.Header().Set("Content-Type", "application/vnd.dvid-nd-data+json")
  2694  		fmt.Fprintln(w, jsonStr)
  2695  
  2696  	case "resolution":
  2697  		jsonBytes, err := ioutil.ReadAll(r.Body)
  2698  		if err != nil {
  2699  			server.BadRequest(w, r, err)
  2700  			return
  2701  		}
  2702  		if err := d.SetResolution(uuid, jsonBytes); err != nil {
  2703  			server.BadRequest(w, r, err)
  2704  			return
  2705  		}
  2706  
  2707  	case "info":
  2708  		jsonBytes, err := d.MarshalJSONExtents(ctx)
  2709  		if err != nil {
  2710  			server.BadRequest(w, r, err)
  2711  			return
  2712  		}
  2713  		w.Header().Set("Content-Type", "application/json")
  2714  		fmt.Fprintf(w, string(jsonBytes))
  2715  
  2716  	case "specificblocks":
  2717  		// GET <api URL>/node/<UUID>/<data name>/specificblocks?blocks=x,y,z,x,y,z...
  2718  		blocklist := r.URL.Query().Get("blocks")
  2719  		scale, err := getScale(r.URL.Query())
  2720  		if err != nil {
  2721  			server.BadRequest(w, r, err)
  2722  			return
  2723  		}
  2724  		if action == "get" {
  2725  			numBlocks, err := d.sendBlocksSpecific(ctx, w, blocklist, scale)
  2726  			if err != nil {
  2727  				server.BadRequest(w, r, err)
  2728  				return
  2729  			}
  2730  			timedLog := dvid.NewTimeLog()
  2731  			timedLog.Infof("HTTP %s: %s", r.Method, r.URL)
  2732  			activity = map[string]interface{}{
  2733  				"num_blocks": numBlocks,
  2734  			}
  2735  		} else {
  2736  			server.BadRequest(w, r, "DVID does not accept the %s action on the 'specificblocks' endpoint", action)
  2737  			return
  2738  		}
  2739  
  2740  	case "sync":
  2741  		if action != "post" {
  2742  			server.BadRequest(w, r, "Only POST allowed to sync endpoint")
  2743  			return
  2744  		}
  2745  		replace := r.URL.Query().Get("replace") == "true"
  2746  		if err := datastore.SetSyncByJSON(d, uuid, replace, r.Body); err != nil {
  2747  			server.BadRequest(w, r, err)
  2748  			return
  2749  		}
  2750  
  2751  	case "label":
  2752  		d.handleLabel(ctx, w, r, parts)
  2753  
  2754  	case "labels":
  2755  		d.handleLabels(ctx, w, r)
  2756  
  2757  	case "blocks":
  2758  		d.handleBlocks(ctx, w, r, parts)
  2759  
  2760  	case "pseudocolor":
  2761  		d.handlePseudocolor(ctx, w, r, parts)
  2762  
  2763  	case "raw", "isotropic":
  2764  		d.handleDataRequest(ctx, w, r, parts)
  2765  
  2766  	// endpoints after this must have data instance IndexedLabels = true
  2767  
  2768  	case "sparsevol-size":
  2769  		d.handleSparsevolSize(ctx, w, r, parts)
  2770  
  2771  	case "sparsevol":
  2772  		d.handleSparsevol(ctx, w, r, parts)
  2773  
  2774  	case "sparsevol-by-point":
  2775  		d.handleSparsevolByPoint(ctx, w, r, parts)
  2776  
  2777  	case "sparsevol-coarse":
  2778  		d.handleSparsevolCoarse(ctx, w, r, parts)
  2779  
  2780  	case "sparsevols-coarse":
  2781  		d.handleSparsevolsCoarse(ctx, w, r, parts)
  2782  
  2783  	case "maxlabel":
  2784  		d.handleMaxlabel(ctx, w, r)
  2785  
  2786  	case "nextlabel":
  2787  		d.handleNextlabel(ctx, w, r)
  2788  
  2789  	case "split":
  2790  		d.handleSplit(ctx, w, r, parts)
  2791  
  2792  	case "split-coarse":
  2793  		d.handleSplitCoarse(ctx, w, r, parts)
  2794  
  2795  	case "merge":
  2796  		d.handleMerge(ctx, w, r, parts)
  2797  
  2798  	default:
  2799  		server.BadAPIRequest(w, r, d)
  2800  	}
  2801  	return
  2802  }
  2803  
  2804  // --------- Handler functions for HTTP requests --------------
  2805  
  2806  func (d *Data) handleLabel(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  2807  	// GET <api URL>/node/<UUID>/<data name>/label/<coord>
  2808  	if len(parts) < 5 {
  2809  		server.BadRequest(w, r, "DVID requires coord to follow 'label' command")
  2810  		return
  2811  	}
  2812  	timedLog := dvid.NewTimeLog()
  2813  
  2814  	queryStrings := r.URL.Query()
  2815  	scale, err := getScale(queryStrings)
  2816  	if err != nil {
  2817  		server.BadRequest(w, r, "bad scale specified: %v", err)
  2818  		return
  2819  	}
  2820  	coord, err := dvid.StringToPoint(parts[4], "_")
  2821  	if err != nil {
  2822  		server.BadRequest(w, r, err)
  2823  		return
  2824  	}
  2825  	label, err := d.GetLabelAtScaledPoint(ctx.VersionID(), coord, scale)
  2826  	if err != nil {
  2827  		server.BadRequest(w, r, err)
  2828  		return
  2829  	}
  2830  	w.Header().Set("Content-type", "application/json")
  2831  	jsonStr := fmt.Sprintf(`{"Label": %d}`, label)
  2832  	fmt.Fprintf(w, jsonStr)
  2833  
  2834  	timedLog.Infof("HTTP GET label at %s (%s)", parts[4], r.URL)
  2835  }
  2836  
  2837  func (d *Data) handleLabels(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) {
  2838  	// POST <api URL>/node/<UUID>/<data name>/labels
  2839  	timedLog := dvid.NewTimeLog()
  2840  
  2841  	if strings.ToLower(r.Method) != "get" {
  2842  		server.BadRequest(w, r, "Batch labels query must be a GET request")
  2843  		return
  2844  	}
  2845  	data, err := ioutil.ReadAll(r.Body)
  2846  	if err != nil {
  2847  		server.BadRequest(w, r, "Bad GET request body for batch query: %v", err)
  2848  		return
  2849  	}
  2850  	queryStrings := r.URL.Query()
  2851  	scale, err := getScale(queryStrings)
  2852  	if err != nil {
  2853  		server.BadRequest(w, r, "bad scale specified: %v", err)
  2854  		return
  2855  	}
  2856  	hash := queryStrings.Get("hash")
  2857  	if err := checkContentHash(hash, data); err != nil {
  2858  		server.BadRequest(w, r, err)
  2859  		return
  2860  	}
  2861  	var coords []dvid.Point3d
  2862  	if err := json.Unmarshal(data, &coords); err != nil {
  2863  		server.BadRequest(w, r, fmt.Sprintf("Bad labels request JSON: %v", err))
  2864  		return
  2865  	}
  2866  	w.Header().Set("Content-type", "application/json")
  2867  	fmt.Fprintf(w, "[")
  2868  	sep := false
  2869  	for _, coord := range coords {
  2870  		label, err := d.GetLabelAtScaledPoint(ctx.VersionID(), coord, scale)
  2871  		if err != nil {
  2872  			server.BadRequest(w, r, err)
  2873  			return
  2874  		}
  2875  		if sep {
  2876  			fmt.Fprintf(w, ",")
  2877  		}
  2878  		fmt.Fprintf(w, "%d", label)
  2879  		sep = true
  2880  	}
  2881  	fmt.Fprintf(w, "]")
  2882  
  2883  	timedLog.Infof("HTTP GET batch label-at-point query (%s)", r.URL)
  2884  }
  2885  
  2886  func (d *Data) handleBlocks(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  2887  	// GET <api URL>/node/<UUID>/<data name>/blocks/<size>/<offset>[?compression=...]
  2888  	// POST <api URL>/node/<UUID>/<data name>/blocks[?compression=...]
  2889  	timedLog := dvid.NewTimeLog()
  2890  
  2891  	queryStrings := r.URL.Query()
  2892  	if throttle := queryStrings.Get("throttle"); throttle == "on" || throttle == "true" {
  2893  		if server.ThrottledHTTP(w) {
  2894  			return
  2895  		}
  2896  		defer server.ThrottledOpDone()
  2897  	}
  2898  	scale, err := getScale(queryStrings)
  2899  	if err != nil {
  2900  		server.BadRequest(w, r, "bad scale specified: %v", err)
  2901  		return
  2902  	}
  2903  
  2904  	compression := queryStrings.Get("compression")
  2905  	downscale := queryStrings.Get("downres") == "true"
  2906  	if strings.ToLower(r.Method) == "get" {
  2907  		if len(parts) < 6 {
  2908  			server.BadRequest(w, r, "must specify size and offset with GET /blocks endpoint")
  2909  			return
  2910  		}
  2911  		sizeStr, offsetStr := parts[4], parts[5]
  2912  		subvol, err := dvid.NewSubvolumeFromStrings(offsetStr, sizeStr, "_")
  2913  		if err != nil {
  2914  			server.BadRequest(w, r, err)
  2915  			return
  2916  		}
  2917  		if subvol.StartPoint().NumDims() != 3 || subvol.Size().NumDims() != 3 {
  2918  			server.BadRequest(w, r, "must specify 3D subvolumes", subvol.StartPoint(), subvol.EndPoint())
  2919  			return
  2920  		}
  2921  
  2922  		// Make sure subvolume gets align with blocks
  2923  		if !dvid.BlockAligned(subvol, d.BlockSize()) {
  2924  			server.BadRequest(w, r, "cannot use labels via 'block' endpoint in non-block aligned geometry %s -> %s", subvol.StartPoint(), subvol.EndPoint())
  2925  			return
  2926  		}
  2927  
  2928  		if err := d.SendBlocks(ctx, w, scale, subvol, compression); err != nil {
  2929  			server.BadRequest(w, r, err)
  2930  		}
  2931  		timedLog.Infof("HTTP GET blocks at size %s, offset %s (%s)", parts[4], parts[5], r.URL)
  2932  	} else {
  2933  		if err := d.ReceiveBlocks(ctx, r.Body, scale, downscale, compression); err != nil {
  2934  			server.BadRequest(w, r, err)
  2935  		}
  2936  		timedLog.Infof("HTTP POST blocks (%s)", r.URL)
  2937  	}
  2938  }
  2939  
  2940  func (d *Data) handlePseudocolor(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  2941  	if len(parts) < 7 {
  2942  		server.BadRequest(w, r, "'%s' must be followed by shape/size/offset", parts[3])
  2943  		return
  2944  	}
  2945  	timedLog := dvid.NewTimeLog()
  2946  
  2947  	queryStrings := r.URL.Query()
  2948  	roiname := dvid.InstanceName(queryStrings.Get("roi"))
  2949  	scale, err := getScale(queryStrings)
  2950  	if err != nil {
  2951  		server.BadRequest(w, r, "bad scale specified: %v", err)
  2952  		return
  2953  	}
  2954  
  2955  	shapeStr, sizeStr, offsetStr := parts[4], parts[5], parts[6]
  2956  	planeStr := dvid.DataShapeString(shapeStr)
  2957  	plane, err := planeStr.DataShape()
  2958  	if err != nil {
  2959  		server.BadRequest(w, r, err)
  2960  		return
  2961  	}
  2962  	switch plane.ShapeDimensions() {
  2963  	case 2:
  2964  		slice, err := dvid.NewSliceFromStrings(planeStr, offsetStr, sizeStr, "_")
  2965  		if err != nil {
  2966  			server.BadRequest(w, r, err)
  2967  			return
  2968  		}
  2969  		if strings.ToLower(r.Method) != "get" {
  2970  			server.BadRequest(w, r, "DVID does not permit 2d mutations, only 3d block-aligned stores")
  2971  			return
  2972  		}
  2973  		lbl, err := d.NewLabels(slice, nil)
  2974  		if err != nil {
  2975  			server.BadRequest(w, r, err)
  2976  			return
  2977  		}
  2978  		img, err := d.GetImage(ctx.VersionID(), lbl, scale, roiname)
  2979  		if err != nil {
  2980  			server.BadRequest(w, r, err)
  2981  			return
  2982  		}
  2983  
  2984  		// Convert to pseudocolor
  2985  		pseudoColor, err := colorImage(img)
  2986  		if err != nil {
  2987  			server.BadRequest(w, r, err)
  2988  			return
  2989  		}
  2990  
  2991  		//dvid.ElapsedTime(dvid.Normal, startTime, "%s %s upto image formatting", op, slice)
  2992  		var formatStr string
  2993  		if len(parts) >= 8 {
  2994  			formatStr = parts[7]
  2995  		}
  2996  		err = dvid.WriteImageHttp(w, pseudoColor, formatStr)
  2997  		if err != nil {
  2998  			server.BadRequest(w, r, err)
  2999  			return
  3000  		}
  3001  	default:
  3002  		server.BadRequest(w, r, "DVID currently supports only 2d pseudocolor image requests")
  3003  		return
  3004  	}
  3005  	timedLog.Infof("HTTP GET pseudocolor with shape %s, size %s, offset %s", parts[4], parts[5], parts[6])
  3006  }
  3007  
  3008  func (d *Data) handleDataRequest(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3009  	if len(parts) < 7 {
  3010  		server.BadRequest(w, r, "'%s' must be followed by shape/size/offset", parts[3])
  3011  		return
  3012  	}
  3013  	timedLog := dvid.NewTimeLog()
  3014  
  3015  	var isotropic bool = (parts[3] == "isotropic")
  3016  	shapeStr, sizeStr, offsetStr := parts[4], parts[5], parts[6]
  3017  	planeStr := dvid.DataShapeString(shapeStr)
  3018  	plane, err := planeStr.DataShape()
  3019  	if err != nil {
  3020  		server.BadRequest(w, r, err)
  3021  		return
  3022  	}
  3023  	queryStrings := r.URL.Query()
  3024  	roiname := dvid.InstanceName(queryStrings.Get("roi"))
  3025  
  3026  	scale, err := getScale(queryStrings)
  3027  	if err != nil {
  3028  		server.BadRequest(w, r, "bad scale specified: %v", err)
  3029  		return
  3030  	}
  3031  
  3032  	switch plane.ShapeDimensions() {
  3033  	case 2:
  3034  		slice, err := dvid.NewSliceFromStrings(planeStr, offsetStr, sizeStr, "_")
  3035  		if err != nil {
  3036  			server.BadRequest(w, r, err)
  3037  			return
  3038  		}
  3039  		if strings.ToLower(r.Method) != "get" {
  3040  			server.BadRequest(w, r, "DVID does not permit 2d mutations, only 3d block-aligned stores")
  3041  			return
  3042  		}
  3043  		rawSlice, err := dvid.Isotropy2D(d.Properties.VoxelSize, slice, isotropic)
  3044  		lbl, err := d.NewLabels(rawSlice, nil)
  3045  		if err != nil {
  3046  			server.BadRequest(w, r, err)
  3047  			return
  3048  		}
  3049  		img, err := d.GetImage(ctx.VersionID(), lbl, scale, roiname)
  3050  		if err != nil {
  3051  			server.BadRequest(w, r, err)
  3052  			return
  3053  		}
  3054  		if isotropic {
  3055  			dstW := int(slice.Size().Value(0))
  3056  			dstH := int(slice.Size().Value(1))
  3057  			img, err = img.ScaleImage(dstW, dstH)
  3058  			if err != nil {
  3059  				server.BadRequest(w, r, err)
  3060  				return
  3061  			}
  3062  		}
  3063  		var formatStr string
  3064  		if len(parts) >= 8 {
  3065  			formatStr = parts[7]
  3066  		}
  3067  		//dvid.ElapsedTime(dvid.Normal, startTime, "%s %s upto image formatting", op, slice)
  3068  		err = dvid.WriteImageHttp(w, img.Get(), formatStr)
  3069  		if err != nil {
  3070  			server.BadRequest(w, r, err)
  3071  			return
  3072  		}
  3073  	case 3:
  3074  		if throttle := queryStrings.Get("throttle"); throttle == "on" || throttle == "true" {
  3075  			if server.ThrottledHTTP(w) {
  3076  				return
  3077  			}
  3078  			defer server.ThrottledOpDone()
  3079  		}
  3080  		compression := queryStrings.Get("compression")
  3081  		subvol, err := dvid.NewSubvolumeFromStrings(offsetStr, sizeStr, "_")
  3082  		if err != nil {
  3083  			server.BadRequest(w, r, err)
  3084  			return
  3085  		}
  3086  		if strings.ToLower(r.Method) == "get" {
  3087  			lbl, err := d.NewLabels(subvol, nil)
  3088  			if err != nil {
  3089  				server.BadRequest(w, r, err)
  3090  				return
  3091  			}
  3092  			data, err := d.GetVolume(ctx.VersionID(), lbl, scale, roiname)
  3093  			if err != nil {
  3094  				server.BadRequest(w, r, err)
  3095  				return
  3096  			}
  3097  			if err := sendBinaryData(compression, data, subvol, w); err != nil {
  3098  				server.BadRequest(w, r, err)
  3099  				return
  3100  			}
  3101  		} else {
  3102  			if isotropic {
  3103  				server.BadRequest(w, r, "can only POST 'raw' not 'isotropic' images")
  3104  				return
  3105  			}
  3106  			estsize := subvol.NumVoxels() * 8
  3107  			data, err := GetBinaryData(compression, r.Body, estsize)
  3108  			if err != nil {
  3109  				server.BadRequest(w, r, err)
  3110  				return
  3111  			}
  3112  			mutate := queryStrings.Get("mutate") == "true"
  3113  			if err = d.PutLabels(ctx.VersionID(), subvol, data, roiname, mutate); err != nil {
  3114  				server.BadRequest(w, r, err)
  3115  				return
  3116  			}
  3117  		}
  3118  	default:
  3119  		server.BadRequest(w, r, "DVID currently supports shapes of only 2 and 3 dimensions")
  3120  		return
  3121  	}
  3122  	timedLog.Infof("HTTP %s %s with shape %s, size %s, offset %s, scale %d", r.Method, parts[3], parts[4], parts[5], parts[6], scale)
  3123  }
  3124  
  3125  func (d *Data) getSparsevolOptions(r *http.Request) (b dvid.Bounds, compression string, err error) {
  3126  	queryStrings := r.URL.Query()
  3127  	compression = queryStrings.Get("compression")
  3128  
  3129  	if b.Voxel, err = dvid.OptionalBoundsFromQueryString(r); err != nil {
  3130  		err = fmt.Errorf("Error parsing bounds from query string: %v\n", err)
  3131  		return
  3132  	}
  3133  	blockSize, ok := d.BlockSize().(dvid.Point3d)
  3134  	if !ok {
  3135  		err = fmt.Errorf("Error: BlockSize for %s wasn't 3d", d.DataName())
  3136  		return
  3137  	}
  3138  	b.Block = b.Voxel.Divide(blockSize)
  3139  	b.Exact = true
  3140  	if queryStrings.Get("exact") == "false" {
  3141  		b.Exact = false
  3142  	}
  3143  	return
  3144  }
  3145  
  3146  func (d *Data) handleSparsevolSize(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3147  	// GET <api URL>/node/<UUID>/<data name>/sparsevol-size/<label>
  3148  	if len(parts) < 5 {
  3149  		server.BadRequest(w, r, "ERROR: DVID requires label ID to follow 'sparsevol-size' command")
  3150  		return
  3151  	}
  3152  	label, err := strconv.ParseUint(parts[4], 10, 64)
  3153  	if err != nil {
  3154  		server.BadRequest(w, r, err)
  3155  		return
  3156  	}
  3157  	if label == 0 {
  3158  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3159  		return
  3160  	}
  3161  	if strings.ToLower(r.Method) != "get" {
  3162  		server.BadRequest(w, r, "DVID does not support %s on /sparsevol-size endpoint", r.Method)
  3163  		return
  3164  	}
  3165  
  3166  	meta, lbls, err := GetMappedLabelIndex(d, ctx.VersionID(), label, 0, dvid.Bounds{})
  3167  	if err != nil {
  3168  		server.BadRequest(w, r, "problem getting block indexing on labels %: %v", lbls, err)
  3169  		return
  3170  	}
  3171  	if meta == nil {
  3172  		dvid.Infof("GET sparsevol-size on label %d: not found.\n", label)
  3173  		w.WriteHeader(http.StatusNotFound)
  3174  		return
  3175  	}
  3176  
  3177  	w.Header().Set("Content-type", "application/octet-stream")
  3178  	fmt.Fprintf(w, "{")
  3179  	fmt.Fprintf(w, `"numblocks": %d, `, len(meta.Blocks))
  3180  	minBlock, maxBlock, err := meta.Blocks.GetBounds()
  3181  	if err != nil {
  3182  		server.BadRequest(w, r, "problem getting bounds on blocks of label %d: %v", label, err)
  3183  		return
  3184  	}
  3185  	blockSize, ok := d.BlockSize().(dvid.Point3d)
  3186  	if !ok {
  3187  		server.BadRequest(w, r, "Error: BlockSize for %s wasn't 3d", d.DataName())
  3188  		return
  3189  	}
  3190  	minx := minBlock[0] * blockSize[0]
  3191  	miny := minBlock[1] * blockSize[1]
  3192  	minz := minBlock[2] * blockSize[2]
  3193  	maxx := (maxBlock[0]+1)*blockSize[0] - 1
  3194  	maxy := (maxBlock[1]+1)*blockSize[1] - 1
  3195  	maxz := (maxBlock[2]+1)*blockSize[2] - 1
  3196  	fmt.Fprintf(w, `"minvoxel": [%d, %d, %d], `, minx, miny, minz)
  3197  	fmt.Fprintf(w, `"maxvoxel": [%d, %d, %d]`, maxx, maxy, maxz)
  3198  	fmt.Fprintf(w, "}")
  3199  }
  3200  
// handleSparsevol streams or tests for the sparse volume of a single label.
//
//	GET  <api URL>/node/<UUID>/<data name>/sparsevol/<label>
//	POST <api URL>/node/<UUID>/<data name>/sparsevol/<label>
//	HEAD <api URL>/node/<UUID>/<data name>/sparsevol/<label>
//
// GET writes the label's sparse volume in the format chosen via the query
// string (legacy RLE, binary blocks, or streaming RLE), optionally limited
// by bounds and a downres scale, and answers 404 if the label is absent.
// HEAD answers 200 if the label exists within the optional bounds and 204
// otherwise.  POST is not yet implemented.
func (d *Data) handleSparsevol(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
	// GET <api URL>/node/<UUID>/<data name>/sparsevol/<label>
	// POST <api URL>/node/<UUID>/<data name>/sparsevol/<label>
	// HEAD <api URL>/node/<UUID>/<data name>/sparsevol/<label>
	if len(parts) < 5 {
		server.BadRequest(w, r, "ERROR: DVID requires label ID to follow 'sparsevol' command")
		return
	}
	queryStrings := r.URL.Query()
	// Optional downres scale; 0 is highest resolution.
	scale, err := getScale(queryStrings)
	if err != nil {
		server.BadRequest(w, r, "bad scale specified: %v", err)
		return
	}

	label, err := strconv.ParseUint(parts[4], 10, 64)
	if err != nil {
		server.BadRequest(w, r, err)
		return
	}
	if label == 0 {
		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
		return
	}
	// Shared sparsevol options: bounds, compression scheme, exact flag.
	b, compression, err := d.getSparsevolOptions(r)
	if err != nil {
		server.BadRequest(w, r, err)
		return
	}

	timedLog := dvid.NewTimeLog()
	switch strings.ToLower(r.Method) {
	case "get":
		w.Header().Set("Content-type", "application/octet-stream")

		var found bool
		// Dispatch on the output encoding requested via the query string.
		switch svformatFromQueryString(r) {
		case FormatLegacyRLE:
			found, err = d.writeLegacyRLE(ctx, label, scale, b, compression, w)
		case FormatBinaryBlocks:
			found, err = d.writeBinaryBlocks(ctx, label, scale, b, compression, w)
		case FormatStreamingRLE:
			found, err = d.writeStreamingRLE(ctx, label, scale, b, compression, w)
		}
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		if !found {
			dvid.Infof("GET sparsevol on label %d was not found.\n", label)
			w.WriteHeader(http.StatusNotFound)
			return
		}

	case "head":
		w.Header().Set("Content-type", "text/html")
		found, err := d.FoundSparseVol(ctx, label, b)
		if err != nil {
			server.BadRequest(w, r, err)
			return
		}
		if found {
			w.WriteHeader(http.StatusOK)
		} else {
			w.WriteHeader(http.StatusNoContent)
		}
		return

	case "post":
		server.BadRequest(w, r, "POST of sparsevol not currently implemented\n")
		return
		// if err := d.PutSparseVol(versionID, label, r.Body); err != nil {
		// 	server.BadRequest(w, r, err)
		// 	return
		// }
	default:
		server.BadRequest(w, r, "Unable to handle HTTP action %s on sparsevol endpoint", r.Method)
		return
	}

	timedLog.Infof("HTTP %s: sparsevol on label %s (%s)", r.Method, parts[4], r.URL)
}
  3283  
  3284  func (d *Data) handleSparsevolByPoint(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3285  	// GET <api URL>/node/<UUID>/<data name>/sparsevol-by-point/<coord>
  3286  	if len(parts) < 5 {
  3287  		server.BadRequest(w, r, "ERROR: DVID requires coord to follow 'sparsevol-by-point' command")
  3288  		return
  3289  	}
  3290  	timedLog := dvid.NewTimeLog()
  3291  
  3292  	coord, err := dvid.StringToPoint(parts[4], "_")
  3293  	if err != nil {
  3294  		server.BadRequest(w, r, err)
  3295  		return
  3296  	}
  3297  	label, err := d.GetLabelAtScaledPoint(ctx.VersionID(), coord, 0)
  3298  	if err != nil {
  3299  		server.BadRequest(w, r, err)
  3300  		return
  3301  	}
  3302  	if label == 0 {
  3303  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3304  		return
  3305  	}
  3306  	b, compression, err := d.getSparsevolOptions(r)
  3307  	if err != nil {
  3308  		server.BadRequest(w, r, err)
  3309  		return
  3310  	}
  3311  
  3312  	w.Header().Set("Content-type", "application/octet-stream")
  3313  
  3314  	format := svformatFromQueryString(r)
  3315  	var found bool
  3316  	switch format {
  3317  	case FormatLegacyRLE:
  3318  		found, err = d.writeLegacyRLE(ctx, label, 0, b, compression, w)
  3319  	case FormatBinaryBlocks:
  3320  		found, err = d.writeBinaryBlocks(ctx, label, 0, b, compression, w)
  3321  	case FormatStreamingRLE:
  3322  		found, err = d.writeStreamingRLE(ctx, label, 0, b, compression, w)
  3323  	}
  3324  	if err != nil {
  3325  		server.BadRequest(w, r, err)
  3326  		return
  3327  	}
  3328  	if !found {
  3329  		w.WriteHeader(http.StatusNotFound)
  3330  	}
  3331  	timedLog.Infof("HTTP %s: sparsevol-by-point at %s (%s)", r.Method, parts[4], r.URL)
  3332  }
  3333  
  3334  func (d *Data) handleSparsevolCoarse(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3335  	// GET <api URL>/node/<UUID>/<data name>/sparsevol-coarse/<label>
  3336  	if len(parts) < 5 {
  3337  		server.BadRequest(w, r, "DVID requires label ID to follow 'sparsevol-coarse' command")
  3338  		return
  3339  	}
  3340  	timedLog := dvid.NewTimeLog()
  3341  
  3342  	label, err := strconv.ParseUint(parts[4], 10, 64)
  3343  	if err != nil {
  3344  		server.BadRequest(w, r, err)
  3345  		return
  3346  	}
  3347  	if label == 0 {
  3348  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3349  		return
  3350  	}
  3351  	var b dvid.Bounds
  3352  	b.Voxel, err = dvid.OptionalBoundsFromQueryString(r)
  3353  	if err != nil {
  3354  		server.BadRequest(w, r, "Error parsing bounds from query string: %v\n", err)
  3355  		return
  3356  	}
  3357  	blockSize, ok := d.BlockSize().(dvid.Point3d)
  3358  	if !ok {
  3359  		server.BadRequest(w, r, "Error: BlockSize for %s wasn't 3d", d.DataName())
  3360  		return
  3361  	}
  3362  	b.Block = b.Voxel.Divide(blockSize)
  3363  	data, err := d.GetSparseCoarseVol(ctx, label, b)
  3364  	if err != nil {
  3365  		server.BadRequest(w, r, err)
  3366  		return
  3367  	}
  3368  	if data == nil {
  3369  		w.WriteHeader(http.StatusNotFound)
  3370  		return
  3371  	}
  3372  	w.Header().Set("Content-type", "application/octet-stream")
  3373  	_, err = w.Write(data)
  3374  	if err != nil {
  3375  		server.BadRequest(w, r, err)
  3376  		return
  3377  	}
  3378  	timedLog.Infof("HTTP %s: sparsevol-coarse on label %s (%s)", r.Method, parts[4], r.URL)
  3379  }
  3380  
  3381  func (d *Data) handleSparsevolsCoarse(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3382  	// GET <api URL>/node/<UUID>/<data name>/sparsevols-coarse/<start label>/<end label>
  3383  	if len(parts) < 6 {
  3384  		server.BadRequest(w, r, "DVID requires start and end label ID to follow 'sparsevols-coarse' command")
  3385  		return
  3386  	}
  3387  	timedLog := dvid.NewTimeLog()
  3388  
  3389  	begLabel, err := strconv.ParseUint(parts[4], 10, 64)
  3390  	if err != nil {
  3391  		server.BadRequest(w, r, err)
  3392  		return
  3393  	}
  3394  	endLabel, err := strconv.ParseUint(parts[5], 10, 64)
  3395  	if err != nil {
  3396  		server.BadRequest(w, r, err)
  3397  		return
  3398  	}
  3399  	if begLabel == 0 || endLabel == 0 {
  3400  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3401  		return
  3402  	}
  3403  
  3404  	var b dvid.Bounds
  3405  	b.Voxel, err = dvid.OptionalBoundsFromQueryString(r)
  3406  	if err != nil {
  3407  		server.BadRequest(w, r, "Error parsing bounds from query string: %v\n", err)
  3408  		return
  3409  	}
  3410  	blockSize, ok := d.BlockSize().(dvid.Point3d)
  3411  	if !ok {
  3412  		server.BadRequest(w, r, "Error: BlockSize for %s wasn't 3d", d.DataName())
  3413  		return
  3414  	}
  3415  	b.Block = b.Voxel.Divide(blockSize)
  3416  
  3417  	w.Header().Set("Content-type", "application/octet-stream")
  3418  	if err := d.WriteSparseCoarseVols(ctx, w, begLabel, endLabel, b); err != nil {
  3419  		server.BadRequest(w, r, err)
  3420  		return
  3421  	}
  3422  	timedLog.Infof("HTTP %s: sparsevols-coarse on label %s to %s (%s)", r.Method, parts[4], parts[5], r.URL)
  3423  }
  3424  
  3425  func (d *Data) handleMaxlabel(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) {
  3426  	// GET <api URL>/node/<UUID>/<data name>/maxlabel
  3427  	timedLog := dvid.NewTimeLog()
  3428  	w.Header().Set("Content-Type", "application/json")
  3429  	switch strings.ToLower(r.Method) {
  3430  	case "get":
  3431  		maxlabel, ok := d.MaxLabel[ctx.VersionID()]
  3432  		if !ok {
  3433  			server.BadRequest(w, r, "No maximum label found for %s version %d\n", d.DataName(), ctx.VersionID())
  3434  			return
  3435  		}
  3436  		fmt.Fprintf(w, "{%q: %d}", "maxlabel", maxlabel)
  3437  	default:
  3438  		server.BadRequest(w, r, "Unknown action %q requested: %s\n", r.Method, r.URL)
  3439  		return
  3440  	}
  3441  	timedLog.Infof("HTTP maxlabel request (%s)", r.URL)
  3442  }
  3443  
  3444  func (d *Data) handleNextlabel(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request) {
  3445  	// GET <api URL>/node/<UUID>/<data name>/nextlabel
  3446  	// POST <api URL>/node/<UUID>/<data name>/nextlabel
  3447  	timedLog := dvid.NewTimeLog()
  3448  	w.Header().Set("Content-Type", "application/json")
  3449  	switch strings.ToLower(r.Method) {
  3450  	case "get":
  3451  		fmt.Fprintf(w, "{%q: %d}", "nextlabel", d.MaxRepoLabel+1)
  3452  	case "post":
  3453  		server.BadRequest(w, r, "POST on maxlabel is not supported yet.\n")
  3454  		return
  3455  	default:
  3456  		server.BadRequest(w, r, "Unknown action %q requested: %s\n", r.Method, r.URL)
  3457  		return
  3458  	}
  3459  	timedLog.Infof("HTTP maxlabel request (%s)", r.URL)
  3460  }
  3461  
  3462  func (d *Data) handleSplit(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3463  	// POST <api URL>/node/<UUID>/<data name>/split/<label>[?splitlabel=X]
  3464  	if strings.ToLower(r.Method) != "post" {
  3465  		server.BadRequest(w, r, "Split requests must be POST actions.")
  3466  		return
  3467  	}
  3468  	if len(parts) < 5 {
  3469  		server.BadRequest(w, r, "ERROR: DVID requires label ID to follow 'split' command")
  3470  		return
  3471  	}
  3472  	timedLog := dvid.NewTimeLog()
  3473  
  3474  	fromLabel, err := strconv.ParseUint(parts[4], 10, 64)
  3475  	if err != nil {
  3476  		server.BadRequest(w, r, err)
  3477  		return
  3478  	}
  3479  	if fromLabel == 0 {
  3480  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3481  		return
  3482  	}
  3483  	var splitLabel uint64
  3484  	queryStrings := r.URL.Query()
  3485  	splitStr := queryStrings.Get("splitlabel")
  3486  	if splitStr != "" {
  3487  		splitLabel, err = strconv.ParseUint(splitStr, 10, 64)
  3488  		if err != nil {
  3489  			server.BadRequest(w, r, "Bad parameter for 'splitlabel' query string (%q).  Must be uint64.\n", splitStr)
  3490  		}
  3491  	}
  3492  	toLabel, err := d.SplitLabels(ctx.VersionID(), fromLabel, splitLabel, r.Body)
  3493  	if err != nil {
  3494  		server.BadRequest(w, r, fmt.Sprintf("split label %d -> %d: %v", fromLabel, splitLabel, err))
  3495  		return
  3496  	}
  3497  	w.Header().Set("Content-Type", "application/json")
  3498  	fmt.Fprintf(w, "{%q: %d}", "label", toLabel)
  3499  
  3500  	timedLog.Infof("HTTP split of label %d request (%s)", fromLabel, r.URL)
  3501  }
  3502  
  3503  func (d *Data) handleSplitCoarse(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3504  	// POST <api URL>/node/<UUID>/<data name>/split-coarse/<label>[?splitlabel=X]
  3505  	if strings.ToLower(r.Method) != "post" {
  3506  		server.BadRequest(w, r, "Split-coarse requests must be POST actions.")
  3507  		return
  3508  	}
  3509  	if len(parts) < 5 {
  3510  		server.BadRequest(w, r, "ERROR: DVID requires label ID to follow 'split' command")
  3511  		return
  3512  	}
  3513  	timedLog := dvid.NewTimeLog()
  3514  
  3515  	fromLabel, err := strconv.ParseUint(parts[4], 10, 64)
  3516  	if err != nil {
  3517  		server.BadRequest(w, r, err)
  3518  		return
  3519  	}
  3520  	if fromLabel == 0 {
  3521  		server.BadRequest(w, r, "Label 0 is protected background value and cannot be used as sparse volume.\n")
  3522  		return
  3523  	}
  3524  	var splitLabel uint64
  3525  	queryStrings := r.URL.Query()
  3526  	splitStr := queryStrings.Get("splitlabel")
  3527  	if splitStr != "" {
  3528  		splitLabel, err = strconv.ParseUint(splitStr, 10, 64)
  3529  		if err != nil {
  3530  			server.BadRequest(w, r, "Bad parameter for 'splitlabel' query string (%q).  Must be uint64.\n", splitStr)
  3531  		}
  3532  	}
  3533  	toLabel, err := d.SplitCoarseLabels(ctx.VersionID(), fromLabel, splitLabel, r.Body)
  3534  	if err != nil {
  3535  		server.BadRequest(w, r, fmt.Sprintf("split-coarse: %v", err))
  3536  		return
  3537  	}
  3538  	w.Header().Set("Content-Type", "application/json")
  3539  	fmt.Fprintf(w, "{%q: %d}", "label", toLabel)
  3540  
  3541  	timedLog.Infof("HTTP split-coarse of label %d request (%s)", fromLabel, r.URL)
  3542  }
  3543  
  3544  func (d *Data) handleMerge(ctx *datastore.VersionedCtx, w http.ResponseWriter, r *http.Request, parts []string) {
  3545  	// POST <api URL>/node/<UUID>/<data name>/merge
  3546  	if strings.ToLower(r.Method) != "post" {
  3547  		server.BadRequest(w, r, "Merge requests must be POST actions.")
  3548  		return
  3549  	}
  3550  	timedLog := dvid.NewTimeLog()
  3551  
  3552  	data, err := ioutil.ReadAll(r.Body)
  3553  	if err != nil {
  3554  		server.BadRequest(w, r, "Bad POSTed data for merge.  Should be JSON.")
  3555  		return
  3556  	}
  3557  	var tuple labels.MergeTuple
  3558  	if err := json.Unmarshal(data, &tuple); err != nil {
  3559  		server.BadRequest(w, r, fmt.Sprintf("Bad merge op JSON: %v", err))
  3560  		return
  3561  	}
  3562  	mergeOp, err := tuple.Op()
  3563  	if err != nil {
  3564  		server.BadRequest(w, r, err)
  3565  		return
  3566  	}
  3567  	if err := d.MergeLabels(ctx.VersionID(), mergeOp); err != nil {
  3568  		server.BadRequest(w, r, fmt.Sprintf("Error on merge: %v", err))
  3569  		return
  3570  	}
  3571  
  3572  	timedLog.Infof("HTTP merge request (%s)", r.URL)
  3573  }
  3574  
  3575  // --------- Other functions on labelarray Data -----------------
  3576  
  3577  // GetLabelBlock returns a compressed label Block of the given block coordinate.
  3578  func (d *Data) GetLabelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) {
  3579  	store, err := datastore.GetOrderedKeyValueDB(d)
  3580  	if err != nil {
  3581  		return nil, err
  3582  	}
  3583  
  3584  	// Retrieve the block of labels
  3585  	ctx := datastore.NewVersionedCtx(d, v)
  3586  	index := dvid.IndexZYX(bcoord)
  3587  	serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
  3588  	if err != nil {
  3589  		return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
  3590  	}
  3591  	if serialization == nil {
  3592  		blockSize, ok := d.BlockSize().(dvid.Point3d)
  3593  		if !ok {
  3594  			return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
  3595  		}
  3596  		return labels.MakeSolidBlock(0, blockSize), nil
  3597  	}
  3598  	deserialization, _, err := dvid.DeserializeData(serialization, true)
  3599  	if err != nil {
  3600  		return nil, fmt.Errorf("unable to deserialize block %s in '%s': %v", bcoord, d.DataName(), err)
  3601  	}
  3602  	var block labels.Block
  3603  	if err = block.UnmarshalBinary(deserialization); err != nil {
  3604  		return nil, err
  3605  	}
  3606  	return &block, nil
  3607  }
  3608  
  3609  // GetLabelBytesWithScale returns a block of labels in packed little-endian uint64 format at the given scale.
  3610  func (d *Data) GetLabelBytesWithScale(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) ([]byte, error) {
  3611  	block, err := d.GetLabelBlock(v, bcoord, scale)
  3612  	if err != nil {
  3613  		return nil, err
  3614  	}
  3615  	labelData, _ := block.MakeLabelVolume()
  3616  	return labelData, nil
  3617  }
  3618  
  3619  // GetLabelBytesAtScaledPoint returns the 8 byte slice corresponding to a 64-bit label at a point.
  3620  func (d *Data) GetLabelBytesAtScaledPoint(v dvid.VersionID, pt dvid.Point, scale uint8) ([]byte, error) {
  3621  	coord, ok := pt.(dvid.Chunkable)
  3622  	if !ok {
  3623  		return nil, fmt.Errorf("Can't determine block of point %s", pt)
  3624  	}
  3625  	blockSize := d.BlockSize()
  3626  	bcoord := coord.Chunk(blockSize).(dvid.ChunkPoint3d)
  3627  
  3628  	labelData, err := d.GetLabelBytesWithScale(v, bcoord, scale)
  3629  	if err != nil {
  3630  		return nil, err
  3631  	}
  3632  	if len(labelData) == 0 {
  3633  		return zeroLabelBytes, nil
  3634  	}
  3635  
  3636  	// Retrieve the particular label within the block.
  3637  	ptInBlock := coord.PointInChunk(blockSize)
  3638  	nx := int64(blockSize.Value(0))
  3639  	nxy := nx * int64(blockSize.Value(1))
  3640  	i := (int64(ptInBlock.Value(0)) + int64(ptInBlock.Value(1))*nx + int64(ptInBlock.Value(2))*nxy) * 8
  3641  	return labelData[i : i+8], nil
  3642  }
  3643  
  3644  // GetLabelAtScaledPoint returns the 64-bit unsigned int label for a given point.
  3645  func (d *Data) GetLabelAtScaledPoint(v dvid.VersionID, pt dvid.Point, scale uint8) (uint64, error) {
  3646  	labelBytes, err := d.GetLabelBytesAtScaledPoint(v, pt, scale)
  3647  	if err != nil {
  3648  		return 0, err
  3649  	}
  3650  	return binary.LittleEndian.Uint64(labelBytes), nil
  3651  }
  3652  
  3653  // The following functions implement an interface to synced data types like annotation.
  3654  
// GetLabelBytes returns a block of hi-res labels (scale 0) in packed little-endian uint64 format
// by delegating to GetLabelBytesWithScale with scale fixed at 0.
func (d *Data) GetLabelBytes(v dvid.VersionID, bcoord dvid.ChunkPoint3d) ([]byte, error) {
	return d.GetLabelBytesWithScale(v, bcoord, 0)
}
  3659  
// GetLabelAtPoint returns the 64-bit unsigned int label for a given point
// at hi-res (scale 0), delegating to GetLabelAtScaledPoint.
func (d *Data) GetLabelAtPoint(v dvid.VersionID, pt dvid.Point) (uint64, error) {
	return d.GetLabelAtScaledPoint(v, pt, 0)
}