storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/mint/run/core/awscli/test.sh (about)

     1  #!/bin/bash
     2  #
     3  #  Mint (C) 2017-2020 Minio, Inc.
     4  #
     5  #  Licensed under the Apache License, Version 2.0 (the "License");
     6  #  you may not use this file except in compliance with the License.
     7  #  You may obtain a copy of the License at
     8  #
     9  #      http://www.apache.org/licenses/LICENSE-2.0
    10  #
    11  #  Unless required by applicable law or agreed to in writing, software
    12  #  distributed under the License is distributed on an "AS IS" BASIS,
    13  #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14  #  See the License for the specific language governing permissions and
    15  #  limitations under the License.
    16  #
    17  
    18  HASH_1_KB=$(md5sum "${MINT_DATA_DIR}/datafile-1-kB" | awk '{print $1}')
    19  HASH_65_MB=$(md5sum "${MINT_DATA_DIR}/datafile-65-MB" | awk '{print $1}')
    20  
# Initialize the global AWS command prefix used by every test.
# $1 - S3 endpoint URL to target (passed to --endpoint-url).
_init() {
    AWS="aws --endpoint-url $1"
}
    24  
    25  function get_time() {
    26      date +%s%N
    27  }
    28  
    29  function get_duration() {
    30      start_time=$1
    31      end_time=$(get_time)
    32  
    33      echo $(( (end_time - start_time) / 1000000 ))
    34  }
    35  
# Emit a one-line PASS record in mint's JSON log format.
# $1 - test duration in milliseconds; $2 - command/function under test.
# The python one-liner JSON-escapes $2 (it may contain quotes and spaces);
# assumes a `python` binary is on PATH (mint's container provides one).
function log_success() {
    function=$(python -c 'import sys,json; print(json.dumps(sys.stdin.read()))' <<<"$2")
    printf '{"name": "awscli", "duration": %d, "function": %s, "status": "PASS"}\n' "$1" "$function"
}
    40  
# Emit a one-line FAIL record in mint's JSON log format.
# $1 - duration in ms; $2 - command under test; $3 - error output.
# Newlines are stripped from $3 to keep the record on one line.
# NOTE(review): $err is interpolated unescaped into the JSON "error" field —
# a double quote inside $3 would produce invalid JSON; verify upstream.
function log_failure() {
    function=$(python -c 'import sys,json; print(json.dumps(sys.stdin.read()))' <<<"$2")
    err=$(echo "$3" | tr -d '\n')
    printf '{"name": "awscli", "duration": %d, "function": %s, "status": "FAIL", "error": "%s"}\n' "$1" "$function" "$err"
}
    46  
# Emit a one-line FAIL record with an extra "alert" field.
# $1 - duration in ms; $2 - command under test; $3 - alert text; $4 - error.
# NOTE(review): like log_failure, $3 and $err are not JSON-escaped.
function log_alert() {
    function=$(python -c 'import sys,json; print(json.dumps(sys.stdin.read()))' <<<"$2")
    err=$(echo "$4" | tr -d '\n')
    printf '{"name": "awscli", "duration": %d, "function": %s, "status": "FAIL", "alert": "%s", "error": "%s"}\n' "$1" "$function" "$3" "$err"
}
    52  
    53  function make_bucket() {
    54      # Make bucket
    55      bucket_name="awscli-mint-test-bucket-$RANDOM"
    56      function="${AWS} s3api create-bucket --bucket ${bucket_name}"
    57  
    58      # execute the test
    59      out=$($function 2>&1)
    60      rv=$?
    61  
    62      # if command is successful print bucket_name or print error
    63      if [ $rv -eq 0 ]; then
    64          echo "${bucket_name}"
    65      else
    66          echo "${out}"
    67      fi
    68  
    69      return $rv
    70  }
    71  
    72  function delete_bucket() {
    73      # Delete bucket
    74      function="${AWS} s3 rb s3://${1} --force"
    75      out=$($function 2>&1)
    76      rv=$?
    77  
    78      # echo the output
    79      echo "${out}"
    80  
    81      return $rv
    82  }
    83  
    84  # Tests creating, stat and delete on a bucket.
    85  function test_create_bucket() {
    86      # log start time
    87      start_time=$(get_time)
    88  
    89      function="make_bucket"
    90      bucket_name=$(make_bucket)
    91      rv=$?
    92      # save the ref to function being tested, so it can be logged
    93      test_function=${function}
    94  
    95      # if make_bucket is successful stat the bucket
    96      if [ $rv -eq 0 ]; then
    97          function="${AWS} s3api head-bucket --bucket ${bucket_name}"
    98          out=$($function 2>&1)
    99          rv=$?
   100      else
   101          # if make bucket failes, $bucket_name has the error output
   102          out="${bucket_name}"
   103      fi
   104  
   105       # if stat bucket is successful remove the bucket
   106      if [ $rv -eq 0 ]; then
   107          function="delete_bucket"
   108          out=$(delete_bucket "${bucket_name}")
   109          rv=$?
   110      else
   111          # if make bucket failes, $bucket_name has the error output
   112          out="${bucket_name}"
   113      fi
   114  
   115      if [ $rv -eq 0 ]; then
   116          log_success "$(get_duration "$start_time")" "${test_function}"
   117      else
   118          # clean up and log error
   119          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   120          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   121      fi
   122  
   123      return $rv
   124  }
   125  
   126  # Tests creating and deleting an object.
   127  function test_upload_object() {
   128      # log start time
   129      start_time=$(get_time)
   130  
   131      function="make_bucket"
   132      bucket_name=$(make_bucket)
   133      rv=$?
   134  
   135      # if make bucket succeeds upload a file
   136      if [ $rv -eq 0 ]; then
   137          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
   138          out=$($function 2>&1)
   139          rv=$?
   140      else
   141          # if make bucket fails, $bucket_name has the error output
   142          out="${bucket_name}"
   143      fi
   144  
   145      # if upload succeeds download the file
   146      if [ $rv -eq 0 ]; then
   147          function="${AWS} s3api get-object --bucket ${bucket_name} --key datafile-1-kB /tmp/datafile-1-kB"
   148          # save the ref to function being tested, so it can be logged
   149          test_function=${function}
   150          out=$($function 2>&1)
   151          rv=$?
   152          # calculate the md5 hash of downloaded file
   153          hash2=$(md5sum /tmp/datafile-1-kB | awk '{print $1}')
   154      fi
   155  
   156      # if download succeeds, verify downloaded file
   157      if [ $rv -eq 0 ]; then
   158          if [ "$HASH_1_KB" == "$hash2" ]; then
   159              function="delete_bucket"
   160              out=$(delete_bucket "$bucket_name")
   161              rv=$?
   162              # remove download file
   163              rm -f /tmp/datafile-1-kB
   164          else
   165              rv=1
   166              out="Checksum verification failed for uploaded object"
   167          fi
   168      fi
   169  
   170      if [ $rv -eq 0 ]; then
   171          log_success "$(get_duration "$start_time")" "${test_function}"
   172      else
   173          # clean up and log error
   174          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   175          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   176      fi
   177  
   178      return $rv
   179  }
   180  
   181  # Test lookup a directory prefix.
# Test lookup a directory prefix.
# Exercises directory-style keys (trailing "/"): create the prefix object,
# overwrite it, head it, upload a real object under it, attempt to delete the
# prefix while the object exists, then verify the object is still reachable.
# State threads through globals $rv/$out/$function; each step only runs when
# the previous one succeeded ($rv -eq 0).
function test_lookup_object_prefix() {
    # log start time
    start_time=$(get_time)

    function="make_bucket"
    bucket_name=$(make_bucket)
    rv=$?

    # if make bucket succeeds create a directory.
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --bucket ${bucket_name} --key prefix/directory/"
        # save the ref to function being tested, so it can be logged
        test_function=${function}

        out=$($function 2>&1)

        rv=$?
    else
        # if make_bucket fails, $bucket_name has the error output
        out="${bucket_name}"
    fi

    if [ $rv -eq 0 ]; then
        ## Attempt an overwrite of the prefix again and should succeed as well.
        function="${AWS} s3api put-object --bucket ${bucket_name} --key prefix/directory/"
        # save the ref to function being tested, so it can be logged
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # if upload succeeds lookup for the prefix.
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api head-object --bucket ${bucket_name} --key prefix/directory/"
        # save the ref to function being tested, so it can be logged
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # if directory create succeeds, upload the object.
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key prefix/directory/datafile-1-kB"
        # save the ref to function being tested, so it can be logged
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # Attempt a delete on prefix shouldn't delete the directory since we have an object inside it.
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api delete-object --bucket ${bucket_name} --key prefix/directory/"
        # save the ref to function being tested, so it can be logged
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # if upload succeeds lookup for the object should succeed.
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api head-object --bucket ${bucket_name} --key prefix/directory/datafile-1-kB"
        # save the ref to function being tested, so it can be logged
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # delete bucket
    if [ $rv -eq 0 ]; then
        function="delete_bucket"
        out=$(delete_bucket "$bucket_name")
        rv=$?
    fi

    if [ $rv -ne 0 ]; then
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    else
        log_success "$(get_duration "$start_time")" "${test_function}"
    fi

    return $rv
}
   266  
   267  # Tests listing objects for both v1 and v2 API.
# Tests listing objects for both v1 and v2 API.
# Uploads one object, then checks list-objects / list-objects-v2 with a
# matching prefix (must return exactly that key) and a non-matching prefix
# (must return nothing).
# NOTE(review): the list calls capture stdout only (no 2>&1), so on a CLI
# error $out may be empty and the logged error uninformative.
function test_list_objects() {
    # log start time
    start_time=$(get_time)

    function="make_bucket"
    bucket_name=$(make_bucket)
    rv=$?

    # if make bucket succeeds upload a file
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
        out=$($function 2>&1)
        rv=$?
    else
        # if make bucket fails, $bucket_name has the error output
        out="${bucket_name}"
    fi

    # if upload objects succeeds, list objects with existing prefix
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api list-objects --bucket ${bucket_name} --prefix datafile-1-kB"
        test_function=${function}
        out=$($function)
        rv=$?
        key_name=$(echo "$out" | jq -r .Contents[].Key)
        if [ $rv -eq 0 ] && [ "$key_name" != "datafile-1-kB" ]; then
            rv=1
            # since rv is 0, command passed, but didn't return expected value. In this case set the output
            out="list-objects with existing prefix failed"
        fi
    fi

    # if upload objects succeeds, list objects without existing prefix
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api list-objects --bucket ${bucket_name} --prefix linux"
        out=$($function)
        rv=$?
        key_name=$(echo "$out" | jq -r .Contents[].Key)
        if [ $rv -eq 0 ] && [ "$key_name" != "" ]; then
            rv=1
            out="list-objects without existing prefix failed"
        fi
    fi

    # if upload objects succeeds, list objectsv2 with existing prefix
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api list-objects-v2 --bucket ${bucket_name} --prefix datafile-1-kB"
        out=$($function)
        rv=$?
        key_name=$(echo "$out" | jq -r .Contents[].Key)
        if [ $rv -eq 0 ] && [ "$key_name" != "datafile-1-kB" ]; then
            rv=1
            out="list-objects-v2 with existing prefix failed"
        fi
    fi

    # if upload objects succeeds, list objectsv2 without existing prefix
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api list-objects-v2 --bucket ${bucket_name} --prefix linux"
        out=$($function)
        rv=$?
        key_name=$(echo "$out" | jq -r .Contents[].Key)
        if [ $rv -eq 0 ] && [ "$key_name" != "" ]; then
            rv=1
            out="list-objects-v2 without existing prefix failed"
        fi
    fi

    if [ $rv -eq 0 ]; then
        function="delete_bucket"
        out=$(delete_bucket "$bucket_name")
        rv=$?
        # remove download file
        rm -f /tmp/datafile-1-kB
    fi

    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        rm -f /tmp/datafile-1-kB
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi

    return $rv
}
   355  
   356  # Tests multipart API with 0 byte part.
   357  function test_multipart_upload_0byte() {
   358      # log start time
   359      start_time=$(get_time)
   360  
   361      function="make_bucket"
   362      bucket_name=$(make_bucket)
   363      object_name=${bucket_name}"-object"
   364      rv=$?
   365  
   366      # if make bucket succeeds upload a file
   367      if [ $rv -eq 0 ]; then
   368          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-0-b --bucket ${bucket_name} --key datafile-0-b"
   369          out=$($function 2>&1)
   370          rv=$?
   371      else
   372          # if make bucket fails, $bucket_name has the error output
   373          out="${bucket_name}"
   374      fi
   375  
   376      if [ $rv -eq 0 ]; then
   377          # create multipart
   378          function="${AWS} s3api create-multipart-upload --bucket ${bucket_name} --key ${object_name}"
   379          test_function=${function}
   380          out=$($function)
   381          rv=$?
   382          upload_id=$(echo "$out" | jq -r .UploadId)
   383      fi
   384  
   385      if [ $rv -eq 0 ]; then
   386          # Capture etag for part-number 1
   387          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-0-b --upload-id ${upload_id} --part-number 1"
   388          out=$($function)
   389          rv=$?
   390          etag1=$(echo "$out" | jq -r .ETag)
   391      fi
   392  
   393      if [ $rv -eq 0 ]; then
   394          # Create a multipart struct file for completing multipart transaction
   395          echo "{
   396              \"Parts\": [
   397                  {
   398                      \"ETag\": ${etag1},
   399                      \"PartNumber\": 1
   400                  }
   401              ]
   402          }" >> /tmp/multipart
   403      fi
   404  
   405      if [ $rv -eq 0 ]; then
   406          # Use saved etags to complete the multipart transaction
   407          function="${AWS} s3api complete-multipart-upload --multipart-upload file:///tmp/multipart --bucket ${bucket_name} --key ${object_name} --upload-id ${upload_id}"
   408          out=$($function)
   409          rv=$?
   410          etag=$(echo "$out" | jq -r .ETag | sed -e 's/^"//' -e 's/"$//')
   411          if [ "${etag}" == "" ]; then
   412              rv=1
   413              out="complete-multipart-upload failed"
   414          fi
   415      fi
   416  
   417      if [ $rv -eq 0 ]; then
   418          function="${AWS} s3api get-object --bucket ${bucket_name} --key ${object_name} /tmp/datafile-0-b"
   419          test_function=${function}
   420          out=$($function 2>&1)
   421          rv=$?
   422      fi
   423  
   424      if [ $rv -eq 0 ]; then
   425          ret_etag=$(echo "$out" | jq -r .ETag | sed -e 's/^"//' -e 's/"$//')
   426          # match etag
   427          if [ "$etag" != "$ret_etag" ]; then
   428              rv=1
   429              out="Etag mismatch for multipart 0 byte object"
   430          fi
   431          rm -f /tmp/datafile-0-b
   432      fi
   433  
   434      if [ $rv -eq 0 ]; then
   435          function="delete_bucket"
   436          out=$(delete_bucket "$bucket_name")
   437          rv=$?
   438          # remove temp file
   439          rm -f /tmp/multipart
   440      fi
   441  
   442      if [ $rv -eq 0 ]; then
   443          log_success "$(get_duration "$start_time")" "${test_function}"
   444      else
   445          # clean up and log error
   446          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   447          rm -f /tmp/multipart
   448          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   449      fi
   450  
   451      return $rv
   452  }
   453  
   454  # Tests multipart API by making each individual calls.
   455  function test_multipart_upload() {
   456      # log start time
   457      start_time=$(get_time)
   458  
   459      function="make_bucket"
   460      bucket_name=$(make_bucket)
   461      object_name=${bucket_name}"-object"
   462      rv=$?
   463  
   464      # if make bucket succeeds upload a file
   465      if [ $rv -eq 0 ]; then
   466          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
   467          out=$($function 2>&1)
   468          rv=$?
   469      else
   470          # if make bucket fails, $bucket_name has the error output
   471          out="${bucket_name}"
   472      fi
   473  
   474      if [ $rv -eq 0 ]; then
   475          # create multipart
   476          function="${AWS} s3api create-multipart-upload --bucket ${bucket_name} --key ${object_name}"
   477          test_function=${function}
   478          out=$($function)
   479          rv=$?
   480          upload_id=$(echo "$out" | jq -r .UploadId)
   481      fi
   482  
   483      if [ $rv -eq 0 ]; then
   484          # Capture etag for part-number 1
   485          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-5-MB --upload-id ${upload_id} --part-number 1"
   486          out=$($function)
   487          rv=$?
   488          etag1=$(echo "$out" | jq -r .ETag)
   489      fi
   490  
   491      if [ $rv -eq 0 ]; then
   492          # Capture etag for part-number 2
   493          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-1-kB --upload-id ${upload_id} --part-number 2"
   494          out=$($function)
   495          rv=$?
   496          etag2=$(echo "$out" | jq -r .ETag)
   497          # Create a multipart struct file for completing multipart transaction
   498          echo "{
   499              \"Parts\": [
   500                  {
   501                      \"ETag\": ${etag1},
   502                      \"PartNumber\": 1
   503                  },
   504                  {
   505                      \"ETag\": ${etag2},
   506                      \"PartNumber\": 2
   507                  }
   508              ]
   509          }" >> /tmp/multipart
   510      fi
   511  
   512      if [ $rv -eq 0 ]; then
   513          # Use saved etags to complete the multipart transaction
   514          function="${AWS} s3api complete-multipart-upload --multipart-upload file:///tmp/multipart --bucket ${bucket_name} --key ${object_name} --upload-id ${upload_id}"
   515          out=$($function)
   516          rv=$?
   517          finalETag=$(echo "$out" | jq -r .ETag | sed -e 's/^"//' -e 's/"$//')
   518          if [ "${finalETag}" == "" ]; then
   519              rv=1
   520              out="complete-multipart-upload failed"
   521          fi
   522      fi
   523  
   524      if [ $rv -eq 0 ]; then
   525          function="delete_bucket"
   526          out=$(delete_bucket "$bucket_name")
   527          rv=$?
   528          # remove temp file
   529          rm -f /tmp/multipart
   530      fi
   531  
   532      if [ $rv -eq 0 ]; then
   533          log_success "$(get_duration "$start_time")" "${test_function}"
   534      else
   535          # clean up and log error
   536          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   537          rm -f /tmp/multipart
   538          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   539      fi
   540  
   541      return $rv
   542  }
   543  
   544  # List number of objects based on the maxKey
   545  # value set.
   546  function test_max_key_list() {
   547      # log start time
   548      start_time=$(get_time)
   549  
   550      function="make_bucket"
   551      bucket_name=$(make_bucket)
   552      rv=$?
   553  
   554      # if make bucket succeeds upload a file
   555      if [ $rv -eq 0 ]; then
   556          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-b --bucket ${bucket_name} --key datafile-1-b"
   557          out=$($function 2>&1)
   558          rv=$?
   559      else
   560          # if make bucket fails, $bucket_name has the error output
   561          out="${bucket_name}"
   562      fi
   563  
   564      # copy object server side
   565      if [ $rv -eq 0 ]; then
   566          function="${AWS} s3api copy-object --bucket ${bucket_name} --key datafile-1-b-copy --copy-source ${bucket_name}/datafile-1-b"
   567          out=$($function)
   568          rv=$?
   569      fi
   570  
   571      if [ $rv -eq 0 ]; then
   572          function="${AWS} s3api list-objects-v2 --bucket ${bucket_name} --max-keys 1"
   573          test_function=${function}
   574          out=$($function 2>&1)
   575          rv=$?
   576          if [ $rv -eq 0 ]; then
   577              out=$(echo "$out" | jq '.KeyCount')
   578              rv=$?
   579          fi
   580      fi
   581  
   582      if [ $rv -eq 0 ]; then
   583          function="delete_bucket"
   584          out=$(delete_bucket "$bucket_name")
   585          rv=$?
   586          # The command passed, but the delete_bucket failed
   587          out="delete_bucket for test_max_key_list failed"
   588      fi
   589  
   590      if [ $rv -eq 0 ]; then
   591          log_success "$(get_duration "$start_time")" "${test_function}"
   592      else
   593          # clean up and log error
   594          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   595          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   596      fi
   597  
   598      return $rv
   599  }
   600  
   601  # Copy object tests for server side copy
   602  # of the object, validates returned md5sum.
# Copy object tests for server side copy
# of the object, validates returned md5sum.
# NOTE(review): comparing the CopyObjectResult ETag to an md5 assumes the
# server returns md5-style ETags for non-multipart objects — true for the
# 1 kB fixture here, not in general.
function test_copy_object() {
    # log start time
    start_time=$(get_time)

    function="make_bucket"
    bucket_name=$(make_bucket)
    rv=$?

    # if make bucket succeeds upload a file
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
        out=$($function 2>&1)
        rv=$?
    else
        # if make bucket fails, $bucket_name has the error output
        out="${bucket_name}"
    fi

    # copy object server side
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api copy-object --bucket ${bucket_name} --key datafile-1-kB-copy --copy-source ${bucket_name}/datafile-1-kB"
        test_function=${function}
        out=$($function)
        rv=$?
        # strip the surrounding quotes jq -r leaves on the embedded ETag value
        hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//')
        if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then
            # Verification failed
            rv=1
            out="Hash mismatch expected $HASH_1_KB, got $hash2"
        fi
    fi

    # bucket cleanup happens on both success and failure paths
    ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi

    return $rv
}
   644  
   645  # Copy object tests for server side copy
   646  # of the object, validates returned md5sum.
   647  # validates change in storage class as well
   648  function test_copy_object_storage_class() {
   649      # log start time
   650      start_time=$(get_time)
   651  
   652      function="make_bucket"
   653      bucket_name=$(make_bucket)
   654      rv=$?
   655  
   656      # if make bucket succeeds upload a file
   657      if [ $rv -eq 0 ]; then
   658          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
   659          out=$($function 2>&1)
   660          rv=$?
   661      else
   662          # if make bucket fails, $bucket_name has the error output
   663          out="${bucket_name}"
   664      fi
   665  
   666      # copy object server side
   667      if [ $rv -eq 0 ]; then
   668          function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB-copy --copy-source ${bucket_name}/datafile-1-kB"
   669          test_function=${function}
   670          out=$($function 2>&1)
   671          rv=$?
   672          # if this functionality is not implemented return right away.
   673          if [ $rv -ne 0 ]; then
   674              if echo "$out" | grep -q "NotImplemented"; then
   675                  ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   676                  return 0
   677              fi
   678          fi
   679          hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//')
   680          if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then
   681              # Verification failed
   682              rv=1
   683              out="Hash mismatch expected $HASH_1_KB, got $hash2"
   684          fi
   685          # if copy succeeds stat the object
   686          if [ $rv -eq 0 ]; then
   687              function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB-copy"
   688              # save the ref to function being tested, so it can be logged
   689              test_function=${function}
   690              out=$($function 2>&1)
   691              storageClass=$(echo "$out" | jq -r .StorageClass)
   692              rv=$?
   693          fi
   694          # if head-object succeeds, verify metadata has storage class
   695          if [ $rv -eq 0 ]; then
   696              if [ "${storageClass}" == "null" ]; then
   697                  rv=1
   698                  out="StorageClass was not applied"
   699              elif [ "${storageClass}" == "STANDARD" ]; then
   700                  rv=1
   701                  out="StorageClass was applied incorrectly"
   702              fi
   703          fi
   704      fi
   705  
   706      ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   707      if [ $rv -eq 0 ]; then
   708          log_success "$(get_duration "$start_time")" "${test_function}"
   709      else
   710          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   711      fi
   712  
   713      return $rv
   714  }
   715  
   716  # Copy object tests for server side copy
   717  # to itself by changing storage class
   718  function test_copy_object_storage_class_same() {
   719      # log start time
   720      start_time=$(get_time)
   721  
   722      function="make_bucket"
   723      bucket_name=$(make_bucket)
   724      rv=$?
   725  
   726      # if make bucket succeeds upload a file
   727      if [ $rv -eq 0 ]; then
   728          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
   729          out=$($function 2>&1)
   730          rv=$?
   731      else
   732          # if make bucket fails, $bucket_name has the error output
   733          out="${bucket_name}"
   734      fi
   735  
   736      # copy object server side
   737      if [ $rv -eq 0 ]; then
   738          function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB --copy-source ${bucket_name}/datafile-1-kB"
   739          test_function=${function}
   740          out=$($function 2>&1)
   741          rv=$?
   742          # if this functionality is not implemented return right away.
   743          if [ $rv -ne 0 ]; then
   744              if echo "$out" | grep -q "NotImplemented"; then
   745                  ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   746                  return 0
   747              fi
   748          fi
   749          hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//')
   750          if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then
   751              # Verification failed
   752              rv=1
   753              out="Hash mismatch expected $HASH_1_KB, got $hash2"
   754          fi
   755          # if copy succeeds stat the object
   756          if [ $rv -eq 0 ]; then
   757              function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB"
   758              # save the ref to function being tested, so it can be logged
   759              test_function=${function}
   760              out=$($function 2>&1)
   761              storageClass=$(echo "$out" | jq -r .StorageClass)
   762              rv=$?
   763          fi
   764          # if head-object succeeds, verify metadata has storage class
   765          if [ $rv -eq 0 ]; then
   766              if [ "${storageClass}" == "null" ]; then
   767                  rv=1
   768                  out="StorageClass was not applied"
   769              elif [ "${storageClass}" == "STANDARD" ]; then
   770                  rv=1
   771                  out="StorageClass was applied incorrectly"
   772              fi
   773          fi
   774      fi
   775  
   776      ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   777      if [ $rv -eq 0 ]; then
   778          log_success "$(get_duration "$start_time")" "${test_function}"
   779      else
   780          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   781      fi
   782  
   783      return $rv
   784  }
   785  
   786  # Tests for presigned URL success case, presigned URL
   787  # is correct and accessible - we calculate md5sum of
   788  # the object and validate it against a local files md5sum.
# Tests for presigned URL success case, presigned URL
# is correct and accessible - we calculate md5sum of
# the object and validate it against a local files md5sum.
# NOTE(review): curl's exit status is not checked directly; a download
# failure is only caught indirectly by the md5 mismatch that follows.
function test_presigned_object() {
    # log start time
    start_time=$(get_time)

    function="make_bucket"
    bucket_name=$(make_bucket)
    rv=$?

    # if make bucket succeeds upload a file
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
        out=$($function 2>&1)
        rv=$?
    else
        # if make bucket fails, $bucket_name has the error output
        out="${bucket_name}"
    fi

    if [ $rv -eq 0 ]; then
        function="${AWS} s3 presign s3://${bucket_name}/datafile-1-kB"
        test_function=${function}
        url=$($function)
        rv=$?
        # fetch through the presigned URL and verify the payload checksum
        curl -sS -X GET "${url}" > /tmp/datafile-1-kB
        hash2=$(md5sum /tmp/datafile-1-kB | awk '{print $1}')
        if [ "$HASH_1_KB" == "$hash2" ]; then
            function="delete_bucket"
            out=$(delete_bucket "$bucket_name")
            rv=$?
            # remove download file
            rm -f /tmp/datafile-1-kB
        else
            rv=1
            out="Checksum verification failed for downloaded object"
        fi
    fi

    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi

    return $rv
}
   836  
   837  # Tests creating and deleting an object - 10MiB
   838  function test_upload_object_10() {
   839      # log start time
   840      start_time=$(get_time)
   841  
   842      function="make_bucket"
   843      bucket_name=$(make_bucket)
   844      rv=$?
   845  
   846      # if make bucket succeeds upload a file
   847      if [ $rv -eq 0 ]; then
   848          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-10-MB --bucket ${bucket_name} --key datafile-10-MB"
   849          out=$($function 2>&1)
   850          rv=$?
   851      else
   852          # if make bucket fails, $bucket_name has the error output
   853          out="${bucket_name}"
   854      fi
   855  
   856      if [ $rv -eq 0 ]; then
   857          log_success "$(get_duration "$start_time")" "${test_function}"
   858      else
   859          # clean up and log error
   860          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   861          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   862      fi
   863  
   864      return $rv
   865  }
   866  
# Tests the multipart API by making each individual call with 10MiB part size.
   868  function test_multipart_upload_10() {
   869      # log start time
   870      start_time=$(get_time)
   871  
   872      function="make_bucket"
   873      bucket_name=$(make_bucket)
   874      object_name=${bucket_name}"-object"
   875      rv=$?
   876  
   877      if [ $rv -eq 0 ]; then
   878          # create multipart
   879          function="${AWS} s3api create-multipart-upload --bucket ${bucket_name} --key ${object_name}"
   880          test_function=${function}
   881          out=$($function)
   882          rv=$?
   883          upload_id=$(echo "$out" | jq -r .UploadId)
   884      fi
   885  
   886      if [ $rv -eq 0 ]; then
   887          # Capture etag for part-number 1
   888          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-10-MB --upload-id ${upload_id} --part-number 1"
   889          out=$($function)
   890          rv=$?
   891          etag1=$(echo "$out" | jq -r .ETag)
   892      fi
   893  
   894      if [ $rv -eq 0 ]; then
   895          # Capture etag for part-number 2
   896          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-10-MB --upload-id ${upload_id} --part-number 2"
   897          out=$($function)
   898          rv=$?
   899          etag2=$(echo "$out" | jq -r .ETag)
   900          # Create a multipart struct file for completing multipart transaction
   901          echo "{
   902              \"Parts\": [
   903                  {
   904                      \"ETag\": ${etag1},
   905                      \"PartNumber\": 1
   906                  },
   907                  {
   908                      \"ETag\": ${etag2},
   909                      \"PartNumber\": 2
   910                  }
   911              ]
   912          }" >> /tmp/multipart
   913      fi
   914  
   915      if [ $rv -eq 0 ]; then
   916          # Use saved etags to complete the multipart transaction
   917          function="${AWS} s3api complete-multipart-upload --multipart-upload file:///tmp/multipart --bucket ${bucket_name} --key ${object_name} --upload-id ${upload_id}"
   918          out=$($function)
   919          rv=$?
   920          finalETag=$(echo "$out" | jq -r .ETag | sed -e 's/^"//' -e 's/"$//')
   921          if [ "${finalETag}" == "" ]; then
   922              rv=1
   923              out="complete-multipart-upload failed"
   924          fi
   925      fi
   926  
   927      if [ $rv -eq 0 ]; then
   928          function="delete_bucket"
   929          out=$(delete_bucket "$bucket_name")
   930          rv=$?
   931          # remove temp file
   932          rm -f /tmp/multipart
   933      fi
   934  
   935      if [ $rv -eq 0 ]; then
   936          log_success "$(get_duration "$start_time")" "${test_function}"
   937      else
   938          # clean up and log error
   939          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   940          rm -f /tmp/multipart
   941          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   942      fi
   943  
   944      return $rv
   945  }
   946  
   947  # Tests lifecycle of a bucket.
   948  function test_bucket_lifecycle() {
   949      # log start time
   950      start_time=$(get_time)
   951  
   952      echo "{ \"Rules\": [ { \"Expiration\": { \"Days\": 365 },\"ID\": \"Bucketlifecycle test\", \"Filter\": { \"Prefix\": \"\" }, \"Status\": \"Enabled\" } ] }" >> /tmp/lifecycle.json
   953  
   954      function="make_bucket"
   955      bucket_name=$(make_bucket)
   956      rv=$?
   957  
   958      # if make bucket succeeds put bucket lifecycle
   959      if [ $rv -eq 0 ]; then
   960          function="${AWS} s3api put-bucket-lifecycle-configuration --bucket ${bucket_name} --lifecycle-configuration file:///tmp/lifecycle.json"
   961          out=$($function 2>&1)
   962          rv=$?
   963      else
   964          # if make bucket fails, $bucket_name has the error output
   965          out="${bucket_name}"
   966      fi
   967  
   968      # if put bucket lifecycle succeeds get bucket lifecycle
   969      if [ $rv -eq 0 ]; then
   970          function="${AWS} s3api get-bucket-lifecycle-configuration --bucket ${bucket_name}"
   971          out=$($function 2>&1)
   972          rv=$?
   973      fi
   974  
   975      # if get bucket lifecycle succeeds delete bucket lifecycle
   976      if [ $rv -eq 0 ]; then
   977          function="${AWS} s3api delete-bucket-lifecycle --bucket ${bucket_name}"
   978          out=$($function 2>&1)
   979          rv=$?
   980      fi
   981  
   982      # delete lifecycle.json
   983      rm -f /tmp/lifecycle.json
   984  
   985      # delete bucket
   986      if [ $rv -eq 0 ]; then
   987          function="delete_bucket"
   988          out=$(delete_bucket "$bucket_name")
   989          rv=$?
   990      fi
   991  
   992      if [ $rv -eq 0 ]; then
   993          log_success "$(get_duration "$start_time")" "test_bucket_lifecycle"
   994      else
   995          # clean up and log error
   996          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
   997          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
   998      fi
   999  
  1000      return $rv
  1001  }
  1002  
  1003  # Tests `aws s3 cp` by uploading a local file.
  1004  function test_aws_s3_cp() {
  1005      file_name="${MINT_DATA_DIR}/datafile-65-MB"
  1006  
  1007      # log start time
  1008      start_time=$(get_time)
  1009  
  1010      function="make_bucket"
  1011      bucket_name=$(make_bucket)
  1012      rv=$?
  1013  
  1014      # if make bucket succeeds upload a file using cp
  1015      if [ $rv -eq 0 ]; then
  1016          function="${AWS} s3 cp $file_name s3://${bucket_name}/$(basename "$file_name")"
  1017          test_function=${function}
  1018          out=$($function 2>&1)
  1019          rv=$?
  1020      else
  1021          # if make bucket fails, $bucket_name has the error output
  1022          out="${bucket_name}"
  1023      fi
  1024  
  1025      if [ $rv -eq 0 ]; then
  1026          function="${AWS} s3 rm s3://${bucket_name}/$(basename "$file_name")"
  1027          out=$($function 2>&1)
  1028          rv=$?
  1029      fi
  1030  
  1031      if [ $rv -eq 0 ]; then
  1032          function="${AWS} s3 rb s3://${bucket_name}/"
  1033          out=$($function 2>&1)
  1034          rv=$?
  1035      fi
  1036  
  1037      if [ $rv -eq 0 ]; then
  1038          log_success "$(get_duration "$start_time")" "${test_function}"
  1039      else
  1040          # clean up and log error
  1041          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1042          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1043      fi
  1044  
  1045      return $rv
  1046  }
  1047  
# Tests `aws s3 sync` by mirroring all the
# local content to a remote bucket.
  1050  function test_aws_s3_sync() {
  1051      # log start time
  1052      start_time=$(get_time)
  1053  
  1054      function="make_bucket"
  1055      bucket_name=$(make_bucket)
  1056      rv=$?
  1057  
  1058      # if make bucket succeeds sync all the files in a directory
  1059      if [ $rv -eq 0 ]; then
  1060          function="${AWS} s3 sync --no-progress $MINT_DATA_DIR s3://${bucket_name}/"
  1061          test_function=${function}
  1062          out=$($function 2>&1)
  1063          rv=$?
  1064      else
  1065          # if make bucket fails, $bucket_name has the error output
  1066          out="${bucket_name}"
  1067      fi
  1068  
  1069      # remove files recusively
  1070      if [ $rv -eq 0 ]; then
  1071          function="${AWS} s3 rm --recursive s3://${bucket_name}/"
  1072          out=$($function 2>&1)
  1073          rv=$?
  1074      fi
  1075  
  1076      # delete bucket
  1077      if [ $rv -eq 0 ]; then
  1078          function="delete_bucket"
  1079          out=$(delete_bucket "$bucket_name")
  1080          rv=$?
  1081      fi
  1082  
  1083      if [ $rv -eq 0 ]; then
  1084          log_success "$(get_duration "$start_time")" "${test_function}"
  1085      else
  1086          # clean up and log error
  1087          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1088          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1089      fi
  1090  
  1091      return $rv
  1092  }
  1093  
  1094  # list objects negative test - tests for following conditions.
  1095  # v1 API with max-keys=-1 and max-keys=0
  1096  # v2 API with max-keys=-1 and max-keys=0
  1097  function test_list_objects_error() {
  1098      # log start time
  1099      start_time=$(get_time)
  1100  
  1101      function="make_bucket"
  1102      bucket_name=$(make_bucket)
  1103      rv=$?
  1104  
  1105      # if make bucket succeeds upload a file
  1106      if [ $rv -eq 0 ]; then
  1107          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
  1108          out=$($function 2>&1)
  1109          rv=$?
  1110      else
  1111          # if make bucket fails, $bucket_name has the error output
  1112          out="${bucket_name}"
  1113      fi
  1114  
  1115      if [ $rv -eq 0 ]; then
  1116          # Server replies an error for v1 with max-key=-1
  1117          function="${AWS} s3api list-objects --bucket ${bucket_name} --prefix datafile-1-kB --max-keys=-1"
  1118          test_function=${function}
  1119          out=$($function 2>&1)
  1120          rv=$?
  1121          if [ $rv -ne 255 ]; then
  1122              rv=1
  1123          else
  1124              rv=0
  1125          fi
  1126      fi
  1127  
  1128      if [ $rv -eq 0 ]; then
  1129          # Server replies an error for v2 with max-keys=-1
  1130          function="${AWS} s3api list-objects-v2 --bucket ${bucket_name} --prefix datafile-1-kB --max-keys=-1"
  1131          test_function=${function}
  1132          out=$($function 2>&1)
  1133          rv=$?
  1134          if [ $rv -ne 255 ]; then
  1135              rv=1
  1136          else
  1137              rv=0
  1138          fi
  1139      fi
  1140  
  1141      if [ $rv -eq 0 ]; then
  1142          # Server returns success with no keys when max-keys=0
  1143          function="${AWS} s3api list-objects-v2 --bucket ${bucket_name} --prefix datafile-1-kB --max-keys=0"
  1144          out=$($function 2>&1)
  1145          rv=$?
  1146          if [ $rv -eq 0 ]; then
  1147              function="delete_bucket"
  1148              out=$(delete_bucket "$bucket_name")
  1149              rv=$?
  1150          fi
  1151      fi
  1152  
  1153      if [ $rv -eq 0 ]; then
  1154          log_success "$(get_duration "$start_time")" "${test_function}"
  1155      else
  1156          # clean up and log error
  1157          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1158          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1159      fi
  1160  
  1161      return $rv
  1162  }
  1163  
  1164  # put object negative test - tests for following conditions.
  1165  # - invalid object name.
  1166  # - invalid Content-Md5
  1167  # - invalid Content-Length
  1168  function test_put_object_error() {
  1169      # log start time
  1170      start_time=$(get_time)
  1171  
  1172      function="make_bucket"
  1173      bucket_name=$(make_bucket)
  1174      rv=$?
  1175  
  1176      # if make bucket succeeds upload an object without content-md5.
  1177      if [ $rv -eq 0 ]; then
  1178          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --content-md5 invalid"
  1179          test_function=${function}
  1180          out=$($function 2>&1)
  1181          rv=$?
  1182          if [ $rv -ne 255 ]; then
  1183              rv=1
  1184          else
  1185              rv=0
  1186          fi
  1187      fi
  1188  
  1189      # upload an object without content-length.
  1190      if [ $rv -eq 0 ]; then
  1191          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --content-length -1"
  1192          test_function=${function}
  1193          out=$($function 2>&1)
  1194          rv=$?
  1195          if [ $rv -ne 255 ]; then
  1196              rv=1
  1197          else
  1198              rv=0
  1199          fi
  1200      fi
  1201  
  1202      if [ $rv -eq 0 ]; then
  1203          function="delete_bucket"
  1204          out=$(delete_bucket "$bucket_name")
  1205          rv=$?
  1206      fi
  1207  
  1208      if [ $rv -eq 0 ]; then
  1209          log_success "$(get_duration "$start_time")" "${test_function}"
  1210      else
  1211          # clean up and log error
  1212          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1213          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1214      fi
  1215  
  1216      return $rv
  1217  }
  1218  # tests server side encryption headers for get and put calls
  1219  function test_serverside_encryption() {
  1220      #skip server side encryption tests if HTTPS disabled.
  1221      if [ "$ENABLE_HTTPS" != "1" ]; then
  1222          return 0
  1223      fi
  1224      # log start time
  1225      start_time=$(get_time)
  1226  
  1227      function="make_bucket"
  1228      bucket_name=$(make_bucket)
  1229      rv=$?
  1230  
  1231      # put object with server side encryption headers
  1232      if [ $rv -eq 0 ]; then
  1233          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1234          test_function=${function}
  1235          out=$($function 2>&1)
  1236          rv=$?
  1237      fi
  1238      # now get encrypted object from server
  1239      if [ $rv -eq 0 ]; then
  1240          etag1=$(echo "$out" | jq -r .ETag)
  1241          sse_customer_key1=$(echo "$out" | jq -r .SSECustomerKeyMD5)
  1242          sse_customer_algo1=$(echo "$out" | jq -r .SSECustomerAlgorithm)
  1243  
  1244          function="${AWS} s3api get-object --bucket ${bucket_name} --key datafile-1-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg== /tmp/datafile-1-kB"
  1245          test_function=${function}
  1246          out=$($function 2>&1)
  1247          rv=$?
  1248      fi
  1249      if [ $rv -eq 0 ]; then
  1250          etag2=$(echo "$out" | jq -r .ETag)
  1251          sse_customer_key2=$(echo "$out" | jq -r .SSECustomerKeyMD5)
  1252          sse_customer_algo2=$(echo "$out" | jq -r .SSECustomerAlgorithm)
  1253          hash2=$(md5sum /tmp/datafile-1-kB | awk '{print $1}')
  1254          # match downloaded object's hash to original
  1255          if [ "$HASH_1_KB" == "$hash2" ]; then
  1256              function="delete_bucket"
  1257              out=$(delete_bucket "$bucket_name")
  1258              rv=$?
  1259              # remove download file
  1260              rm -f /tmp/datafile-1-kB
  1261          else
  1262              rv=1
  1263              out="Checksum verification failed for downloaded object"
  1264          fi
  1265          # match etag and SSE headers
  1266          if [ "$etag1" != "$etag2" ]; then
  1267              rv=1
  1268              out="Etag mismatch for object encrypted with server side encryption"
  1269          fi
  1270          if [ "$sse_customer_algo1" != "$sse_customer_algo2" ]; then
  1271              rv=1
  1272              out="sse customer algorithm mismatch"
  1273          fi
  1274          if [ "$sse_customer_key1" != "$sse_customer_key2" ]; then
  1275              rv=1
  1276              out="sse customer key mismatch"
  1277          fi
  1278      fi
  1279  
  1280      if [ $rv -eq 0 ]; then
  1281          log_success "$(get_duration "$start_time")" "${test_function}"
  1282      else
  1283          # clean up and log error
  1284          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1285          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1286      fi
  1287  
  1288      return $rv
  1289  }
  1290  
  1291  # tests server side encryption headers for multipart put
  1292  function test_serverside_encryption_multipart() {
  1293      #skip server side encryption tests if HTTPS disabled.
  1294      if [ "$ENABLE_HTTPS" != "1" ]; then
  1295          return 0
  1296      fi
  1297      # log start time
  1298      start_time=$(get_time)
  1299  
  1300      function="make_bucket"
  1301      bucket_name=$(make_bucket)
  1302      rv=$?
  1303  
  1304      # put object with server side encryption headers
  1305      if [ $rv -eq 0 ]; then
  1306          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-65-MB --bucket ${bucket_name} --key datafile-65-MB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1307          test_function=${function}
  1308          out=$($function 2>&1)
  1309          rv=$?
  1310      fi
  1311      # now get encrypted object from server
  1312      if [ $rv -eq 0 ]; then
  1313          etag1=$(echo "$out" | jq -r .ETag)
  1314          sse_customer_key1=$(echo "$out" | jq -r .SSECustomerKeyMD5)
  1315          sse_customer_algo1=$(echo "$out" | jq -r .SSECustomerAlgorithm)
  1316  
  1317          function="${AWS} s3api get-object --bucket ${bucket_name} --key datafile-65-MB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg== /tmp/datafile-65-MB"
  1318          test_function=${function}
  1319          out=$($function 2>&1)
  1320          rv=$?
  1321      fi
  1322      if [ $rv -eq 0 ]; then
  1323          etag2=$(echo "$out" | jq -r .ETag)
  1324          sse_customer_key2=$(echo "$out" | jq -r .SSECustomerKeyMD5)
  1325          sse_customer_algo2=$(echo "$out" | jq -r .SSECustomerAlgorithm)
  1326          hash2=$(md5sum /tmp/datafile-65-MB | awk '{print $1}')
  1327          # match downloaded object's hash to original
  1328          if [ "$HASH_65_MB" == "$hash2" ]; then
  1329              function="delete_bucket"
  1330              out=$(delete_bucket "$bucket_name")
  1331              rv=$?
  1332              # remove download file
  1333              rm -f /tmp/datafile-65-MB
  1334          else
  1335              rv=1
  1336              out="Checksum verification failed for downloaded object"
  1337          fi
  1338          # match etag and SSE headers
  1339          if [ "$etag1" != "$etag2" ]; then
  1340              rv=1
  1341              out="Etag mismatch for object encrypted with server side encryption"
  1342          fi
  1343          if [ "$sse_customer_algo1" != "$sse_customer_algo2" ]; then
  1344              rv=1
  1345              out="sse customer algorithm mismatch"
  1346          fi
  1347          if [ "$sse_customer_key1" != "$sse_customer_key2" ]; then
  1348              rv=1
  1349              out="sse customer key mismatch"
  1350          fi
  1351      fi
  1352  
  1353      if [ $rv -eq 0 ]; then
  1354          log_success "$(get_duration "$start_time")" "${test_function}"
  1355      else
  1356          # clean up and log error
  1357          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1358          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1359      fi
  1360  
  1361      return $rv
  1362  }
  1363  
  1364  # tests encrypted copy from multipart encrypted object to
  1365  # single part encrypted object. This test in particular checks if copy
  1366  # succeeds for the case where encryption overhead for individually
  1367  # encrypted parts vs encryption overhead for the original datastream
  1368  # differs.
  1369  function test_serverside_encryption_multipart_copy() {
  1370      #skip server side encryption tests if HTTPS disabled.
  1371      if [ "$ENABLE_HTTPS" != "1" ]; then
  1372          return 0
  1373      fi
  1374      # log start time
  1375      start_time=$(get_time)
  1376  
  1377      function="make_bucket"
  1378      bucket_name=$(make_bucket)
  1379      object_name=${bucket_name}"-object"
  1380      rv=$?
  1381  
  1382      if [ $rv -eq 0 ]; then
  1383          # create multipart
  1384          function="${AWS} s3api create-multipart-upload --bucket ${bucket_name} --key ${object_name} --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1385          out=$($function)
  1386          rv=$?
  1387          upload_id=$(echo "$out" | jq -r .UploadId)
  1388      fi
  1389  
  1390      if [ $rv -eq 0 ]; then
  1391          # Capture etag for part-number 1
  1392          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-5243880-b --upload-id ${upload_id} --part-number 1 --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1393          out=$($function)
  1394          rv=$?
  1395          etag1=$(echo "$out" | jq -r .ETag)
  1396      fi
  1397  
  1398      if [ $rv -eq 0 ]; then
  1399          # Capture etag for part-number 2
  1400          function="${AWS} s3api upload-part --bucket ${bucket_name} --key ${object_name} --body ${MINT_DATA_DIR}/datafile-5243880-b --upload-id ${upload_id} --part-number 2 --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1401          out=$($function)
  1402          rv=$?
  1403          etag2=$(echo "$out" | jq -r .ETag)
  1404          # Create a multipart struct file for completing multipart transaction
  1405          echo "{
  1406              \"Parts\": [
  1407                  {
  1408                      \"ETag\": ${etag1},
  1409                      \"PartNumber\": 1
  1410                  },
  1411                  {
  1412                      \"ETag\": ${etag2},
  1413                      \"PartNumber\": 2
  1414                  }
  1415              ]
  1416          }" >> /tmp/multipart
  1417      fi
  1418  
  1419      if [ $rv -eq 0 ]; then
  1420          # Use saved etags to complete the multipart transaction
  1421          function="${AWS} s3api complete-multipart-upload --multipart-upload file:///tmp/multipart --bucket ${bucket_name} --key ${object_name} --upload-id ${upload_id}"
  1422          out=$($function)
  1423          rv=$?
  1424          finalETag=$(echo "$out" | jq -r .ETag | sed -e 's/^"//' -e 's/"$//')
  1425          if [ "${finalETag}" == "" ]; then
  1426              rv=1
  1427              out="complete-multipart-upload failed"
  1428          fi
  1429      fi
  1430  
  1431       # copy object server side
  1432      if [ $rv -eq 0 ]; then
  1433          function="${AWS} s3api copy-object --bucket ${bucket_name} --key ${object_name}-copy --copy-source ${bucket_name}/${object_name} --copy-source-sse-customer-algorithm AES256 --copy-source-sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --copy-source-sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg== --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1434          test_function=${function}
  1435          out=$($function)
  1436          rv=$?
  1437          if [ $rv -ne 255 ]; then
  1438              rv=1
  1439          else
  1440              rv=0
  1441          fi
  1442      fi
  1443  
  1444      if [ $rv -eq 0 ]; then
  1445          log_success "$(get_duration "$start_time")" "${test_function}"
  1446      else
  1447          # clean up and log error
  1448          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1449          rm -f /tmp/multipart
  1450          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1451      fi
  1452  
  1453      return $rv
  1454  }
  1455  # tests server side encryption headers for range get calls
  1456  function test_serverside_encryption_get_range() {
  1457      #skip server side encryption tests if HTTPS disabled.
  1458      if [ "$ENABLE_HTTPS" != "1" ]; then
  1459          return 0
  1460      fi
  1461      # log start time
  1462      start_time=$(get_time)
  1463  
  1464      function="make_bucket"
  1465      bucket_name=$(make_bucket)
  1466      rv=$?
  1467      # put object with server side encryption headers
  1468      if [ $rv -eq 0 ]; then
  1469          function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-10-kB --bucket ${bucket_name} --key datafile-10-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
  1470          test_function=${function}
  1471          out=$($function 2>&1)
  1472          rv=$?
  1473      fi
  1474      # now get encrypted object from server for range 500-999
  1475      if [ $rv -eq 0 ]; then
  1476          etag1=$(echo "$out" | jq -r .ETag)
  1477          sse_customer_key1=$(echo "$out" | jq -r .SSECustomerKeyMD5)
  1478          sse_customer_algo1=$(echo "$out" | jq -r .SSECustomerAlgorithm)
  1479          function="${AWS} s3api get-object --bucket ${bucket_name} --key datafile-10-kB --range bytes=500-999 --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg== /tmp/datafile-10-kB"
  1480          test_function=${function}
  1481          out=$($function 2>&1)
  1482          rv=$?
  1483      fi
  1484      if [ $rv -eq 0 ]; then
  1485          cnt=$(stat -c%s /tmp/datafile-10-kB)
  1486          if [ "$cnt" -ne 500 ]; then
  1487              rv=1
  1488          fi
  1489      fi
  1490      if [ $rv -eq 0 ]; then
  1491          log_success "$(get_duration "$start_time")" "${test_function}"
  1492      else
  1493          # clean up and log error
  1494          ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1495          log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1496      fi
  1497      return $rv
  1498  }
  1499  
  1500  # tests server side encryption error for get and put calls
# Negative tests for SSE-C headers; awscli exits 255 on a server-side error,
# so each step treats exit code 255 as the expected (PASS) outcome:
#   1. put with a truncated sse-customer-key-md5 value -> must be rejected
#   2. put with sse-customer-key but no sse-customer-algorithm -> rejected
#   3. valid SSE-C put, then a get with a wrong sse-customer-key -> rejected
# Skipped unless ENABLE_HTTPS=1. Returns 0 on success.
function test_serverside_encryption_error() {
    #skip server side encryption tests if HTTPS disabled.
    if [ "$ENABLE_HTTPS" != "1" ]; then
        return 0
    fi
    # log start time
    start_time=$(get_time)

    function="make_bucket"
    bucket_name=$(make_bucket)
    rv=$?

    # put object with SSE-C headers whose sse-customer-key-md5 value does not
    # match the key (truncated base64) — the server must reject it
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg"
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # NOTE(review): this check runs at top level, so it also fires when the
    # put above was skipped (make_bucket failure leaves rv=1 -> stays failed)
    if [ $rv -ne 255 ]; then
        rv=1
    else
        rv=0
    fi
    # put object with the sse-customer-algorithm header missing entirely —
    # the server must reject an SSE-C request without the algorithm
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB  --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # 255 (rejection) is the expected outcome for the previous step as well
    if [ $rv -ne 255 ]; then
        rv=1
    else
        rv=0
    fi

    # put object with valid server side encryption headers — this one must
    # succeed so the wrong-key get below has an object to run against
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ= --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg=="
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi

    # get the encrypted object with a non-matching sse-customer-key — the
    # server must refuse to decrypt
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api get-object --bucket ${bucket_name} --key datafile-1-kB --sse-customer-algorithm AES256 --sse-customer-key MzJieXRlc --sse-customer-key-md5 7PpPLAK26ONlVUGOWlusfg== /tmp/datafile-1-kB"
        test_function=${function}
        out=$($function 2>&1)
        rv=$?
    fi
    # again, a 255 rejection is the expected (PASS) outcome
    if [ $rv -ne 255 ]; then
        rv=1
    else
        rv=0
    fi
    # delete bucket
    if [ $rv -eq 0 ]; then
        function="delete_bucket"
        out=$(delete_bucket "$bucket_name")
        rv=$?
    fi
    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi

    return $rv
}
  1576  
  1577  # # WORM bucket tests.
  1578  # function test_worm_bucket() {
  1579  #     # log start time
  1580  #     start_time=$(get_time)
  1581  
  1582  #     # Make bucket
  1583  #     bucket_name="awscli-mint-test-bucket-$RANDOM"
  1584  #     function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket"
  1585  
  1586  #     # execute the test
  1587  #     out=$($function 2>&1)
  1588  #     rv=$?
  1589  
  1590  #     if [ $rv -ne 0 ]; then
  1591  #         # if this functionality is not implemented return right away.
  1592  #         if echo "$out" | grep -q "NotImplemented"; then
  1593  #             ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1594  #             return 0
  1595  #         fi
  1596  #     fi
  1597  
  1598  #     # if make bucket succeeds set object lock configuration
  1599  #     if [ $rv -eq 0 ]; then
  1600  #         function="${AWS} s3api put-object-lock-configuration --bucket ${bucket_name} --object-lock-configuration ObjectLockEnabled=Enabled"
  1601  #         out=$($function 2>&1)
  1602  #         rv=$?
  1603  # 	if [ $rv -ne 0 ]; then
  1604  #             # if this functionality is not implemented return right away.
  1605  #             if echo "$out" | grep -q "NotImplemented"; then
  1606  # 		${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1607  # 		return 0
  1608  #             fi
  1609  # 	fi
  1610  #     else
  1611  #         # if make bucket fails, $bucket_name has the error output
  1612  #         out="${bucket_name}"
  1613  #     fi
  1614  
  1615  #     # if setting object lock configuration succeeds, upload a file first time
  1616  #     if [ $rv -eq 0 ]; then
  1617  #         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
  1618  #         out=$($function 2>&1)
  1619  #         rv=$?
  1620  #     else
  1621  #         # if make bucket fails, $bucket_name has the error output
  1622  #         out="${bucket_name}"
  1623  #     fi
  1624  
  1625  #     # second time upload will succeed and there shall be now two versions of the object
  1626  #     if [ $rv -eq 0 ]; then
  1627  #         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
  1628  #         out=$($function 2>&1)
  1629  #         rv=$?
  1630  #     else
  1631  #         out="First time object upload failed"
  1632  #     fi
  1633  
  1634  #     if [ $rv -eq 0 ]; then
  1635  #         log_success "$(get_duration "$start_time")" "${test_function}"
  1636  #     else
#         # cleanup is not possible due to one day validity of object lock configuration
  1638  #         log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1639  #     fi
  1640  
  1641  #     return $rv
  1642  # }
  1643  
  1644  # # Tests creating and deleting an object with legal hold.
  1645  # function test_legal_hold() {
  1646  #     # log start time
  1647  #     start_time=$(get_time)
  1648  
  1649  #     # Make bucket
  1650  #     bucket_name="awscli-mint-test-bucket-$RANDOM"
  1651  #     function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket"
  1652  
  1653  #     # execute the test
  1654  #     out=$($function 2>&1)
  1655  #     rv=$?
  1656  
  1657  #     if [ $rv -ne 0 ]; then
  1658  #         # if this functionality is not implemented return right away.
  1659  #         if echo "$out" | grep -q "NotImplemented"; then
  1660  #             ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1661  #             return 0
  1662  #         fi
  1663  #     fi
  1664  
  1665  #     # if make bucket succeeds upload a file
  1666  #     if [ $rv -eq 0 ]; then
  1667  #         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --object-lock-legal-hold-status ON"
  1668  #         out=$($function 2>&1)
  1669  #         errcnt=$(echo "$out" | sed -n '/Bucket is missing ObjectLockConfiguration/p' | wc -l)
  1670  #         # skip test for gateways
  1671  #         if [ "$errcnt" -eq 1 ]; then
  1672  #             return 0
  1673  #         fi
  1674  #         rv=$?
  1675  #     else
  1676  #         # if make bucket fails, $bucket_name has the error output
  1677  #         out="${bucket_name}"
  1678  #     fi
  1679  
  1680  #     # if upload succeeds stat the file
  1681  #     if [ $rv -eq 0 ]; then
  1682  #         function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB"
  1683  #         # save the ref to function being tested, so it can be logged
  1684  #         test_function=${function}
  1685  #         out=$($function 2>&1)
  1686  #         lhold=$(echo "$out" | jq -r .ObjectLockLegalHoldStatus)
  1687  #         rv=$?
  1688  #     fi
  1689  
  1690  #     # if head-object succeeds, verify metadata has legal hold status
  1691  #     if [ $rv -eq 0 ]; then
  1692  #        if [ "${lhold}" == "" ]; then
  1693  #             rv=1
  1694  #             out="Legal hold was not applied"
  1695  #         fi
  1696  #         if [ "${lhold}" == "OFF" ]; then
  1697  #             rv=1
  1698  #             out="Legal hold was not applied"
  1699  #         fi
  1700  #     fi
  1701  #     if [ $rv -eq 0 ]; then
  1702  #         function="${AWS} s3api put-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB --legal-hold Status=OFF"
  1703  #         out=$($function 2>&1)
  1704  #         rv=$?
  1705  #     else
  1706  #         # if make bucket fails, $bucket_name has the error output
  1707  #         out="${bucket_name}"
  1708  #     fi
  1709  #     # if upload succeeds download the file
  1710  #     if [ $rv -eq 0 ]; then
  1711  #         function="${AWS} s3api get-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB"
  1712  #         # save the ref to function being tested, so it can be logged
  1713  #         test_function=${function}
  1714  #         out=$($function 2>&1)
  1715  #         lhold=$(echo "$out" | jq -r .LegalHold.Status)
  1716  #         rv=$?
  1717  #     fi
  1718  
  1719  #     # if head-object succeeds, verify metadata has legal hold status
  1720  #     if [ $rv -eq 0 ]; then
  1721  #        if [ "${lhold}" == "" ]; then
  1722  #             rv=1
  1723  #             out="Legal hold was not applied"
  1724  #         fi
  1725  #         if [ "${lhold}" == "ON" ]; then
  1726  #             rv=1
  1727  #             out="Legal hold status not turned off"
  1728  #         fi
  1729  #     fi
#      # With legal hold turned off, deleting the object should now succeed.
  1731  #     if [ $rv -eq 0 ]; then
  1732  #         function="${AWS} s3api delete-object --bucket ${bucket_name} --key datafile-1-kB"
  1733  #         # save the ref to function being tested, so it can be logged
  1734  #         test_function=${function}
  1735  #         out=$($function 2>&1)
  1736  #         rv=$?
  1737  #     fi
  1738  #     if [ $rv -eq 0 ]; then
  1739  #         log_success "$(get_duration "$start_time")" "${test_function}"
  1740  #     else
  1741  #         # clean up and log error
  1742  #         ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
  1743  #         log_failure "$(get_duration "$start_time")" "${function}" "${out}"
  1744  #     fi
  1745  
  1746  #     return $rv
  1747  # }
  1748  
  1749  # main handler for all the tests.
  1750  main() {
  1751      # Success tests
  1752      test_create_bucket && \
  1753      test_upload_object && \
  1754      test_lookup_object_prefix && \
  1755      test_list_objects && \
  1756      test_multipart_upload_0byte && \
  1757      test_multipart_upload && \
  1758      test_max_key_list && \
  1759      test_copy_object && \
  1760      test_copy_object_storage_class && \
  1761      test_copy_object_storage_class_same && \
  1762      test_presigned_object && \
  1763      test_upload_object_10 && \
  1764      test_multipart_upload_10 && \
  1765      test_bucket_lifecycle && \
  1766      test_serverside_encryption && \
  1767      test_serverside_encryption_get_range && \
  1768      test_serverside_encryption_multipart && \
  1769      test_serverside_encryption_multipart_copy && \
  1770      # Success cli ops.
  1771      test_aws_s3_cp && \
  1772      test_aws_s3_sync && \
  1773      # Error tests
  1774      test_list_objects_error && \
  1775      test_put_object_error && \
  1776      test_serverside_encryption_error
  1777      # test_worm_bucket && \
  1778      # test_legal_hold
  1779  
  1780      return $?
  1781  }
  1782  
  1783  _init "$@" && main