github.com/hashgraph/hedera-sdk-go/v2@v2.48.0/executable.go

package hedera

/*-
 *
 * Hedera Go SDK
 *
 * Copyright (C) 2020 - 2024 Hedera Hashgraph, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

import (
	"context"
	"encoding/hex"
	"strconv"
	"time"

	"github.com/cenkalti/backoff/v4"

	protobuf "google.golang.org/protobuf/proto"

	"github.com/pkg/errors"

	"github.com/hashgraph/hedera-protobufs-go/services"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const maxAttempts = 10

type _ExecutionState uint32

const (
	executionStateRetry    _ExecutionState = 0
	executionStateFinished _ExecutionState = 1
	executionStateError    _ExecutionState = 2
	executionStateExpired  _ExecutionState = 3
)
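// Each attempt inside _Execute maps the node's response to one of the states
// above: executionStateRetry backs off and tries again, executionStateFinished
// returns the mapped response, executionStateError fails immediately, and
// executionStateExpired regenerates the transaction ID (when permitted) before
// retrying.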

type Executable interface {
	GetMaxBackoff() time.Duration
	GetMinBackoff() time.Duration
	GetGrpcDeadline() *time.Duration
	GetMaxRetry() int
	GetNodeAccountIDs() []AccountID
	GetLogLevel() *LogLevel

	shouldRetry(Executable, interface{}) _ExecutionState
	makeRequest() interface{}
	advanceRequest()
	getNodeAccountID() AccountID
	getMethod(*_Channel) _Method
	mapStatusError(Executable, interface{}) error
	mapResponse(interface{}, AccountID, interface{}) (interface{}, error)
	getName() string
	validateNetworkOnIDs(client *Client) error
	isTransaction() bool
	getLogger(Logger) Logger
	getTransactionIDAndMessage() (string, string)
	getLogID(Executable) string // This returns transaction creation timestamp + transaction name
}

type executable struct {
	transactionIDs *_LockableSlice
	nodeAccountIDs *_LockableSlice
	maxBackoff     *time.Duration
	minBackoff     *time.Duration
	grpcDeadline   *time.Duration
	maxRetry       int
	logLevel       *LogLevel
}

type _Method struct {
	query func(
		context.Context,
		*services.Query,
		...grpc.CallOption,
	) (*services.Response, error)
	transaction func(
		context.Context,
		*services.Transaction,
		...grpc.CallOption,
	) (*services.TransactionResponse, error)
}
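// Exactly one of the two fields above is populated per request: queries set
// `query`, transactions set `transaction`. As a rough sketch (not taken from
// this file), a concrete transaction's getMethod typically just hands back the
// matching gRPC stub method; the _GetCrypto accessor name below is assumed
// here for illustration:
//
//	func (tx *TransferTransaction) getMethod(channel *_Channel) _Method {
//		return _Method{
//			transaction: channel._GetCrypto().CryptoTransfer,
//		}
//	}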

func (e *executable) GetMaxBackoff() time.Duration {
	if e.maxBackoff != nil {
		return *e.maxBackoff
	}

	return 8 * time.Second
}

func (e *executable) GetMinBackoff() time.Duration {
	if e.minBackoff != nil {
		return *e.minBackoff
	}

	return 250 * time.Millisecond
}

func (e *executable) SetMaxBackoff(max time.Duration) *executable {
	if max.Nanoseconds() < 0 {
		panic("maxBackoff must be a positive duration")
	} else if max.Nanoseconds() < e.minBackoff.Nanoseconds() {
		panic("maxBackoff must be greater than or equal to minBackoff")
	}
	e.maxBackoff = &max
	return e
}

func (e *executable) SetMinBackoff(min time.Duration) *executable {
	if min.Nanoseconds() < 0 {
		panic("minBackoff must be a positive duration")
	} else if e.maxBackoff.Nanoseconds() < min.Nanoseconds() {
		panic("minBackoff must be less than or equal to maxBackoff")
	}
	e.minBackoff = &min
	return e
}
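// The getters above fall back to 8s / 250ms when no explicit backoff has been
// configured. A usage sketch, assuming a concrete request type such as
// TransferTransaction that builds on this executable state (values are
// illustrative only):
//
//	tx := NewTransferTransaction().
//		SetMinBackoff(500 * time.Millisecond).
//		SetMaxBackoff(16 * time.Second)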

// GetGrpcDeadline returns the gRPC deadline.
func (e *executable) GetGrpcDeadline() *time.Duration {
	return e.grpcDeadline
}

// SetGrpcDeadline sets the deadline for a single gRPC attempt: when execution
// is attempted, the attempt times out once this deadline is reached. (The SDK
// may subsequently retry the execution.)
func (e *executable) SetGrpcDeadline(deadline *time.Duration) *executable {
	e.grpcDeadline = deadline
	return e
}

// GetMaxRetry returns the max number of errors before execution will fail.
func (e *executable) GetMaxRetry() int {
	return e.maxRetry
}

// SetMaxRetry sets the max number of errors before execution will fail.
func (e *executable) SetMaxRetry(max int) *executable {
	e.maxRetry = max
	return e
}
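// A configuration sketch for the two knobs above, assuming a concrete request
// type such as TransferTransaction (names and values are illustrative):
//
//	deadline := 5 * time.Second
//	tx := NewTransferTransaction().
//		SetGrpcDeadline(&deadline). // each gRPC attempt is cancelled after 5s
//		SetMaxRetry(5)              // give up after 5 failed attempts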

// GetNodeAccountIDs returns the node AccountIDs for this request.
func (e *executable) GetNodeAccountIDs() []AccountID {
	nodeAccountIDs := []AccountID{}

	for _, value := range e.nodeAccountIDs.slice {
		nodeAccountIDs = append(nodeAccountIDs, value.(AccountID))
	}

	return nodeAccountIDs
}

// SetNodeAccountIDs locks the request to the given set of node AccountIDs;
// execution will only be attempted against these nodes.
func (e *executable) SetNodeAccountIDs(nodeAccountIDs []AccountID) *executable {
	for _, nodeAccountID := range nodeAccountIDs {
		e.nodeAccountIDs._Push(nodeAccountID)
	}
	e.nodeAccountIDs._SetLocked(true)
	return e
}
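// A usage sketch for pinning a request to specific nodes, assuming a concrete
// request type such as TransferTransaction (the account IDs are illustrative):
//
//	nodes := []AccountID{{Account: 3}, {Account: 4}}
//	tx := NewTransferTransaction().SetNodeAccountIDs(nodes)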

// GetLogLevel returns the log level override set for this request, if any.
func (e *executable) GetLogLevel() *LogLevel {
	return e.logLevel
}

// SetLogLevel overrides the client's log level for this request only.
func (e *executable) SetLogLevel(level LogLevel) *executable {
	e.logLevel = &level
	return e
}

func (e *executable) getLogger(clientLogger Logger) Logger {
	if e.logLevel != nil {
		return clientLogger.SubLoggerWithLevel(*e.logLevel)
	}
	return clientLogger
}
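// A sketch of the per-request override above; the LoggerLevelDebug constant
// name is assumed here and may differ from the SDK's actual LogLevel values:
//
//	tx := NewTransferTransaction().SetLogLevel(LoggerLevelDebug)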

func (e *executable) getNodeAccountID() AccountID {
	return e.nodeAccountIDs._GetCurrent().(AccountID)
}

func _Execute(client *Client, e Executable) (interface{}, error) {
	var maxAttempts int
	backOff := backoff.NewExponentialBackOff()
	backOff.InitialInterval = e.GetMinBackoff()
	backOff.MaxInterval = e.GetMaxBackoff()
	backOff.Multiplier = 2

	if client.maxAttempts != nil {
		maxAttempts = *client.maxAttempts
	} else {
		maxAttempts = e.GetMaxRetry()
	}

	currentBackoff := e.GetMinBackoff()
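	// currentBackoff doubles on every iteration of the loop below (see the loop
	// header), so with the default 250ms minimum the retry / unhealthy-node
	// delays grow as 250ms, 500ms, 1s, 2s, ... for up to maxAttempts attempts.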

	var attempt int64
	var errPersistent error
	var marshaledRequest []byte

	txLogger := e.getLogger(client.logger)
	txID, msg := e.getTransactionIDAndMessage()

	for attempt = int64(0); attempt < int64(maxAttempts); attempt, currentBackoff = attempt+1, currentBackoff*2 {
		var protoRequest interface{}
		var node *_Node
		var ok bool

		if e.isTransaction() {
			if attempt > 0 && len(e.GetNodeAccountIDs()) > 1 {
				e.advanceRequest()
			}
		}

		protoRequest = e.makeRequest()
		if len(e.GetNodeAccountIDs()) == 0 {
			node = client.network._GetNode()
		} else {
			nodeAccountID := e.getNodeAccountID()
			if node, ok = client.network._GetNodeForAccountID(nodeAccountID); !ok {
				return TransactionResponse{}, ErrInvalidNodeAccountIDSet{nodeAccountID}
			}
		}

		if e.isTransaction() {
			marshaledRequest, _ = protobuf.Marshal(protoRequest.(*services.Transaction))
		} else {
			marshaledRequest, _ = protobuf.Marshal(protoRequest.(*services.Query))
		}

		node._InUse()

		txLogger.Trace("executing", "requestId", e.getLogID(e), "nodeAccountID", node.accountID.String(), "nodeIPAddress", node.address._String(), "Request Proto", hex.EncodeToString(marshaledRequest))

		if !node._IsHealthy() {
			txLogger.Trace("node is unhealthy, waiting before continuing", "requestId", e.getLogID(e), "delay", node._Wait().String())
			_DelayForAttempt(e.getLogID(e), currentBackoff, attempt, txLogger, errNodeIsUnhealthy)
			continue
		}

		txLogger.Trace("updating node account ID index", "requestId", e.getLogID(e))
		channel, err := node._GetChannel(txLogger)
		if err != nil {
			client.network._IncreaseBackoff(node)
			errPersistent = err
			continue
		}

		e.advanceRequest()

		method := e.getMethod(channel)

		var resp interface{}

		ctx := context.TODO()
		var cancel context.CancelFunc

		if e.GetGrpcDeadline() != nil {
			grpcDeadline := time.Now().Add(*e.GetGrpcDeadline())
			ctx, cancel = context.WithDeadline(ctx, grpcDeadline)
		}

		txLogger.Trace("executing gRPC call", "requestId", e.getLogID(e))

		var marshaledResponse []byte
		if method.query != nil {
			resp, err = method.query(ctx, protoRequest.(*services.Query))
			if err == nil {
				marshaledResponse, _ = protobuf.Marshal(resp.(*services.Response))
			}
		} else {
			resp, err = method.transaction(ctx, protoRequest.(*services.Transaction))
			if err == nil {
				marshaledResponse, _ = protobuf.Marshal(resp.(*services.TransactionResponse))
			}
		}

		if cancel != nil {
			cancel()
		}
		if err != nil {
			errPersistent = err
			if _ExecutableDefaultRetryHandler(e.getLogID(e), err, txLogger) {
				client.network._IncreaseBackoff(node)
				continue
			}
			if errPersistent == nil {
				errPersistent = errors.New("error")
			}

			if e.isTransaction() {
				return TransactionResponse{}, errors.Wrapf(errPersistent, "retry %d/%d", attempt, maxAttempts)
			}

			return &services.Response{}, errors.Wrapf(errPersistent, "retry %d/%d", attempt, maxAttempts)
		}

		node._DecreaseBackoff()

		statusError := e.mapStatusError(e, resp)

		txLogger.Trace(
			msg,
			"requestID", e.getLogID(e),
			"nodeID", node.accountID.String(),
			"nodeAddress", node.address._String(),
			"nodeIsHealthy", strconv.FormatBool(node._IsHealthy()),
			"network", client.GetLedgerID().String(),
			"status", statusError.Error(),
			"txID", txID,
		)

		switch e.shouldRetry(e, resp) {
		case executionStateRetry:
			errPersistent = statusError
			_DelayForAttempt(e.getLogID(e), currentBackoff, attempt, txLogger, errPersistent)
			continue
		case executionStateExpired:
			if e.isTransaction() {
				transaction := e.(TransactionInterface)
				if transaction.regenerateID(client) {
					txLogger.Trace("received `TRANSACTION_EXPIRED` with transaction ID regeneration enabled; regenerating", "requestId", e.getLogID(e))
					continue
				} else {
					return TransactionResponse{}, statusError
				}
			} else {
				return &services.Response{}, statusError
			}
		case executionStateError:
			if e.isTransaction() {
				return TransactionResponse{}, statusError
			}

			return &services.Response{}, statusError
		case executionStateFinished:
			txLogger.Trace("finished", "Response Proto", hex.EncodeToString(marshaledResponse))
			return e.mapResponse(resp, node.accountID, protoRequest)
		}
	}

	if errPersistent == nil {
		errPersistent = errors.New("unknown error occurred after max attempts")
	}

	if e.isTransaction() {
		return TransactionResponse{}, errors.Wrapf(errPersistent, "retry %d/%d", attempt, maxAttempts)
	}

	txLogger.Error("exceeded maximum attempts for request", "last exception being", errPersistent)

	return &services.Response{}, errPersistent
}
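// _Execute is not called directly by SDK users; the public Execute methods on
// concrete requests delegate to it. A caller-side sketch (the query type and
// methods shown are from the public SDK surface, used here only as an example):
//
//	balance, err := NewAccountBalanceQuery().
//		SetAccountID(AccountID{Account: 3}).
//		Execute(client) // drives the retry loop above
//	if err != nil {
//		panic(err)
//	}
//	_ = balance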

func _DelayForAttempt(logID string, backoff time.Duration, attempt int64, logger Logger, err error) {
	logger.Trace("retrying request attempt", "requestId", logID, "delay", backoff, "attempt", attempt+1, "error", err)

	time.Sleep(backoff)
}

func _ExecutableDefaultRetryHandler(logID string, err error, logger Logger) bool {
	code := status.Code(err)
	logger.Trace("received gRPC error with status code", "requestId", logID, "status", code.String())
	switch code {
	case codes.ResourceExhausted, codes.Unavailable:
		return true
	case codes.Internal:
		grpcErr, ok := status.FromError(err)

		if !ok {
			return false
		}

		return rstStream.Match([]byte(grpcErr.Message()))
	default:
		return false
	}
}
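// A behavior sketch for the handler above (the error values are illustrative):
//
//	_ExecutableDefaultRetryHandler(id, status.Error(codes.Unavailable, "transport is closing"), logger)  // true: retried
//	_ExecutableDefaultRetryHandler(id, status.Error(codes.Internal, "stream terminated by RST_STREAM"), logger) // true only if the message matches rstStream
//	_ExecutableDefaultRetryHandler(id, status.Error(codes.InvalidArgument, "bad request"), logger)       // false: surfaced to the caller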