go.uber.org/cadence@v1.2.9/internal/internal_workflow.go

     1  // Copyright (c) 2017-2020 Uber Technologies Inc.
     2  // Portions of the Software are attributed to Copyright (c) 2020 Temporal Technologies Inc.
     3  //
     4  // Permission is hereby granted, free of charge, to any person obtaining a copy
     5  // of this software and associated documentation files (the "Software"), to deal
     6  // in the Software without restriction, including without limitation the rights
     7  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     8  // copies of the Software, and to permit persons to whom the Software is
     9  // furnished to do so, subject to the following conditions:
    10  //
    11  // The above copyright notice and this permission notice shall be included in
    12  // all copies or substantial portions of the Software.
    13  //
    14  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20  // THE SOFTWARE.
    21  
    22  package internal
    23  
    24  // All code in this file is private to the package.
    25  
    26  import (
    27  	"errors"
    28  	"fmt"
    29  	"reflect"
    30  	"runtime"
    31  	"sort"
    32  	"strings"
    33  	"sync"
    34  	"time"
    35  	"unicode"
    36  
    37  	"github.com/robfig/cron"
    38  	"go.uber.org/atomic"
    39  	"go.uber.org/zap"
    40  
    41  	"go.uber.org/cadence/.gen/go/shared"
    42  	s "go.uber.org/cadence/.gen/go/shared"
    43  	"go.uber.org/cadence/internal/common"
    44  	"go.uber.org/cadence/internal/common/metrics"
    45  	"go.uber.org/cadence/internal/common/util"
    46  )
    47  
    48  const (
    49  	defaultSignalChannelSize = 100000 // really large buffer size (100K)
    50  
    51  	panicIllegalAccessCoroutinueState = "getState: illegal access from outside of workflow context"
    52  )
    53  
    54  type (
    55  	syncWorkflowDefinition struct {
    56  		workflow   workflow
    57  		dispatcher dispatcher
    58  		cancel     CancelFunc
    59  		rootCtx    Context
    60  	}
    61  
    62  	workflowResult struct {
    63  		workflowResult []byte
    64  		error          error
    65  	}
    66  
    67  	futureImpl struct {
    68  		value   interface{}
    69  		err     error
    70  		ready   bool
    71  		channel *channelImpl
    72  		chained []asyncFuture // Futures that are chained to this one
    73  	}
    74  
    75  	// Implements WaitGroup interface
    76  	waitGroupImpl struct {
    77  		n        int      // the number of coroutines to wait on
    78  		waiting  bool     // indicates whether WaitGroup.Wait() has been called yet for the WaitGroup
    79  		future   Future   // future to signal that all awaited members of the WaitGroup have completed
    80  		settable Settable // used to unblock the future when all coroutines have completed
    81  	}
    82  
    83  	// Dispatcher is a container of a set of coroutines.
    84  	dispatcher interface {
    85  		// ExecuteUntilAllBlocked executes coroutines one by one in deterministic order
    86  		// until all of them are completed or blocked on Channel or Selector
    87  		ExecuteUntilAllBlocked() (err error)
    88  		// IsDone returns true when all of coroutines are completed
    89  		IsDone() bool
    90  		Close()             // Destroys all coroutines without waiting for their completion
    91  		StackTrace() string // Stack trace of all coroutines owned by the Dispatcher instance
    92  	}
    93  
    94  	// Workflow is an interface that any workflow should implement.
    95  	// Code of a workflow must be deterministic. It must use workflow.Channel, workflow.Selector, and workflow.Go instead of
    96  	// native channels, select, and go. It also must not range over a map, as map iteration order is randomized by the Go runtime.
    97  	// All time manipulation should use the current time returned by Now(ctx).
    98  	// Note that workflow.Context is used instead of context.Context to avoid use of raw channels.
    99  	workflow interface {
   100  		Execute(ctx Context, input []byte) (result []byte, err error)
   101  	}
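
        	// Example: a minimal sketch of a deterministic workflow function written against the
        	// public go.uber.org/cadence/workflow API (which wraps these internals). The function
        	// and argument names are illustrative only:
        	//
        	//	func sampleWorkflow(ctx workflow.Context, name string) (string, error) {
        	//		ch := workflow.NewChannel(ctx) // instead of make(chan string)
        	//		workflow.Go(ctx, func(ctx workflow.Context) { // instead of `go func() { ... }()`
        	//			ch.Send(ctx, "hello "+name)
        	//		})
        	//		var greeting string
        	//		ch.Receive(ctx, &greeting)
        	//		_ = workflow.Now(ctx) // instead of time.Now()
        	//		return greeting, nil
        	//	}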
   102  
   103  	sendCallback struct {
   104  		value interface{}
   105  		fn    func() bool // false indicates that callback didn't accept the value
   106  	}
   107  
   108  	receiveCallback struct {
   109  		// false result means that callback didn't accept the value and it is still up for delivery
   110  		fn func(v interface{}, more bool) bool
   111  	}
   112  
   113  	channelImpl struct {
   114  		name            string             // human readable channel name
   115  		size            int                // Channel buffer size. 0 for non buffered.
   116  		buffer          []interface{}      // buffered messages
   117  		blockedSends    []*sendCallback    // sends waiting when the buffer is full.
   118  		blockedReceives []*receiveCallback // receives waiting when no messages are available.
   119  		closed          bool               // true if channel is closed.
   120  		recValue        *interface{}       // Used only while receiving a value; acts as a pre-fetch buffer for the channel.
   121  		dataConverter   DataConverter      // used to decode data
   122  		env             workflowEnvironment
   123  	}
   124  
   125  	// Single case statement of the Select
   126  	selectCase struct {
   127  		channel     *channelImpl                // Channel of this case.
   128  		receiveFunc *func(c Channel, more bool) // function to call when channel has a message. nil for send case.
   129  
   130  		sendFunc   *func()         // function to call when channel accepted a message. nil for receive case.
   131  		sendValue  *interface{}    // value to send to the channel. Used only for send case.
   132  		future     asyncFuture     // Used for future case
   133  		futureFunc *func(f Future) // function to call when Future is ready
   134  	}
   135  
   136  	// Implements Selector interface
   137  	selectorImpl struct {
   138  		name        string
   139  		cases       []*selectCase // cases that this selector is composed of
   140  		defaultFunc *func()       // default case
   141  	}
   142  
   143  	// unblockFunc is passed to a coroutine yield and evaluated there. When it returns false the yield returns to the caller.
   144  	// stackDepth is the depth of the stack from the last blocking call that is relevant to the user.
   145  	// It is used to truncate internal stack frames from the thread stack.
   146  	unblockFunc func(status string, stackDepth int) (keepBlocked bool)
   147  
   148  	coroutineState struct {
   149  		name         string
   150  		dispatcher   *dispatcherImpl  // dispatcher this context belongs to
   151  		aboutToBlock chan bool        // used to notify dispatcher that coroutine that owns this context is about to block
   152  		unblock      chan unblockFunc // used to notify coroutine that it should continue executing.
   153  		keptBlocked  bool             // true indicates that coroutine didn't make any progress since the last yield unblocking
   154  		closed       bool             // indicates that owning coroutine has finished execution
   155  		blocked      atomic.Bool
   156  		panicError   *workflowPanicError // non nil if coroutine had unhandled panic
   157  	}
   158  
   159  	dispatcherImpl struct {
   160  		sequence         int
   161  		channelSequence  int // used to name channels
   162  		selectorSequence int // used to name selectors
   163  		coroutines       []*coroutineState
   164  		executing        bool       // currently running ExecuteUntilAllBlocked. Used to avoid recursive calls to it.
   165  		mutex            sync.Mutex // used to synchronize executing
   166  		closed           bool
   167  	}
   168  
   169  	// The current timeout resolution is implemented in seconds, with durations rounded up via math.Ceil(). This is
   170  	// subject to change in the future.
   171  	workflowOptions struct {
   172  		taskListName                        *string
   173  		executionStartToCloseTimeoutSeconds *int32
   174  		taskStartToCloseTimeoutSeconds      *int32
   175  		domain                              *string
   176  		workflowID                          string
   177  		waitForCancellation                 bool
   178  		signalChannels                      map[string]Channel
   179  		queryHandlers                       map[string]func([]byte) ([]byte, error)
   180  		workflowIDReusePolicy               WorkflowIDReusePolicy
   181  		dataConverter                       DataConverter
   182  		retryPolicy                         *shared.RetryPolicy
   183  		cronSchedule                        string
   184  		contextPropagators                  []ContextPropagator
   185  		memo                                map[string]interface{}
   186  		searchAttributes                    map[string]interface{}
   187  		parentClosePolicy                   ParentClosePolicy
   188  		bugports                            Bugports
   189  	}
   190  
   191  	executeWorkflowParams struct {
   192  		workflowOptions
   193  		workflowType         *WorkflowType
   194  		input                []byte
   195  		header               *shared.Header
   196  		attempt              int32     // used by test framework to support child workflow retry
   197  		scheduledTime        time.Time // used by test framework to support child workflow retry
   198  		lastCompletionResult []byte    // used by test framework to support cron
   199  	}
   200  
   201  	// decodeFutureImpl
   202  	decodeFutureImpl struct {
   203  		*futureImpl
   204  		fn interface{}
   205  	}
   206  
   207  	childWorkflowFutureImpl struct {
   208  		*decodeFutureImpl             // for child workflow result
   209  		executionFuture   *futureImpl // for child workflow execution future
   210  	}
   211  
   212  	asyncFuture interface {
   213  		Future
   214  		// Used by selectorImpl
   215  		// If the Future is ready, returns its value immediately.
   216  		// If not, registers a callback that is called when it becomes ready.
   217  		GetAsync(callback *receiveCallback) (v interface{}, ok bool, err error)
   218  
   219  		// Used by selectorImpl
   220  		RemoveReceiveCallback(callback *receiveCallback)
   221  
   222  		// This future will be added to the list of dependency futures.
   223  		ChainFuture(f Future)
   224  
   225  		// Gets the current value and error.
   226  		// Make sure this is called once the future is ready.
   227  		GetValueAndError() (v interface{}, err error)
   228  
   229  		Set(value interface{}, err error)
   230  	}
   231  
   232  	queryHandler struct {
   233  		fn            interface{}
   234  		queryType     string
   235  		dataConverter DataConverter
   236  	}
   237  )
   238  
   239  const (
   240  	workflowEnvironmentContextKey    = "workflowEnv"
   241  	workflowInterceptorsContextKey   = "workflowInterceptor"
   242  	localActivityFnContextKey        = "localActivityFn"
   243  	workflowEnvInterceptorContextKey = "envInterceptor"
   244  	workflowResultContextKey         = "workflowResult"
   245  	coroutinesContextKey             = "coroutines"
   246  	workflowEnvOptionsContextKey     = "wfEnvOptions"
   247  )
   248  
   249  // Assert that structs do indeed implement the interfaces
   250  var _ Channel = (*channelImpl)(nil)
   251  var _ Selector = (*selectorImpl)(nil)
   252  var _ WaitGroup = (*waitGroupImpl)(nil)
   253  var _ dispatcher = (*dispatcherImpl)(nil)
   254  
   255  var stackBuf [100000]byte
   256  
   257  // Pointer to pointer to workflow result
   258  func getWorkflowResultPointerPointer(ctx Context) **workflowResult {
   259  	rpp := ctx.Value(workflowResultContextKey)
   260  	if rpp == nil {
   261  		panic("getWorkflowResultPointerPointer: Not a workflow context")
   262  	}
   263  	return rpp.(**workflowResult)
   264  }
   265  
   266  func getWorkflowEnvironment(ctx Context) workflowEnvironment {
   267  	wc := ctx.Value(workflowEnvironmentContextKey)
   268  	if wc == nil {
   269  		panic("getWorkflowContext: Not a workflow context")
   270  	}
   271  	return wc.(workflowEnvironment)
   272  }
   273  
   274  func getEnvInterceptor(ctx Context) *workflowEnvironmentInterceptor {
   275  	wc := ctx.Value(workflowEnvInterceptorContextKey)
   276  	if wc == nil {
   277  		panic("getEnvInterceptor: Not a workflow context")
   278  	}
   279  	return wc.(*workflowEnvironmentInterceptor)
   280  }
   281  
   282  type workflowEnvironmentInterceptor struct {
   283  	env                  workflowEnvironment
   284  	interceptorChainHead WorkflowInterceptor
   285  	fn                   interface{}
   286  }
   287  
   288  func getWorkflowInterceptor(ctx Context) WorkflowInterceptor {
   289  	wc := ctx.Value(workflowInterceptorsContextKey)
   290  	if wc == nil {
   291  		panic("getWorkflowInterceptor: Not a workflow context")
   292  	}
   293  	return wc.(WorkflowInterceptor)
   294  }
   295  
   296  func (f *futureImpl) Get(ctx Context, value interface{}) error {
   297  	more := f.channel.Receive(ctx, nil)
   298  	if more {
   299  		panic("not closed")
   300  	}
   301  	if !f.ready {
   302  		panic("not ready")
   303  	}
   304  	if f.err != nil || f.value == nil || value == nil {
   305  		return f.err
   306  	}
   307  	rf := reflect.ValueOf(value)
   308  	if rf.Type().Kind() != reflect.Ptr {
   309  		return errors.New("value parameter is not a pointer")
   310  	}
   311  
   312  	if blob, ok := f.value.([]byte); ok && !util.IsTypeByteSlice(reflect.TypeOf(value)) {
   313  		if err := decodeArg(getDataConverterFromWorkflowContext(ctx), blob, value); err != nil {
   314  			return err
   315  		}
   316  		return f.err
   317  	}
   318  
   319  	fv := reflect.ValueOf(f.value)
   320  	if fv.IsValid() {
   321  		rf.Elem().Set(fv)
   322  	}
   323  	return f.err
   324  }
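
        // Example usage of a Future from workflow code (an illustrative sketch using the public
        // workflow package API, which wraps this implementation; the activity name is hypothetical):
        //
        //	f := workflow.ExecuteActivity(ctx, "SampleActivity", arg)
        //	var result string
        //	if err := f.Get(ctx, &result); err != nil {
        //		return err
        //	}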
   325  
   326  // Used by selectorImpl
   327  // If the Future is ready, returns its value immediately.
   328  // If not, registers a callback that is called when it becomes ready.
   329  func (f *futureImpl) GetAsync(callback *receiveCallback) (v interface{}, ok bool, err error) {
   330  	_, _, more := f.channel.receiveAsyncImpl(callback)
   331  	// Future uses Channel.Close to indicate that it is ready.
   332  	// So more being true (channel is still open) indicates future is not ready.
   333  	if more {
   334  		return nil, false, nil
   335  	}
   336  	if !f.ready {
   337  		panic("not ready")
   338  	}
   339  	return f.value, true, f.err
   340  }
   341  
   342  // RemoveReceiveCallback removes the callback from future's channel to avoid closure leak.
   343  // Used by selectorImpl
   344  func (f *futureImpl) RemoveReceiveCallback(callback *receiveCallback) {
   345  	f.channel.removeReceiveCallback(callback)
   346  }
   347  
   348  func (f *futureImpl) IsReady() bool {
   349  	return f.ready
   350  }
   351  
   352  func (f *futureImpl) Set(value interface{}, err error) {
   353  	if f.ready {
   354  		panic("already set")
   355  	}
   356  	f.value = value
   357  	f.err = err
   358  	f.ready = true
   359  	f.channel.Close()
   360  	for _, ch := range f.chained {
   361  		ch.Set(f.value, f.err)
   362  	}
   363  }
   364  
   365  func (f *futureImpl) SetValue(value interface{}) {
   366  	if f.ready {
   367  		panic("already set")
   368  	}
   369  	f.Set(value, nil)
   370  }
   371  
   372  func (f *futureImpl) SetError(err error) {
   373  	if f.ready {
   374  		panic("already set")
   375  	}
   376  	f.Set(nil, err)
   377  }
   378  
   379  func (f *futureImpl) Chain(future Future) {
   380  	if f.ready {
   381  		panic("already set")
   382  	}
   383  
   384  	ch, ok := future.(asyncFuture)
   385  	if !ok {
   386  		panic("cannot chain Future that wasn't created with workflow.NewFuture")
   387  	}
   388  	if !ch.IsReady() {
   389  		ch.ChainFuture(f)
   390  		return
   391  	}
   392  	val, err := ch.GetValueAndError()
   393  	f.value = val
   394  	f.err = err
   395  	f.ready = true
   396  	return
   397  }
   398  
   399  func (f *futureImpl) ChainFuture(future Future) {
   400  	f.chained = append(f.chained, future.(asyncFuture))
   401  }
   402  
   403  func (f *futureImpl) GetValueAndError() (interface{}, error) {
   404  	return f.value, f.err
   405  }
   406  
   407  func (f *childWorkflowFutureImpl) GetChildWorkflowExecution() Future {
   408  	return f.executionFuture
   409  }
   410  
   411  func (f *childWorkflowFutureImpl) SignalChildWorkflow(ctx Context, signalName string, data interface{}) Future {
   412  	var childExec WorkflowExecution
   413  	if err := f.GetChildWorkflowExecution().Get(ctx, &childExec); err != nil {
   414  		return f.GetChildWorkflowExecution()
   415  	}
   416  
   417  	childWorkflowOnly := true // this means we are targeting the child workflow only
   418  	// below we use an empty run ID to indicate the currently running execution, in case the child does continue-as-new
   419  	return signalExternalWorkflow(ctx, childExec.ID, "", signalName, data, childWorkflowOnly)
   420  }
   421  
   422  func newWorkflowContext(env workflowEnvironment, interceptors WorkflowInterceptor, envInterceptor *workflowEnvironmentInterceptor) Context {
   423  	rootCtx := WithValue(background, workflowEnvironmentContextKey, env)
   424  	rootCtx = WithValue(rootCtx, workflowEnvInterceptorContextKey, envInterceptor)
   425  	rootCtx = WithValue(rootCtx, workflowInterceptorsContextKey, interceptors)
   426  
   427  	var resultPtr *workflowResult
   428  	rootCtx = WithValue(rootCtx, workflowResultContextKey, &resultPtr)
   429  
   430  	// Set default values for the workflow execution.
   431  	wInfo := env.WorkflowInfo()
   432  	rootCtx = WithWorkflowDomain(rootCtx, wInfo.Domain)
   433  	rootCtx = WithWorkflowTaskList(rootCtx, wInfo.TaskListName)
   434  	rootCtx = WithExecutionStartToCloseTimeout(rootCtx, time.Duration(wInfo.ExecutionStartToCloseTimeoutSeconds)*time.Second)
   435  	rootCtx = WithWorkflowTaskStartToCloseTimeout(rootCtx, time.Duration(wInfo.TaskStartToCloseTimeoutSeconds)*time.Second)
   436  	rootCtx = WithTaskList(rootCtx, wInfo.TaskListName)
   437  	rootCtx = WithDataConverter(rootCtx, env.GetDataConverter())
   438  	rootCtx = withContextPropagators(rootCtx, env.GetContextPropagators())
   439  	getActivityOptions(rootCtx).OriginalTaskListName = wInfo.TaskListName
   440  
   441  	return rootCtx
   442  }
   443  
   444  func newWorkflowInterceptors(
   445  	env workflowEnvironment,
   446  	factories []WorkflowInterceptorFactory,
   447  ) (WorkflowInterceptor, *workflowEnvironmentInterceptor) {
   448  	envInterceptor := &workflowEnvironmentInterceptor{env: env}
   449  	var interceptor WorkflowInterceptor = envInterceptor
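        	// Factories are applied in reverse order so that factories[0] produces the outermost
        	// interceptor; e.g. with factories [A, B] the resulting call chain is A -> B -> envInterceptor.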
   450  	for i := len(factories) - 1; i >= 0; i-- {
   451  		interceptor = factories[i].NewInterceptor(env.WorkflowInfo(), interceptor)
   452  	}
   453  	envInterceptor.interceptorChainHead = interceptor
   454  	return interceptor, envInterceptor
   455  }
   456  
   457  func (d *syncWorkflowDefinition) Execute(env workflowEnvironment, header *shared.Header, input []byte) {
   458  	interceptors, envInterceptor := newWorkflowInterceptors(env, env.GetWorkflowInterceptors())
   459  	dispatcher, rootCtx := newDispatcher(newWorkflowContext(env, interceptors, envInterceptor), func(ctx Context) {
   460  		r := &workflowResult{}
   461  
   462  		// We want to execute the user workflow definition only from the first decision task started,
   463  		// so that it can see everything that happened before that. At this point all initialization is
   464  		// done, hence we yield.
   465  		state := getState(d.rootCtx)
   466  		state.yield("yield before executing to setup state")
   467  
   468  		// TODO: @shreyassrivatsan - add workflow trace span here
   469  		r.workflowResult, r.error = d.workflow.Execute(d.rootCtx, input)
   470  		rpp := getWorkflowResultPointerPointer(ctx)
   471  		*rpp = r
   472  	})
   473  
   474  	// set the information from the headers that is to be propagated in the workflow context
   475  	for _, ctxProp := range env.GetContextPropagators() {
   476  		var err error
   477  		if rootCtx, err = ctxProp.ExtractToWorkflow(rootCtx, NewHeaderReader(header)); err != nil {
   478  			panic(fmt.Sprintf("Unable to propagate context %v", err))
   479  		}
   480  	}
   481  
   482  	d.rootCtx, d.cancel = WithCancel(rootCtx)
   483  	d.dispatcher = dispatcher
   484  
   485  	getWorkflowEnvironment(d.rootCtx).RegisterCancelHandler(func() {
   486  		// It is ok to call this method multiple times.
   487  		// It doesn't do anything new; the context remains cancelled.
   488  		d.cancel()
   489  	})
   490  
   491  	getWorkflowEnvironment(d.rootCtx).RegisterSignalHandler(func(name string, result []byte) {
   492  		eo := getWorkflowEnvOptions(d.rootCtx)
   493  		// We never want this code to block, so we use SendAsync().
   494  		ch := eo.getSignalChannel(d.rootCtx, name).(*channelImpl)
   495  		ok := ch.SendAsync(result)
   496  		if !ok {
   497  			panic(fmt.Sprintf("Exceeded channel buffer size for signal: %v", name))
   498  		}
   499  	})
   500  
   501  	getWorkflowEnvironment(d.rootCtx).RegisterQueryHandler(func(queryType string, queryArgs []byte) ([]byte, error) {
   502  		eo := getWorkflowEnvOptions(d.rootCtx)
   503  		handler, ok := eo.queryHandlers[queryType]
   504  		if !ok {
   505  			return nil, fmt.Errorf("unknown queryType %v. KnownQueryTypes=%v", queryType, eo.KnownQueryTypes())
   506  		}
   507  		return handler(queryArgs)
   508  	})
   509  }
   510  
   511  func (d *syncWorkflowDefinition) OnDecisionTaskStarted() {
   512  	executeDispatcher(d.rootCtx, d.dispatcher)
   513  }
   514  
   515  func (d *syncWorkflowDefinition) StackTrace() string {
   516  	return d.dispatcher.StackTrace()
   517  }
   518  
   519  func (d *syncWorkflowDefinition) KnownQueryTypes() []string {
   520  	return getWorkflowEnvOptions(d.rootCtx).KnownQueryTypes()
   521  }
   522  
   523  func (d *syncWorkflowDefinition) Close() {
   524  	if d.dispatcher != nil {
   525  		d.dispatcher.Close()
   526  	}
   527  }
   528  
   529  // NewDispatcher creates a new Dispatcher instance with a root coroutine function.
   530  // Context passed to the root function is child of the passed rootCtx.
   531  // This way rootCtx can be used to pass values to the coroutine code.
   532  func newDispatcher(rootCtx Context, root func(ctx Context)) (*dispatcherImpl, Context) {
   533  	result := &dispatcherImpl{}
   534  	ctxWithState := result.newCoroutine(rootCtx, root)
   535  	return result, ctxWithState
   536  }
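
        // Typical usage within this file (a sketch of the existing pattern, not additional API):
        // syncWorkflowDefinition.Execute creates the dispatcher for the workflow coroutine and
        // OnDecisionTaskStarted drives it:
        //
        //	d, rootCtx := newDispatcher(workflowCtx, func(ctx Context) { /* run the workflow function */ })
        //	...
        //	executeDispatcher(rootCtx, d) // executes coroutines until all are blocked or done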
   537  
   538  // executeDispatcher executes coroutines in the calling thread and calls workflow completion callbacks
   539  // if the root workflow function has returned
   540  func executeDispatcher(ctx Context, dispatcher dispatcher) {
   541  	env := getWorkflowEnvironment(ctx)
   542  	panicErr := dispatcher.ExecuteUntilAllBlocked()
   543  	if panicErr != nil {
   544  		env.Complete(nil, panicErr)
   545  		return
   546  	}
   547  
   548  	rp := *getWorkflowResultPointerPointer(ctx)
   549  	if rp == nil {
   550  		// Result is not set, so workflow is still executing
   551  		return
   552  	}
   553  
   554  	us := getWorkflowEnvOptions(ctx).getUnhandledSignalNames()
   555  	if len(us) > 0 {
   556  		env.GetLogger().Info("Workflow has unhandled signals", zap.Strings("SignalNames", us))
   557  		env.GetMetricsScope().Counter(metrics.UnhandledSignalsCounter).Inc(1)
   558  	}
   559  
   560  	env.Complete(rp.workflowResult, rp.error)
   561  }
   562  
   563  // For troubleshooting stack pretty printing only.
   564  // Set to true to see full stack trace that includes framework methods.
   565  const disableCleanStackTraces = false
   566  
   567  func getState(ctx Context) *coroutineState {
   568  	s := ctx.Value(coroutinesContextKey)
   569  	if s == nil {
   570  		panic("getState: not workflow context")
   571  	}
   572  	state := s.(*coroutineState)
   573  	// When a workflow is evicted from the cache it closes the dispatcher and exits all of its coroutines.
   574  	// However, if the workflow function has a defer, that defer will still be executed, and many workflow API calls end up here.
   575  	// The following check prevents the coroutine from executing further; it would panic otherwise, as the context is no longer valid.
   576  	if state.dispatcher.closed {
   577  		runtime.Goexit()
   578  	}
   579  	if !state.dispatcher.executing {
   580  		panic(panicIllegalAccessCoroutinueState)
   581  	}
   582  	return state
   583  }
   584  
   585  func (c *channelImpl) Receive(ctx Context, valuePtr interface{}) (more bool) {
   586  	state := getState(ctx)
   587  	hasResult := false
   588  	var result interface{}
   589  	callback := &receiveCallback{
   590  		fn: func(v interface{}, m bool) bool {
   591  			result = v
   592  			hasResult = true
   593  			more = m
   594  			return true
   595  		},
   596  	}
   597  
   598  	for {
   599  		hasResult = false
   600  		v, ok, m := c.receiveAsyncImpl(callback)
   601  
   602  		if !ok && !m { // channel closed and empty
   603  			return m
   604  		}
   605  
   606  		if ok || !m {
   607  			err := c.assignValue(v, valuePtr)
   608  			if err == nil {
   609  				state.unblocked()
   610  				return m
   611  			}
   612  			continue // corrupt signal. Drop and reset process
   613  		}
   614  		for {
   615  			if hasResult {
   616  				err := c.assignValue(result, valuePtr)
   617  				if err == nil {
   618  					state.unblocked()
   619  					return more
   620  				}
   621  				break // Corrupt signal. Drop and reset process.
   622  			}
   623  			state.yield(fmt.Sprintf("blocked on %s.Receive", c.name))
   624  		}
   625  	}
   626  
   627  }
   628  
   629  func (c *channelImpl) ReceiveAsync(valuePtr interface{}) (ok bool) {
   630  	ok, _ = c.ReceiveAsyncWithMoreFlag(valuePtr)
   631  	return ok
   632  }
   633  
   634  func (c *channelImpl) ReceiveAsyncWithMoreFlag(valuePtr interface{}) (ok bool, more bool) {
   635  	for {
   636  		v, ok, more := c.receiveAsyncImpl(nil)
   637  		if !ok && !more { // channel closed and empty
   638  			return ok, more
   639  		}
   640  
   641  		err := c.assignValue(v, valuePtr)
   642  		if err != nil {
   643  			continue
   644  			// keep consuming until a good signal is hit or channel is drained
   645  		}
   646  		return ok, more
   647  	}
   648  }
   649  
   650  // ok = true means that value was received
   651  // more = true means that channel is not closed and more deliveries are possible
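        // The possible combinations are:
        //	ok=true,  more=true:  a value was received
        //	ok=false, more=true:  no value is available yet; if callback is non-nil it was registered for later delivery
        //	ok=false, more=false: the channel is closed and drained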
   652  func (c *channelImpl) receiveAsyncImpl(callback *receiveCallback) (v interface{}, ok bool, more bool) {
   653  	if c.recValue != nil {
   654  		r := *c.recValue
   655  		c.recValue = nil
   656  		return r, true, true
   657  	}
   658  	if len(c.buffer) > 0 {
   659  		r := c.buffer[0]
   660  		c.buffer[0] = nil
   661  		c.buffer = c.buffer[1:]
   662  
   663  		// Move blocked sends into buffer
   664  		for len(c.blockedSends) > 0 {
   665  			b := c.blockedSends[0]
   666  			c.blockedSends[0] = nil
   667  			c.blockedSends = c.blockedSends[1:]
   668  			if b.fn() {
   669  				c.buffer = append(c.buffer, b.value)
   670  				break
   671  			}
   672  		}
   673  
   674  		return r, true, true
   675  	}
   676  	if c.closed {
   677  		return nil, false, false
   678  	}
   679  	for len(c.blockedSends) > 0 {
   680  		b := c.blockedSends[0]
   681  		c.blockedSends[0] = nil
   682  		c.blockedSends = c.blockedSends[1:]
   683  		if b.fn() {
   684  			return b.value, true, true
   685  		}
   686  	}
   687  	if callback != nil {
   688  		c.blockedReceives = append(c.blockedReceives, callback)
   689  	}
   690  	return nil, false, true
   691  }
   692  
   693  func (c *channelImpl) removeReceiveCallback(callback *receiveCallback) {
   694  	for i, blockedCallback := range c.blockedReceives {
   695  		if callback == blockedCallback {
   696  			c.blockedReceives = append(c.blockedReceives[:i], c.blockedReceives[i+1:]...)
   697  			break
   698  		}
   699  	}
   700  }
   701  
   702  func (c *channelImpl) removeSendCallback(callback *sendCallback) {
   703  	for i, blockedCallback := range c.blockedSends {
   704  		if callback == blockedCallback {
   705  			c.blockedSends = append(c.blockedSends[:i], c.blockedSends[i+1:]...)
   706  			break
   707  		}
   708  	}
   709  }
   710  
   711  func (c *channelImpl) Send(ctx Context, v interface{}) {
   712  	state := getState(ctx)
   713  	valueConsumed := false
   714  	callback := &sendCallback{
   715  		value: v,
   716  		fn: func() bool {
   717  			valueConsumed = true
   718  			return true
   719  		},
   720  	}
   721  	ok := c.sendAsyncImpl(v, callback)
   722  	if ok {
   723  		state.unblocked()
   724  		return
   725  	}
   726  	for {
   727  		if valueConsumed {
   728  			state.unblocked()
   729  			return
   730  		}
   731  
   732  		// Check for closed in the loop as close can be called when send is blocked
   733  		if c.closed {
   734  			panic("Closed channel")
   735  		}
   736  		state.yield(fmt.Sprintf("blocked on %s.Send", c.name))
   737  	}
   738  }
   739  
   740  func (c *channelImpl) SendAsync(v interface{}) (ok bool) {
   741  	return c.sendAsyncImpl(v, nil)
   742  }
   743  
   744  func (c *channelImpl) sendAsyncImpl(v interface{}, pair *sendCallback) (ok bool) {
   745  	if c.closed {
   746  		panic("Closed channel")
   747  	}
   748  	for len(c.blockedReceives) > 0 {
   749  		blockedGet := c.blockedReceives[0].fn
   750  		c.blockedReceives[0] = nil
   751  		c.blockedReceives = c.blockedReceives[1:]
   752  		// false from callback indicates that value wasn't consumed
   753  		if blockedGet(v, true) {
   754  			return true
   755  		}
   756  	}
   757  	if len(c.buffer) < c.size {
   758  		c.buffer = append(c.buffer, v)
   759  		return true
   760  	}
   761  	if pair != nil {
   762  		c.blockedSends = append(c.blockedSends, pair)
   763  	}
   764  	return false
   765  }
   766  
   767  func (c *channelImpl) Close() {
   768  	c.closed = true
   769  	// Use a copy of blockedReceives for iteration as invoking callback could result in modification
   770  	callbacks := append(c.blockedReceives[:0:0], c.blockedReceives...)
   771  	for _, callback := range callbacks {
   772  		callback.fn(nil, false)
   773  	}
   774  	// All blocked sends are going to panic
   775  }
   776  
   777  // Takes a value and assigns it to the 'to' value; logs a metric if it is unable to deserialize.
   778  func (c *channelImpl) assignValue(from interface{}, to interface{}) error {
   779  	err := decodeAndAssignValue(c.dataConverter, from, to)
   780  	// add to metrics
   781  	if err != nil {
   782  		c.env.GetLogger().Error(fmt.Sprintf("Corrupt signal received on channel %s. Error deserializing", c.name), zap.Error(err))
   783  		c.env.GetMetricsScope().Counter(metrics.CorruptedSignalsCounter).Inc(1)
   784  	}
   785  	return err
   786  }
   787  
   788  // initialYield is called at the beginning of the coroutine execution.
   789  // stackDepth is the number of frames at the top of the stack to omit when a stack trace is generated,
   790  // to hide frames internal to the framework.
   791  func (s *coroutineState) initialYield(stackDepth int, status string) {
   792  	if s.blocked.Swap(true) {
   793  		panic("trying to block on coroutine which is already blocked, most likely a wrong Context is used to do blocking" +
   794  			" call (like Future.Get() or Channel.Receive())")
   795  	}
   796  	keepBlocked := true
   797  	for keepBlocked {
   798  		f := <-s.unblock
   799  		keepBlocked = f(status, stackDepth+1)
   800  	}
   801  	s.blocked.Swap(false)
   802  }
   803  
   804  // yield indicates that coroutine cannot make progress and should sleep
   805  // this call blocks
   806  func (s *coroutineState) yield(status string) {
   807  	s.aboutToBlock <- true
   808  	s.initialYield(3, status) // omit three levels of stack. To adjust change to 0 and count the lines to remove.
   809  	s.keptBlocked = true
   810  }
   811  
   812  func getStackTrace(coroutineName, status string, stackDepth int) string {
   813  	top := fmt.Sprintf("coroutine %s [%s]:", coroutineName, status)
   814  	// Omit top stackDepth frames + top status line.
   815  	// Omit the bottom two frames, which are the wrapping of the coroutine in a goroutine.
   816  	return getStackTraceRaw(top, stackDepth*2+1, 4)
   817  }
   818  
   819  func getStackTraceRaw(top string, omitTop, omitBottom int) string {
   820  	stack := stackBuf[:runtime.Stack(stackBuf[:], false)]
   821  	rawStack := strings.TrimRightFunc(string(stack), unicode.IsSpace)
   822  	if disableCleanStackTraces {
   823  		return rawStack
   824  	}
   825  	lines := strings.Split(rawStack, "\n")
   826  	lines = lines[omitTop : len(lines)-omitBottom]
   827  	lines = append([]string{top}, lines...)
   828  	return strings.Join(lines, "\n")
   829  }
   830  
   831  // unblocked is called by a coroutine to indicate that it made progress since the last yield (a channel or selector
   832  // was unblocked), as opposed to calling yield again after merely rechecking its condition.
   833  func (s *coroutineState) unblocked() {
   834  	s.keptBlocked = false
   835  }
   836  
   837  func (s *coroutineState) call() {
   838  	s.unblock <- func(status string, stackDepth int) bool {
   839  		return false // unblock
   840  	}
   841  	<-s.aboutToBlock
   842  }
   843  
   844  func (s *coroutineState) close() {
   845  	s.closed = true
   846  	s.aboutToBlock <- true
   847  }
   848  
   849  func (s *coroutineState) exit() {
   850  	if !s.closed {
   851  		s.unblock <- func(status string, stackDepth int) bool {
   852  			runtime.Goexit()
   853  			return true
   854  		}
   855  	}
   856  }
   857  
   858  func (s *coroutineState) stackTrace() string {
   859  	if s.closed {
   860  		return ""
   861  	}
   862  	stackCh := make(chan string, 1)
   863  	s.unblock <- func(status string, stackDepth int) bool {
   864  		stackCh <- getStackTrace(s.name, status, stackDepth+2)
   865  		return true
   866  	}
   867  	return <-stackCh
   868  }
   869  
   870  func (d *dispatcherImpl) newCoroutine(ctx Context, f func(ctx Context)) Context {
   871  	return d.newNamedCoroutine(ctx, fmt.Sprintf("%v", d.sequence+1), f)
   872  }
   873  
   874  func (d *dispatcherImpl) newNamedCoroutine(ctx Context, name string, f func(ctx Context)) Context {
   875  	state := d.newState(name)
   876  	spawned := WithValue(ctx, coroutinesContextKey, state)
   877  	go func(crt *coroutineState) {
   878  		defer crt.close()
   879  		defer func() {
   880  			if r := recover(); r != nil {
   881  				st := getStackTrace(name, "panic", 4)
   882  				crt.panicError = newWorkflowPanicError(r, st)
   883  			}
   884  		}()
   885  		crt.initialYield(1, "")
   886  		f(spawned)
   887  	}(state)
   888  	return spawned
   889  }
   890  
   891  func (d *dispatcherImpl) newState(name string) *coroutineState {
   892  	c := &coroutineState{
   893  		name:         name,
   894  		dispatcher:   d,
   895  		aboutToBlock: make(chan bool, 1),
   896  		unblock:      make(chan unblockFunc),
   897  	}
   898  	d.sequence++
   899  	d.coroutines = append(d.coroutines, c)
   900  	return c
   901  }
   902  
   903  func (d *dispatcherImpl) ExecuteUntilAllBlocked() (err error) {
   904  	d.mutex.Lock()
   905  	if d.closed {
   906  		panic("dispatcher is closed")
   907  	}
   908  	if d.executing {
   909  		panic("call to ExecuteUntilAllBlocked (possibly from a coroutine) while it is already running")
   910  	}
   911  	d.executing = true
   912  	d.mutex.Unlock()
   913  	defer func() { d.executing = false }()
   914  	allBlocked := false
   915  	// Keep executing until at least one goroutine made some progress
   916  	for !allBlocked {
   917  		// Give every coroutine a chance to execute, removing closed ones
   918  		allBlocked = true
   919  		lastSequence := d.sequence
   920  		for i := 0; i < len(d.coroutines); i++ {
   921  			c := d.coroutines[i]
   922  			if !c.closed {
   923  				// TODO: Support handling of panic in a coroutine by dispatcher.
   924  				// TODO: Dump all outstanding coroutines if one of them panics
   925  				c.call()
   926  			}
   927  			// c.call() can close the context so check again
   928  			if c.closed {
   929  				// remove the closed one from the slice
   930  				d.coroutines = append(d.coroutines[:i],
   931  					d.coroutines[i+1:]...)
   932  				i--
   933  				if c.panicError != nil {
   934  					return c.panicError
   935  				}
   936  				allBlocked = false
   937  
   938  			} else {
   939  				allBlocked = allBlocked && (c.keptBlocked || c.closed)
   940  			}
   941  		}
   942  		// Set allBlocked to false if new coroutines were created
   943  		allBlocked = allBlocked && lastSequence == d.sequence
   944  		if len(d.coroutines) == 0 {
   945  			break
   946  		}
   947  	}
   948  	return nil
   949  }
   950  
   951  func (d *dispatcherImpl) IsDone() bool {
   952  	return len(d.coroutines) == 0
   953  }
   954  
   955  func (d *dispatcherImpl) Close() {
   956  	d.mutex.Lock()
   957  	if d.closed {
   958  		d.mutex.Unlock()
   959  		return
   960  	}
   961  	d.closed = true
   962  	d.mutex.Unlock()
   963  	for i := 0; i < len(d.coroutines); i++ {
   964  		c := d.coroutines[i]
   965  		if !c.closed {
   966  			c.exit()
   967  		}
   968  	}
   969  }
   970  
   971  func (d *dispatcherImpl) StackTrace() string {
   972  	var result string
   973  	for i := 0; i < len(d.coroutines); i++ {
   974  		c := d.coroutines[i]
   975  		if !c.closed {
   976  			if len(result) > 0 {
   977  				result += "\n\n"
   978  			}
   979  			result += c.stackTrace()
   980  		}
   981  	}
   982  	return result
   983  }
   984  
   985  func (s *selectorImpl) AddReceive(c Channel, f func(c Channel, more bool)) Selector {
   986  	s.cases = append(s.cases, &selectCase{channel: c.(*channelImpl), receiveFunc: &f})
   987  	return s
   988  }
   989  
   990  func (s *selectorImpl) AddSend(c Channel, v interface{}, f func()) Selector {
   991  	s.cases = append(s.cases, &selectCase{channel: c.(*channelImpl), sendFunc: &f, sendValue: &v})
   992  	return s
   993  }
   994  
   995  func (s *selectorImpl) AddFuture(future Future, f func(future Future)) Selector {
   996  	asyncF, ok := future.(asyncFuture)
   997  	if !ok {
   998  		panic("cannot chain Future that wasn't created with workflow.NewFuture")
   999  	}
  1000  	s.cases = append(s.cases, &selectCase{future: asyncF, futureFunc: &f})
  1001  	return s
  1002  }
  1003  
  1004  func (s *selectorImpl) AddDefault(f func()) {
  1005  	s.defaultFunc = &f
  1006  }
  1007  
  1008  func (s *selectorImpl) Select(ctx Context) {
  1009  	state := getState(ctx)
  1010  	var readyBranch func()
  1011  	var cleanups []func()
  1012  	defer func() {
  1013  		for _, c := range cleanups {
  1014  			c()
  1015  		}
  1016  	}()
  1017  
  1018  	for _, pair := range s.cases {
  1019  		if pair.receiveFunc != nil {
  1020  			f := *pair.receiveFunc
  1021  			c := pair.channel
  1022  			callback := &receiveCallback{
  1023  				fn: func(v interface{}, more bool) bool {
  1024  					if readyBranch != nil {
  1025  						return false
  1026  					}
  1027  					readyBranch = func() {
  1028  						c.recValue = &v
  1029  						f(c, more)
  1030  					}
  1031  					return true
  1032  				},
  1033  			}
  1034  			v, ok, more := c.receiveAsyncImpl(callback)
  1035  			if ok || !more {
  1036  				// Select() returns in this case/branch. The callback won't be called for this case. However, callback
  1037  				// will be called for previous cases/branches. We should set readyBranch so that when other cases/branches
  1038  				// become ready they won't consume the value for this Select() call.
  1039  				readyBranch = func() {
  1040  				}
  1041  				// Avoid assigning pointer to nil interface which makes
  1042  				// c.recValue != nil and breaks the nil check at the beginning of receiveAsyncImpl
  1043  				if more {
  1044  					c.recValue = &v
  1045  				}
  1046  				f(c, more)
  1047  				return
  1048  			}
  1049  			// callback closure is added to channel's blockedReceives, we need to clean it up to avoid closure leak
  1050  			cleanups = append(cleanups, func() {
  1051  				c.removeReceiveCallback(callback)
  1052  			})
  1053  		} else if pair.sendFunc != nil {
  1054  			f := *pair.sendFunc
  1055  			c := pair.channel
  1056  			callback := &sendCallback{
  1057  				value: *pair.sendValue,
  1058  				fn: func() bool {
  1059  					if readyBranch != nil {
  1060  						return false
  1061  					}
  1062  					readyBranch = func() {
  1063  						f()
  1064  					}
  1065  					return true
  1066  				},
  1067  			}
  1068  			ok := c.sendAsyncImpl(*pair.sendValue, callback)
  1069  			if ok {
  1070  				// Select() returns in this case/branch. The callback won't be called for this case. However, callback
  1071  				// will be called for previous cases/branches. We should set readyBranch so that when other cases/branches
  1072  				// become ready they won't consume the value for this Select() call.
  1073  				readyBranch = func() {
  1074  				}
  1075  				f()
  1076  				return
  1077  			}
  1078  			// callback closure is added to channel's blockedSends, we need to clean it up to avoid closure leak
  1079  			cleanups = append(cleanups, func() {
  1080  				c.removeSendCallback(callback)
  1081  			})
  1082  		} else if pair.futureFunc != nil {
  1083  			p := pair
  1084  			f := *p.futureFunc
  1085  			callback := &receiveCallback{
  1086  				fn: func(v interface{}, more bool) bool {
  1087  					if readyBranch != nil {
  1088  						return false
  1089  					}
  1090  					readyBranch = func() {
  1091  						p.futureFunc = nil
  1092  						f(p.future)
  1093  					}
  1094  					return true
  1095  				},
  1096  			}
  1097  
  1098  			_, ok, _ := p.future.GetAsync(callback)
  1099  			if ok {
  1100  				// Select() returns in this case/branch. The callback won't be called for this case. However, callback
  1101  				// will be called for previous cases/branches. We should set readyBranch so that when other cases/branches
  1102  				// become ready they won't consume the value for this Select() call.
  1103  				readyBranch = func() {
  1104  				}
  1105  				p.futureFunc = nil
  1106  				f(p.future)
  1107  				return
  1108  			}
  1109  			// callback closure is added to future's channel's blockedReceives, need to clean up to avoid leak
  1110  			cleanups = append(cleanups, func() {
  1111  				p.future.RemoveReceiveCallback(callback)
  1112  			})
  1113  		}
  1114  	}
  1115  	if s.defaultFunc != nil {
  1116  		f := *s.defaultFunc
  1117  		f()
  1118  		return
  1119  	}
  1120  	for {
  1121  		if readyBranch != nil {
  1122  			readyBranch()
  1123  			state.unblocked()
  1124  			return
  1125  		}
  1126  		state.yield(fmt.Sprintf("blocked on %s.Select", s.name))
  1127  	}
  1128  }
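
        // Example Selector usage from workflow code (an illustrative sketch using the public
        // workflow package API; the signal and timer names are hypothetical):
        //
        //	signalCh := workflow.GetSignalChannel(ctx, "my-signal")
        //	timerFuture := workflow.NewTimer(ctx, time.Minute)
        //	var signalVal string
        //	s := workflow.NewSelector(ctx)
        //	s.AddReceive(signalCh, func(c workflow.Channel, more bool) {
        //		c.Receive(ctx, &signalVal)
        //	})
        //	s.AddFuture(timerFuture, func(f workflow.Future) {
        //		_ = f.Get(ctx, nil) // timer fired
        //	})
        //	s.Select(ctx) // blocks deterministically until one registered case is ready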
  1129  
  1130  // NewWorkflowDefinition creates a WorkflowDefinition from a Workflow
  1131  func newSyncWorkflowDefinition(workflow workflow) *syncWorkflowDefinition {
  1132  	return &syncWorkflowDefinition{workflow: workflow}
  1133  }
  1134  
  1135  func getValidatedWorkflowFunction(workflowFunc interface{}, args []interface{}, dataConverter DataConverter, r *registry) (*WorkflowType, []byte, error) {
  1136  	fnName := ""
  1137  	fType := reflect.TypeOf(workflowFunc)
  1138  	switch getKind(fType) {
  1139  	case reflect.String:
  1140  		fnName = reflect.ValueOf(workflowFunc).String()
  1141  
  1142  	case reflect.Func:
  1143  		if err := validateFunctionArgs(workflowFunc, args, true); err != nil {
  1144  			return nil, nil, err
  1145  		}
  1146  		fnName = getWorkflowFunctionName(r, workflowFunc)
  1147  
  1148  	default:
  1149  		return nil, nil, fmt.Errorf(
  1150  			"invalid 'workflowFunc' parameter provided, it can be either a workflow function or the name of a workflow type: %v",
  1151  			workflowFunc)
  1152  	}
  1153  
  1154  	if dataConverter == nil {
  1155  		dataConverter = getDefaultDataConverter()
  1156  	}
  1157  	input, err := encodeArgs(dataConverter, args)
  1158  	if err != nil {
  1159  		return nil, nil, err
  1160  	}
  1161  	return &WorkflowType{Name: fnName}, input, nil
  1162  }
  1163  
  1164  func getValidatedWorkflowOptions(ctx Context) (*workflowOptions, error) {
  1165  	p := getWorkflowEnvOptions(ctx)
  1166  	if p == nil {
  1167  		// We need task list as a compulsory parameter. This can be removed after registration
  1168  		return nil, errWorkflowOptionBadRequest
  1169  	}
  1170  	info := GetWorkflowInfo(ctx)
  1171  	if p.domain == nil || *p.domain == "" {
  1172  		// default to use current workflow's domain
  1173  		p.domain = common.StringPtr(info.Domain)
  1174  	}
  1175  	if p.taskListName == nil || *p.taskListName == "" {
  1176  		// default to use current workflow's task list
  1177  		p.taskListName = common.StringPtr(info.TaskListName)
  1178  	}
  1179  	if p.taskStartToCloseTimeoutSeconds == nil || *p.taskStartToCloseTimeoutSeconds < 0 {
  1180  		return nil, errors.New("missing or negative DecisionTaskStartToCloseTimeout")
  1181  	}
  1182  	if *p.taskStartToCloseTimeoutSeconds == 0 {
  1183  		p.taskStartToCloseTimeoutSeconds = common.Int32Ptr(defaultDecisionTaskTimeoutInSecs)
  1184  	}
  1185  	if p.executionStartToCloseTimeoutSeconds == nil || *p.executionStartToCloseTimeoutSeconds <= 0 {
  1186  		return nil, errors.New("missing or invalid ExecutionStartToCloseTimeout")
  1187  	}
  1188  	if err := validateRetryPolicy(p.retryPolicy); err != nil {
  1189  		return nil, err
  1190  	}
  1191  	if err := validateCronSchedule(p.cronSchedule); err != nil {
  1192  		return nil, err
  1193  	}
  1194  
  1195  	return p, nil
  1196  }
  1197  
  1198  func validateCronSchedule(cronSchedule string) error {
  1199  	if len(cronSchedule) == 0 {
  1200  		return nil
  1201  	}
  1202  
  1203  	_, err := cron.ParseStandard(cronSchedule)
  1204  	return err
  1205  }
  1206  
  1207  func getWorkflowEnvOptions(ctx Context) *workflowOptions {
  1208  	options := ctx.Value(workflowEnvOptionsContextKey)
  1209  	if options != nil {
  1210  		return options.(*workflowOptions)
  1211  	}
  1212  	return nil
  1213  }
  1214  
  1215  func setWorkflowEnvOptionsIfNotExist(ctx Context) Context {
  1216  	options := getWorkflowEnvOptions(ctx)
  1217  	var newOptions workflowOptions
  1218  	if options != nil {
  1219  		newOptions = *options
  1220  	} else {
  1221  		newOptions.signalChannels = make(map[string]Channel)
  1222  		newOptions.queryHandlers = make(map[string]func([]byte) ([]byte, error))
  1223  	}
  1224  	if newOptions.dataConverter == nil {
  1225  		newOptions.dataConverter = getDefaultDataConverter()
  1226  	}
  1227  	return WithValue(ctx, workflowEnvOptionsContextKey, &newOptions)
  1228  }
  1229  
  1230  func getDataConverterFromWorkflowContext(ctx Context) DataConverter {
  1231  	options := getWorkflowEnvOptions(ctx)
  1232  	if options == nil || options.dataConverter == nil {
  1233  		return getDefaultDataConverter()
  1234  	}
  1235  	return options.dataConverter
  1236  }
  1237  
  1238  func getRegistryFromWorkflowContext(ctx Context) *registry {
  1239  	env := getWorkflowEnvironment(ctx)
  1240  	return env.GetRegistry()
  1241  }
  1242  
  1243  func getContextPropagatorsFromWorkflowContext(ctx Context) []ContextPropagator {
  1244  	options := getWorkflowEnvOptions(ctx)
  1245  	return options.contextPropagators
  1246  }
  1247  
  1248  func getHeadersFromContext(ctx Context) *shared.Header {
  1249  	header := &s.Header{
  1250  		Fields: make(map[string][]byte),
  1251  	}
  1252  	contextPropagators := getContextPropagatorsFromWorkflowContext(ctx)
  1253  	for _, ctxProp := range contextPropagators {
  1254  		ctxProp.InjectFromWorkflow(ctx, NewHeaderWriter(header))
  1255  	}
  1256  	return header
  1257  }
  1258  
  1259  // getSignalChannel finds the associated channel for the signal.
  1260  func (w *workflowOptions) getSignalChannel(ctx Context, signalName string) Channel {
  1261  	if ch, ok := w.signalChannels[signalName]; ok {
  1262  		return ch
  1263  	}
  1264  	ch := NewBufferedChannel(ctx, defaultSignalChannelSize)
  1265  	w.signalChannels[signalName] = ch
  1266  	return ch
  1267  }
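
        // Example of consuming a signal from workflow code (an illustrative sketch using the public
        // workflow.GetSignalChannel API; the signal name is hypothetical):
        //
        //	var payload string
        //	signalCh := workflow.GetSignalChannel(ctx, "my-signal")
        //	signalCh.Receive(ctx, &payload) // blocks until a signal with this name arrives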
  1268  
  1269  // GetUnhandledSignalNames returns signal names that have unconsumed signals.
  1270  func GetUnhandledSignalNames(ctx Context) []string {
  1271  	return getWorkflowEnvOptions(ctx).getUnhandledSignalNames()
  1272  }
  1273  
  1274  // getUnhandledSignalNames returns signal names that have unconsumed signals.
  1275  func (w *workflowOptions) getUnhandledSignalNames() []string {
  1276  	unhandledSignals := []string{}
  1277  	for k, c := range w.signalChannels {
  1278  		ch := c.(*channelImpl)
  1279  		v, ok, _ := ch.receiveAsyncImpl(nil)
  1280  		if ok {
  1281  			unhandledSignals = append(unhandledSignals, k)
  1282  			ch.recValue = &v
  1283  		}
  1284  	}
  1285  	return unhandledSignals
  1286  }
  1287  
  1288  // KnownQueryTypes returns the list of known query types registered on the workflowOptions, combined with BuiltinQueryTypes
  1289  func (w *workflowOptions) KnownQueryTypes() []string {
  1290  	keys := BuiltinQueryTypes()
  1291  
  1292  	for k := range w.queryHandlers {
  1293  		keys = append(keys, k)
  1294  	}
  1295  
  1296  	sort.Strings(keys)
  1297  	return keys
  1298  }
  1299  
  1300  func (d *decodeFutureImpl) Get(ctx Context, value interface{}) error {
  1301  	more := d.futureImpl.channel.Receive(ctx, nil)
  1302  	if more {
  1303  		panic("not closed")
  1304  	}
  1305  	if !d.futureImpl.ready {
  1306  		panic("not ready")
  1307  	}
  1308  	if d.futureImpl.err != nil || d.futureImpl.value == nil || value == nil {
  1309  		return d.futureImpl.err
  1310  	}
  1311  	rf := reflect.ValueOf(value)
  1312  	if rf.Type().Kind() != reflect.Ptr {
  1313  		return errors.New("value parameter is not a pointer")
  1314  	}
  1315  
  1316  	err := deSerializeFunctionResult(d.fn, d.futureImpl.value.([]byte), value, getDataConverterFromWorkflowContext(ctx), d.channel.env.GetRegistry())
  1317  	if err != nil {
  1318  		return err
  1319  	}
  1320  	return d.futureImpl.err
  1321  }
  1322  
  1323  // newDecodeFuture creates a new future as well as associated Settable that is used to set its value.
  1324  // fn - the function whose result type is used to decode and validate the value.
  1325  func newDecodeFuture(ctx Context, fn interface{}) (Future, Settable) {
  1326  	impl := &decodeFutureImpl{
  1327  		&futureImpl{channel: NewChannel(ctx).(*channelImpl)}, fn}
  1328  	return impl, impl
  1329  }
  1330  
  1331  // setQueryHandler sets query handler for given queryType.
  1332  func setQueryHandler(ctx Context, queryType string, handler interface{}) error {
  1333  	qh := &queryHandler{fn: handler, queryType: queryType, dataConverter: getDataConverterFromWorkflowContext(ctx)}
  1334  	err := qh.validateHandlerFn()
  1335  	if err != nil {
  1336  		return err
  1337  	}
  1338  
  1339  	getWorkflowEnvOptions(ctx).queryHandlers[queryType] = qh.execute
  1340  	return nil
  1341  }
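
        // Example of registering a query handler from workflow code (an illustrative sketch using the
        // public workflow.SetQueryHandler API; the query name is hypothetical):
        //
        //	currentState := "started"
        //	err := workflow.SetQueryHandler(ctx, "state", func() (string, error) {
        //		return currentState, nil
        //	})
        //	if err != nil {
        //		return err
        //	}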
  1342  
  1343  func (h *queryHandler) validateHandlerFn() error {
  1344  	fnType := reflect.TypeOf(h.fn)
  1345  	if fnType.Kind() != reflect.Func {
  1346  		return fmt.Errorf("query handler must be function but was %s", fnType.Kind())
  1347  	}
  1348  
  1349  	if fnType.NumOut() != 2 {
  1350  		return fmt.Errorf(
  1351  			"query handler must return 2 values (serializable result and error), but found %d return values", fnType.NumOut(),
  1352  		)
  1353  	}
  1354  
  1355  	if !isValidResultType(fnType.Out(0)) {
  1356  		return fmt.Errorf(
  1357  			"first return value of query handler must be serializable but found: %v", fnType.Out(0).Kind(),
  1358  		)
  1359  	}
  1360  	if !isError(fnType.Out(1)) {
  1361  		return fmt.Errorf(
  1362  			"second return value of query handler must be error but found %v", fnType.Out(fnType.NumOut()-1).Kind(),
  1363  		)
  1364  	}
  1365  	return nil
  1366  }
  1367  
  1368  func (h *queryHandler) execute(input []byte) (result []byte, err error) {
  1369  	// if the query handler panics, convert the panic to an error
  1370  	defer func() {
  1371  		if p := recover(); p != nil {
  1372  			result = nil
  1373  			st := getStackTraceRaw("query handler [panic]:", 7, 0)
  1374  			if p == panicIllegalAccessCoroutinueState {
  1375  				// query handler code tried to access workflow functions outside of the workflow context; make the error message
  1376  				// more descriptive and clear.
  1377  				p = "query handler must not use cadence context to do things like workflow.NewChannel(), " +
  1378  					"workflow.Go() or to call any workflow blocking functions like Channel.Receive() or Future.Get()"
  1379  			}
  1380  			err = fmt.Errorf("query handler panic: %v, stack trace: %v", p, st)
  1381  		}
  1382  	}()
  1383  
  1384  	fnType := reflect.TypeOf(h.fn)
  1385  	var args []reflect.Value
  1386  
  1387  	if fnType.NumIn() == 1 && util.IsTypeByteSlice(fnType.In(0)) {
  1388  		args = append(args, reflect.ValueOf(input))
  1389  	} else {
  1390  		decoded, err := decodeArgs(h.dataConverter, fnType, input)
  1391  		if err != nil {
  1392  			return nil, fmt.Errorf("unable to decode the input for queryType: %v, with error: %v", h.queryType, err)
  1393  		}
  1394  		args = append(args, decoded...)
  1395  	}
  1396  
  1397  	// invoke the query handler with arguments.
  1398  	fnValue := reflect.ValueOf(h.fn)
  1399  	retValues := fnValue.Call(args)
  1400  
  1401  	// we already verified (in validateHandlerFn()) that the query handler returns 2 values
  1402  	retValue := retValues[0]
  1403  	if retValue.Kind() != reflect.Ptr || !retValue.IsNil() {
  1404  		result, err = encodeArg(h.dataConverter, retValue.Interface())
  1405  		if err != nil {
  1406  			return nil, err
  1407  		}
  1408  	}
  1409  
  1410  	errValue := retValues[1]
  1411  	if errValue.IsNil() {
  1412  		return result, nil
  1413  	}
  1414  	err, ok := errValue.Interface().(error)
  1415  	if !ok {
  1416  		return nil, fmt.Errorf("failed to parse error result as it is not of error interface: %v", errValue)
  1417  	}
  1418  	return result, err
  1419  }
  1420  
  1421  // Add adds delta, which may be negative, to the WaitGroup counter.
  1422  // If the counter becomes zero, all goroutines blocked on Wait are released.
  1423  // If the counter goes negative, Add panics.
  1424  //
  1425  // Note that calls with a positive delta that occur when the counter is zero
  1426  // must happen before a Wait. Calls with a negative delta, or calls with a
  1427  // positive delta that start when the counter is greater than zero, may happen
  1428  // at any time.
  1429  // Typically this means the calls to Add should execute before the statement
  1430  // creating the goroutine or other event to be waited for.
  1431  // If a WaitGroup is reused to wait for several independent sets of events,
  1432  // new Add calls must happen after all previous Wait calls have returned.
  1433  //
  1434  // param delta int -> the value to increment the WaitGroup counter by
  1435  func (wg *waitGroupImpl) Add(delta int) {
  1436  	wg.n = wg.n + delta
  1437  	if wg.n < 0 {
  1438  		panic("negative WaitGroup counter")
  1439  	}
  1440  	if (wg.n > 0) || (!wg.waiting) {
  1441  		return
  1442  	}
  1443  	if wg.n == 0 {
  1444  		wg.settable.Set(false, nil)
  1445  	}
  1446  }
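
        // Example WaitGroup usage from workflow code (an illustrative sketch, assuming the public
        // workflow.NewWaitGroup and workflow.Go API):
        //
        //	wg := workflow.NewWaitGroup(ctx)
        //	for i := 0; i < 10; i++ {
        //		wg.Add(1)
        //		workflow.Go(ctx, func(ctx workflow.Context) {
        //			defer wg.Done()
        //			// ... do work ...
        //		})
        //	}
        //	wg.Wait(ctx) // blocks until the counter reaches zero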
  1447  
  1448  // Done decrements the WaitGroup counter by 1, indicating
  1449  // that a coroutine in the WaitGroup has completed
  1450  func (wg *waitGroupImpl) Done() {
  1451  	wg.Add(-1)
  1452  }
  1453  
  1454  // Wait blocks and waits for the specified number of coroutines to
  1455  // finish executing and then unblocks once the counter has reached 0.
  1456  //
  1457  // param ctx Context -> workflow context
  1458  func (wg *waitGroupImpl) Wait(ctx Context) {
  1459  	if wg.n <= 0 {
  1460  		return
  1461  	}
  1462  	if wg.waiting {
  1463  		panic("WaitGroup is reused before previous Wait has returned")
  1464  	}
  1465  
  1466  	wg.waiting = true
  1467  	if err := wg.future.Get(ctx, &wg.waiting); err != nil {
  1468  		panic(err)
  1469  	}
  1470  	wg.future, wg.settable = NewFuture(ctx)
  1471  	wg.future, wg.settable = NewFuture(ctx)
  1472  }