github.com/asynkron/protoactor-go@v0.0.0-20240308120642-ef91a6abee75/actor/future.go

package actor

import (
	"context"
	"errors"
	"log/slog"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/asynkron/protoactor-go/metrics"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// ErrTimeout is the error used when a future times out before receiving a result.
var ErrTimeout = errors.New("future: timeout")

// ErrDeadLetter is returned when a request is sent to an unreachable PID (a dead letter).
var ErrDeadLetter = errors.New("future: dead letter")

// NewFuture creates and returns a new actor.Future with a timeout of duration d.
func NewFuture(actorSystem *ActorSystem, d time.Duration) *Future {
	ref := &futureProcess{Future{actorSystem: actorSystem, cond: sync.NewCond(&sync.Mutex{})}}
	id := actorSystem.ProcessRegistry.NextId()

	pid, ok := actorSystem.ProcessRegistry.Add(ref, "future"+id)
	if !ok {
		actorSystem.Logger().Error("failed to register future process", slog.Any("pid", pid))
	}

	sysMetrics, ok := actorSystem.Extensions.Get(extensionId).(*Metrics)
	if ok && sysMetrics.enabled {
		if instruments := sysMetrics.metrics.Get(metrics.InternalActorMetrics); instruments != nil {
			ctx := context.Background()
			labels := []attribute.KeyValue{
				attribute.String("address", ref.actorSystem.Address()),
			}

			instruments.FuturesStartedCount.Add(ctx, 1, metric.WithAttributes(labels...))
		}
	}

	ref.pid = pid

	if d >= 0 {
		tp := time.AfterFunc(d, func() {
			ref.cond.L.Lock()
			if ref.done {
				ref.cond.L.Unlock()

				return
			}
			ref.err = ErrTimeout
			ref.cond.L.Unlock()
			ref.Stop(pid)
		})
		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ref.t)), unsafe.Pointer(tp))
	}

	return &ref.Future
}

// Future represents the result of an asynchronous request, backed by a futureProcess.
type Future struct {
	actorSystem *ActorSystem
	pid         *PID
	cond        *sync.Cond
	// the fields below are protected by cond
	done        bool
	result      interface{}
	err         error
	t           *time.Timer
	pipes       []*PID
	completions []func(res interface{}, err error)
}

// PID returns the PID of the backing process for the Future result.
func (f *Future) PID() *PID {
	return f.pid
}

// PipeTo forwards the result or error of the future to the specified pids.
func (f *Future) PipeTo(pids ...*PID) {
	f.cond.L.Lock()
	f.pipes = append(f.pipes, pids...)
	// for an already completed future, force push the result to targets.
	if f.done {
		f.sendToPipes()
	}
	f.cond.L.Unlock()
}

func (f *Future) sendToPipes() {
	if f.pipes == nil {
		return
	}

	var m interface{}
	if f.err != nil {
		m = f.err
	} else {
		m = f.result
	}

	for _, pid := range f.pipes {
		pid.sendUserMessage(f.actorSystem, m)
	}

	f.pipes = nil
}

func (f *Future) wait() {
	f.cond.L.Lock()
	for !f.done {
		f.cond.Wait()
	}
	f.cond.L.Unlock()
}
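// exampleAwaitWithPipe is an illustrative sketch, not part of the upstream file.
// It assumes a running ActorSystem and a live target PID supplied by the caller.
// It shows the typical lifecycle of a Future created by NewFuture: any message
// sent to f.PID() before the deadline resolves it, PipeTo forwards the outcome
// to other PIDs, and Result blocks until resolution or ErrTimeout.
func exampleAwaitWithPipe(actorSystem *ActorSystem, target *PID) (interface{}, error) {
	f := NewFuture(actorSystem, 2*time.Second)

	// Forward the eventual result (or error) to target as well.
	f.PipeTo(target)

	// Blocks until the backing futureProcess is stopped with a result,
	// a dead letter, or the timeout fires.
	return f.Result()
}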
// Result waits for the future to resolve and returns the result together with any error.
func (f *Future) Result() (interface{}, error) {
	f.wait()

	return f.result, f.err
}

// Wait waits for the future to resolve and returns only the error.
func (f *Future) Wait() error {
	f.wait()

	return f.err
}

func (f *Future) continueWith(continuation func(res interface{}, err error)) {
	f.cond.L.Lock()
	defer f.cond.L.Unlock() // use defer as the continuation could blow up
	if f.done {
		continuation(f.result, f.err)
	} else {
		f.completions = append(f.completions, continuation)
	}
}

// futureProcess is the Process implementation backing a Future; it embeds the
// Future and resolves it when a response arrives or the process is stopped.
type futureProcess struct {
	Future
}

var _ Process = &futureProcess{}

func (ref *futureProcess) SendUserMessage(pid *PID, message interface{}) {
	defer ref.instrument()

	_, msg, _ := UnwrapEnvelope(message)

	if _, ok := msg.(*DeadLetterResponse); ok {
		ref.result = nil
		ref.err = ErrDeadLetter
	} else {
		ref.result = msg
	}

	ref.Stop(pid)
}

func (ref *futureProcess) SendSystemMessage(pid *PID, message interface{}) {
	defer ref.instrument()
	ref.result = message
	ref.Stop(pid)
}

func (ref *futureProcess) instrument() {
	sysMetrics, ok := ref.actorSystem.Extensions.Get(extensionId).(*Metrics)
	if ok && sysMetrics.enabled {
		ctx := context.Background()
		labels := []attribute.KeyValue{
			attribute.String("address", ref.actorSystem.Address()),
		}

		instruments := sysMetrics.metrics.Get(metrics.InternalActorMetrics)
		if instruments != nil {
			if ref.err == nil {
				instruments.FuturesCompletedCount.Add(ctx, 1, metric.WithAttributes(labels...))
			} else {
				instruments.FuturesTimedOutCount.Add(ctx, 1, metric.WithAttributes(labels...))
			}
		}
	}
}

func (ref *futureProcess) Stop(pid *PID) {
	ref.cond.L.Lock()
	if ref.done {
		ref.cond.L.Unlock()

		return
	}

	ref.done = true
	tp := (*time.Timer)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ref.t))))

	if tp != nil {
		tp.Stop()
	}

	ref.actorSystem.ProcessRegistry.Remove(pid)

	ref.sendToPipes()
	ref.runCompletions()
	ref.cond.L.Unlock()
	ref.cond.Signal()
}

// TODO: we could replace "pipes" with this:
// instead of pushing PIDs to pipes, we could push wrapper funcs that tell the pid,
// as completions; that would unify the model.
func (f *Future) runCompletions() {
	if f.completions == nil {
		return
	}

	for _, c := range f.completions {
		c(f.result, f.err)
	}

	f.completions = nil
}
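// exampleContinueWith is an illustrative sketch, not part of the upstream file.
// It shows how a continuation is attached with continueWith: the callback runs
// immediately if the future has already completed, otherwise it is queued and
// invoked by Stop together with the piped PIDs. The log calls are only a
// stand-in for real completion handling.
func exampleContinueWith(actorSystem *ActorSystem) {
	f := NewFuture(actorSystem, time.Second)

	f.continueWith(func(res interface{}, err error) {
		if err != nil {
			actorSystem.Logger().Error("future failed", slog.Any("error", err))

			return
		}
		actorSystem.Logger().Info("future resolved", slog.Any("result", res))
	})
}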