github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/design/tla/WorkerImpl.tla (about)

     1  ---------------------------- MODULE WorkerImpl ----------------------------------
     2  
     3  EXTENDS TLC, Types, Tasks, EventCounter
     4  
     5  (*
     6  `WorkerSpec' provides a high-level specification of worker nodes that only refers to
     7  the state of the tasks recorded in SwarmKit's store. This specification (WorkerImpl)
     8  refines WorkerSpec by also modelling the state of the containers running on a node.
     9  It should be easier to see that this lower-level specification corresponds to what
    10  actually happens on worker nodes.
    11  
    12  The reason for having this in a separate specification is that including the container
    13  state greatly increases the number of states to be considered and so slows down model
    14  checking. Instead of checking
    15  
    16    SwarmKit /\ WorkerImpl => EventuallyAsDesired
    17  
    18  (which is very slow), we check two separate expressions:
    19  
    20    SwarmKit /\ WorkerSpec => EventuallyAsDesired
    21    WorkerImpl => WorkerSpec
    22  
    23  TLAPS can check that separating the specification in this way makes sense: *)
    24  THEOREM ASSUME TEMPORAL SwarmKit, TEMPORAL WorkerSpec,
    25                 TEMPORAL WorkerImpl, TEMPORAL EventuallyAsDesired,
    26                 TEMPORAL Env,  \* A simplified version of SwarmKit
    27                 SwarmKit /\ WorkerSpec => EventuallyAsDesired,  \* the first TLC check described above
    28                 Env /\ WorkerImpl => WorkerSpec,  \* cf. the `WorkerImpl => WorkerSpec' check above
    29                 SwarmKit => Env  \* NOTE(review): presumably justified by inspection, not model-checked -- confirm
    30          PROVE  SwarmKit /\ WorkerImpl => EventuallyAsDesired
    31  OBVIOUS  \* Purely propositional reasoning, intended to be discharged by TLAPS.
    32  
    33  \* This worker's node ID
    34  CONSTANT node
    35  ASSUME node \in Node
    36  
    37  VARIABLES nodes         \* Defined in WorkerSpec.tla
    38  VARIABLE containers     \* The actual container state on the node, indexed by ModelTaskId
    39  
    40  (* The high-level specification of worker nodes.
    41     This module should be a refinement of `WS'. *)
    42  WS == INSTANCE WorkerSpec
    43  
    44  terminating == "terminating"        \* A container which we're trying to stop
    45  
        \* NOTE(review): `running', `complete' and `failed' are not defined in this
        \* module, so they presumably come from an EXTENDed module (Types) -- confirm.
    46  (* The state of an actual container on a worker node. *)
    47  ContainerState == { running, terminating, complete, failed }
    48  
    49  (* A running container finishes running on its own (or crashes). *)
    50  ContainerExit ==
    51    /\ UNCHANGED << nodes, tasks >>  \* Only local container state changes; SwarmKit learns of it later via DoSync.
    52    /\ CountEvent                    \* Counted so models can bound the number of spontaneous exits.
    53    /\ \E id \in DOMAIN containers,
    54          s2 \in {failed, complete} :      \* Either a successful or failed exit status
    55          /\ containers[id] = running      \* Only a running container can exit on its own.
    56          /\ containers' = [containers EXCEPT ![id] = s2]
    57  
    58  (* A running container finishes because we stopped it. *)
    59  ShutdownComplete ==
    60    /\ UNCHANGED << nodes, tasks, nEvents >>  \* Not a counted event: we triggered this stop ourselves.
    61    /\ \E id \in DOMAIN containers :
    62          /\ containers[id] = terminating
        \* A stopped container ends in `failed'; RequiredTaskUpdates later reports
        \* this to SwarmKit as a successful `shutdown'.
    63          /\ containers' = [containers EXCEPT ![id] = failed]
    64  
    65  (* SwarmKit thinks the node is up. i.e. the agent is connected to a manager. *)
    66  IsUp(n) == WS!IsUp(n)  \* Delegate to WorkerSpec so both levels agree on connectivity.
    67  
    68  (* The new value that `containers' should take after getting an update from the
    69     managers. If the managers asked us to run a container and then stop mentioning
    70     that task, we shut the container down and (once stopped) remove it. *)
    71  DesiredContainers ==
    72    LET WantShutdown(id) ==
    73          \* The managers stop mentioning the task, or ask for it to be stopped.
    74          \/ id \notin IdSet(tasks)
        \* NOTE(review): CHOOSE is only well-defined here if at most one task has
        \* this id; presumably the Tasks module guarantees id uniqueness -- confirm.
    75          \/ running \prec (CHOOSE t \in tasks : Id(t) = id).desired_state
    76        (* Remove containers that no longer have tasks, once they've stopped. *)
    77        rm == { id \in DOMAIN containers :
    78                    /\ containers[id] \in { complete, failed }
    79                    /\ id \notin IdSet(tasks) }
        \* DOMAIN binds tighter than `\', so this is (DOMAIN containers) \ rm,
        \* i.e. the map is rebuilt over only the containers we keep.
    80    IN [ id \in DOMAIN containers \ rm |->
    81             IF containers[id] = running /\ WantShutdown(id) THEN terminating
    82             ELSE containers[id]
    83       ]
    84  
    85  (* The updates that SwarmKit should apply to its store to bring it up-to-date
    86     with the real state of the containers. *)
    87  RequiredTaskUpdates ==
    88    LET \* Tasks the manager is expecting news about:
    89        oldTasks == { t \in tasks : t.node = node /\ State(t) = running }
    90        \* The state to report for task `t':
    91        ReportFor(t) ==
    92          IF Id(t) \notin DOMAIN containers THEN \* We were asked to forget about this container.
    93            shutdown \* SwarmKit doesn't care which terminal state we finish in.
    94          ELSE IF /\ containers[Id(t)] = failed       \* It's terminated and
    95                  /\ t.desired_state = shutdown THEN  \* we wanted to shut it down,
    96            shutdown \* Report a successful shutdown
    97          ELSE IF containers[Id(t)] = terminating THEN
    98            running  \* SwarmKit doesn't record progress of the shutdown
    99          ELSE
   100            containers[Id(t)]  \* Report the actual state
        \* Only tasks already recorded as `running' are updated here; earlier
        \* states are advanced by ProgressTask instead.
   101    IN [ t \in oldTasks |-> [ t EXCEPT !.status.state = ReportFor(t) ]]
   102  
   103  (* Our node synchronises its state with a manager. *)
   104  DoSync ==
   105     /\ containers' = DesiredContainers    \* Adjust local containers to the current assignment set...
   106     /\ UpdateTasks(RequiredTaskUpdates)   \* ...and report container states back to the store.
   107  
   108  (* Try to advance containers towards `desired_state' if we're not there yet.
   109  
   110     XXX: do we need a connection to the manager to do this, or can we make progress
   111     while disconnected and just report the final state?
   112  *)
   113  ProgressTask ==
   114    /\ UNCHANGED << nodes, nEvents >>
   115    /\ \E t  \in tasks,
   116          s2 \in TaskState :   \* The state we want to move to
   117          LET t2 == [t EXCEPT !.status.state = s2]
   118          IN
   119          /\ s2 \preceq t.desired_state       \* Can't be after the desired state
   120          /\ << State(t), State(t2) >> \in {  \* Possible ``progress'' (desirable) transitions
   121               << assigned, accepted >>,
   122               << accepted, preparing >>,
   123               << preparing, ready >>,
   124               << ready, starting >>,
   125               << starting, running >>
   126             }
   127          /\ IsUp(t.node)                     \* Node must be connected to SwarmKit
   128          /\ IF s2 = running THEN
   129                \* The container started running
        \* `:>' builds a one-entry function and `@@' merges with the left side
        \* taking precedence, so this adds (or overwrites) the entry for Id(t).
   130                containers' = Id(t) :> running @@ containers
   131             ELSE
   132                UNCHANGED containers
   133          /\ UpdateTasks(t :> t2)
   134  
   135  (* The agent on the node synchronises with a manager. *)
   136  SyncWithManager ==
   137    /\ UNCHANGED << nodes, nEvents >>
   138    /\ IsUp(node)   \* Synchronisation needs a live connection to a manager.
   139    /\ DoSync
   140  
   141  (* We can reject a task once we're responsible for it (it has reached `assigned')
   142     until it reaches the `running' state.
   143     Note that an ``accepted'' task can still be rejected. *)
   144  RejectTask ==
   145    /\ UNCHANGED << nodes, containers >>
   146    /\ CountEvent   \* Counted so models can bound the number of rejections.
   147    /\ \E t \in tasks :
   148         /\ State(t) \in { assigned, accepted, preparing, ready, starting }
   149         /\ t.node = node   \* Only our own tasks...
   150         /\ IsUp(node)      \* ...and only while connected to a manager.
   151         /\ UpdateTasks(t :> [t EXCEPT !.status.state = rejected])
   152  
   153  (* The dispatcher notices that the worker is down (the connection is lost). *)
   154  WorkerDown ==
   155    /\ UNCHANGED << tasks, containers >>
   156    /\ CountEvent
        \* NOTE(review): this ranges over every node, not just `node', so it also
        \* models other workers losing their connections -- confirm this is intended.
   157    /\ \E n \in Node :
   158         /\ IsUp(n)
   159         /\ nodes' = [nodes EXCEPT ![n] = WS!nodeDown]
   160  
   161  (* When the node reconnects to the cluster, it gets an assignment set from the dispatcher
   162     which does not include any tasks that have been marked orphaned and then deleted.
   163     Any time an agent gets an assignment set that does not include some task it has running,
   164     it shuts down those tasks.
   165  
   166     We model this separately with the `SyncWithManager' action. *)
   167  WorkerUp ==
   168    /\ UNCHANGED << nEvents, containers, tasks >>  \* Not counted: reconnection is required eventually (see AgentProgress).
   169    /\ \E n \in Node :   \* Any node may reconnect, not just ours.
   170         /\ ~IsUp(n)
   171         /\ nodes' = [nodes EXCEPT ![n] = WS!nodeUp]
   172  
   173  (* Tasks assigned to a node and for which the node is responsible. *)
   174  TasksOwnedByNode(n) == { t \in tasks :
   175    /\ t.node = n
   176    /\ assigned \preceq State(t)   \* The agent owns states from `assigned' onwards...
   177    /\ State(t) \prec remove       \* ...but not once the task is marked for removal.
   178  }
   179  
   180  (* If SwarmKit sees a node as down for a long time (48 hours or so) then
   181     it marks all the node's tasks as orphaned.
   182     Note that this sets the actual state, not the desired state.
   183  
   184     ``Moving a task to the Orphaned state is not desirable,
   185     because it's the one case where we break the otherwise invariant
   186     that the agent sets all states past ASSIGNED.''
   187  *)
   188  OrphanTasks ==
   189    /\ UNCHANGED << nodes, containers, nEvents >>
        \* Only tasks that are still runnable are orphaned; tasks already in a
        \* terminal state keep that state.
   190    /\ LET affected == { t \in TasksOwnedByNode(node) : Runnable(t) }
   191       IN
   192       /\ ~IsUp(node)    \* Our connection to the agent is down
   193       /\ UpdateTasks([ t \in affected |->
   194                           [t EXCEPT !.status.state = orphaned] ])
   195  
   196  (* The worker reboots. All containers are terminated. *)
   197  WorkerReboot ==
   198    /\ UNCHANGED << nodes, tasks >>
   199    /\ CountEvent   \* Counted so models can bound the number of reboots.
   200    /\ containers' = [ id \in DOMAIN containers |->
   201                         LET state == containers[id]
        \* The CASE has no OTHER arm, but it is exhaustive: ContainerState is
        \* exactly {running, terminating, complete, failed}.
   202                         IN  CASE state \in {running, terminating} -> failed
   203                               [] state \in {complete, failed}     -> state
   204                     ]
   205  
   206  (* Actions we require to happen eventually when possible. *)
        \* `Impl' demands weak fairness of exactly this disjunction.
   207  AgentProgress ==
   208    \/ ProgressTask
   209    \/ OrphanTasks
   210    \/ WorkerUp
   211    \/ ShutdownComplete
   212    \/ SyncWithManager
   213  
   214  (* All actions of the agent/worker. *)
        \* The extra actions are allowed but not required (no fairness):
        \* failures, rejections and reboots may simply never happen.
   215  Agent ==
   216    \/ AgentProgress
   217    \/ RejectTask
   218    \/ WorkerDown
   219    \/ ContainerExit
   220    \/ WorkerReboot
   221  
   222  -------------------------------------------------------------------------------
   223  \* A simplified model of the rest of the system
   224  
   225  (* A new task is created. *)
   226  CreateTask ==
   227    /\ UNCHANGED << containers, nEvents, nodes >>
   228    /\ \E t \in Task :    \* `t' is the new task
   229        \* Don't reuse IDs (only really an issue for model checking)
   230        /\ Id(t) \notin IdSet(tasks)
   231        /\ Id(t) \notin DOMAIN containers
   232        /\ State(t) = new
   233        /\ t.desired_state \in { ready, running }
   234        /\ \/ /\ t.node = unassigned  \* A task for a replicated service
   235              /\ t.slot \in Slot
   236           \/ /\ t.node \in Node      \* A task for a global service
   237              /\ t.slot = global
   238        /\ ~\E t2 \in tasks : \* All tasks of a service have the same mode
   239              /\ t.service = t2.service
   240              /\ (t.slot = global) # (t2.slot = global)  \* `#' on the two tests: exactly one is global.
   241        /\ tasks' = tasks \union {t}
   242  
   243  (* States before `assigned' aren't shared with worker nodes, so modelling them
   244     isn't very useful. You can use this in a model to override `CreateTask' to
   245     speed things up a bit. It creates tasks directly in the `assigned' state. *)
   246  CreateTaskQuick ==
   247    /\ UNCHANGED << containers, nEvents, nodes >>
   248    /\ \E t \in tasks' \in {} \* (unused) -- see note below
   259  
   260  (* The state or desired_state of a task is updated. *)
   261  UpdateTask ==
   262    /\ UNCHANGED << containers, nEvents, nodes >>
   263    /\ \E t \in tasks, t2 \in Task :  \* `t' becomes `t2'
   264          /\ Id(t) = Id(t2)           \* The ID can't change
        \* If only desired_state etc. change, this implication holds vacuously;
        \* a state change must appear in some actor's transition table.
   265          /\ State(t) # State(t2) =>  \* If the state changes then
   266               \E actor \in DOMAIN Transitions :  \* it is a legal transition
   267                   /\ actor = "agent"  =>  t.node # node    \* and not one our worker does
   268                   /\ << State(t), State(t2) >> \in Transitions[actor]
   269          \* When tasks reach the `assigned' state, they must have a node
   270          /\ IF State(t2) = assigned /\ t.node = unassigned THEN t2.node \in Node
   271                                                            ELSE t2.node = t.node
   272          /\ UpdateTasks(t :> t2)
   273  
   274  (* The reaper removes a task. *)
   275  RemoveTask ==
   276    /\ UNCHANGED << containers, nEvents, nodes >>
   277    /\ \E t \in tasks :
   278        /\ << State(t), null >> \in Transitions.reaper  \* NOTE(review): `null' presumably marks deletion in the transition table -- confirm.
   279        /\ tasks' = tasks \ {t}
   280  
   281  (* Actions of our worker's environment (i.e. SwarmKit and other workers). *)
        \* NOTE(review): presumably this plays the role of `Env' in the THEOREM
        \* at the top of the module -- confirm.
   282  OtherComponent ==
   283    \/ CreateTask
   284    \/ UpdateTask
   285    \/ RemoveTask
   286  
   287  -------------------------------------------------------------------------------
   288  \* A complete system
   289  
   290  vars == << tasks, nEvents, nodes, containers >>   \* All variables of this specification.
   291  
   292  Init ==
   293    /\ tasks = {}
   294    /\ containers = << >>                      \* The empty function: no containers yet.
   295    /\ nodes = [ n \in Node |-> WS!nodeUp ]    \* Every node starts connected.
   296    /\ InitEvents
   297  
   298  Next ==
   299    \/ OtherComponent
   300    \/ Agent
   301  
   302  (* The specification for our worker node. *)
        \* Weak fairness is demanded only of AgentProgress: the worker must keep
        \* making progress, but the failure actions in `Agent' need never occur.
   303  Impl == Init /\ [][Next]_vars /\ WF_vars(AgentProgress)
   304  
   305  -------------------------------------------------------------------------------
   306  
   307  TypeOK ==
   308    /\ TasksTypeOK   \* NOTE(review): presumably defined in the Tasks module -- confirm.
   309    \* The node's container map maps IDs to states
   310    /\ DOMAIN containers \in SUBSET ModelTaskId
   311    /\ containers \in [ DOMAIN containers -> ContainerState ]
   312  
   313  wsVars == << tasks, nEvents, nodes >>   \* The variables visible to WorkerSpec (no `containers').
   314  
   315  (* We want to check that a worker implementing `Impl' is also implementing
   316     `WorkerSpec'. i.e. we need to check that Impl => WSSpec. *)
        \* No initial predicate here: refinement is checked from Impl's initial
        \* states. NOTE(review): confirm this matches the model configuration.
   317  WSSpec ==
   318    /\ [][WS!Agent \/ OtherComponent]_wsVars
   319    /\ WF_wsVars(WS!AgentProgress)
   320  
   321  =============================================================================