-- Health assessment for SparkApplication resources (sparkoperator.k8s.io).
-- Argo CD supplies the live resource in the global `obj` and expects a
-- table with `status` and `message` fields to be returned.
local health_status = {}
-- The Lua sandbox has no standard lib, so math.huge is unavailable; this
-- evaluates to +inf in double arithmetic and serves as an equivalent.
local infinity = 2^1024-1

-- Executor bounds from the operator API fields (spec.dynamicAllocation).
-- Returns: min, max executor instance counts (defaults 0, infinity).
local function executor_range_api()
  local min_executor_instances = 0
  local max_executor_instances = infinity
  if obj.spec.dynamicAllocation.maxExecutors then
    max_executor_instances = obj.spec.dynamicAllocation.maxExecutors
  end
  if obj.spec.dynamicAllocation.minExecutors then
    min_executor_instances = obj.spec.dynamicAllocation.minExecutors
  end
  return min_executor_instances, max_executor_instances
end

-- Executor bounds from spec.sparkConf when dynamic allocation is enabled
-- through Spark configuration (streaming keys take precedence over batch).
-- Returns: min, max counts, or nil when dynamic allocation is not enabled.
local function maybe_executor_range_spark_conf()
  local min_executor_instances = 0
  local max_executor_instances = infinity
  -- Note: `== "true"` already implies the key is non-nil, so no separate
  -- nil check is needed.
  if obj.spec.sparkConf["spark.streaming.dynamicAllocation.enabled"] == "true" then
    if obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"] ~= nil then
      max_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.maxExecutors"])
    end
    if obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"] ~= nil then
      min_executor_instances = tonumber(obj.spec.sparkConf["spark.streaming.dynamicAllocation.minExecutors"])
    end
    return min_executor_instances, max_executor_instances
  elseif obj.spec.sparkConf["spark.dynamicAllocation.enabled"] == "true" then
    if obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"] ~= nil then
      max_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.maxExecutors"])
    end
    if obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"] ~= nil then
      min_executor_instances = tonumber(obj.spec.sparkConf["spark.dynamicAllocation.minExecutors"])
    end
    return min_executor_instances, max_executor_instances
  else
    return nil
  end
end

-- Resolve the allowed executor count range from whichever mechanism the
-- spec uses (API fields first, then sparkConf). Returns nil when dynamic
-- allocation is not configured.
local function maybe_executor_range()
  if obj.spec["dynamicAllocation"] and obj.spec.dynamicAllocation.enabled then
    return executor_range_api()
  elseif obj.spec["sparkConf"] ~= nil then
    return maybe_executor_range_spark_conf()
  else
    return nil
  end
end

-- True when the spec pins neither a dynamic-allocation block nor a fixed
-- executor count, i.e. executors are managed entirely outside the spec.
local function dynamic_executors_without_spec_config()
  return obj.spec.dynamicAllocation == nil and obj.spec.executor.instances == nil
end

-- Guard applicationState as well as status: dereferencing a missing table
-- would raise a Lua error and abort the health assessment.
if obj.status ~= nil and obj.status.applicationState ~= nil then
  local state = obj.status.applicationState.state
  if state ~= nil then
    if state == "" then
      health_status.status = "Progressing"
      health_status.message = "SparkApplication was added, enqueuing it for submission"
      return health_status
    end
    if state == "RUNNING" then
      if obj.status.executorState ~= nil then
        -- `local` is required here: without it `count` leaks into the
        -- global environment of the Lua VM.
        local count = 0
        for _, executorState in pairs(obj.status.executorState) do
          if executorState == "RUNNING" then
            count = count + 1
          end
        end
        -- Call maybe_executor_range() once and reuse both results instead
        -- of evaluating it twice (it is side-effect free).
        local min_executor_instances, max_executor_instances = maybe_executor_range()
        if obj.spec.executor.instances ~= nil and obj.spec.executor.instances == count then
          health_status.status = "Healthy"
          health_status.message = "SparkApplication is Running"
          return health_status
        elseif min_executor_instances ~= nil then
          if count >= min_executor_instances and count <= max_executor_instances then
            health_status.status = "Healthy"
            health_status.message = "SparkApplication is Running"
            return health_status
          end
        elseif dynamic_executors_without_spec_config() and count >= 1 then
          health_status.status = "Healthy"
          health_status.message = "SparkApplication is Running"
          return health_status
        end
      end
    end
    if state == "SUBMITTED" then
      health_status.status = "Progressing"
      health_status.message = "SparkApplication was submitted successfully"
      return health_status
    end
    if state == "COMPLETED" then
      health_status.status = "Healthy"
      health_status.message = "SparkApplication was Completed"
      return health_status
    end
    if state == "FAILED" then
      health_status.status = "Degraded"
      health_status.message = obj.status.applicationState.errorMessage
      return health_status
    end
    if state == "SUBMISSION_FAILED" then
      health_status.status = "Degraded"
      health_status.message = obj.status.applicationState.errorMessage
      return health_status
    end
    if state == "PENDING_RERUN" then
      health_status.status = "Progressing"
      health_status.message = "SparkApplication is Pending Rerun"
      return health_status
    end
    if state == "INVALIDATING" then
      health_status.status = "Missing"
      health_status.message = "SparkApplication is in InvalidatingState"
      return health_status
    end
    if state == "SUCCEEDING" then
      health_status.status = "Progressing"
      health_status.message = [[The driver pod has been completed successfully. The executor pods terminate and are cleaned up.
Under this circumstances, we assume the executor pod are completed.]]
      return health_status
    end
    if state == "FAILING" then
      health_status.status = "Degraded"
      health_status.message = obj.status.applicationState.errorMessage
      return health_status
    end
    if state == "UNKNOWN" then
      health_status.status = "Progressing"
      health_status.message = "SparkApplication is in UnknownState because either driver pod or one or all executor pods in unknown state "
      return health_status
    end
  end
end
-- No recognizable state yet: report the default in-progress condition.
health_status.status = "Progressing"
health_status.message = "Waiting for Executor pods"
return health_status