github.com/google/syzkaller@v0.0.0-20240517125934-c0f1611a36d6/sys/linux/dev_mali.txt

# Copyright 2024 syzkaller project authors. All rights reserved.
# Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

# Generated on android14-gs-pixel-5.15-udc-d1
# CSF and non-csf builds have overlapping definitions. Must define
# "MALI_USE_CSF" during generation.
meta noextract

include <gpu/common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h>
include <gpu/common/include/uapi/gpu/arm/midgard/mali_base_kernel.h>
include <gpu/common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h>
include <gpu/mali_kbase/mali_kbase.h>
include <uapi/linux/fcntl.h>

define max_supported_streams	(MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP)

resource gpu_heap_va[int64]
resource kcpu_queue_id[int8]
resource cs_queue_group_handle[int8]

ioctl$KBASE_IOCTL_READ_USER_PAGE(fd fd_bifrost, cmd const[KBASE_IOCTL_READ_USER_PAGE], arg ptr[inout, kbase_ioctl_read_user_page])
ioctl$KBASE_IOCTL_MEM_ALLOC_EX(fd fd_bifrost, cmd const[KBASE_IOCTL_MEM_ALLOC_EX], arg ptr[inout, kbase_ioctl_mem_alloc_ex])
ioctl$KBASE_IOCTL_CS_CPU_QUEUE_DUMP(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_CPU_QUEUE_DUMP], arg ptr[in, kbase_ioctl_cs_cpu_queue_info])
ioctl$KBASE_IOCTL_CS_GET_GLB_IFACE(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_GET_GLB_IFACE], arg ptr[inout, kbase_ioctl_cs_get_glb_iface])
ioctl$KBASE_IOCTL_CS_TILER_HEAP_TERM(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_TILER_HEAP_TERM], arg ptr[in, kbase_ioctl_cs_tiler_heap_term])
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13], arg ptr[inout, kbase_ioctl_cs_tiler_heap_init_1_13])
ioctl$KBASE_IOCTL_CS_TILER_HEAP_INIT(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_TILER_HEAP_INIT], arg ptr[inout, kbase_ioctl_cs_tiler_heap_init])
ioctl$KBASE_IOCTL_KCPU_QUEUE_ENQUEUE(fd fd_bifrost, cmd const[KBASE_IOCTL_KCPU_QUEUE_ENQUEUE], arg ptr[in, kbase_ioctl_kcpu_queue_enqueue])
ioctl$KBASE_IOCTL_KCPU_QUEUE_DELETE(fd fd_bifrost, cmd const[KBASE_IOCTL_KCPU_QUEUE_DELETE], arg ptr[in, kbase_ioctl_kcpu_queue_delete])
ioctl$KBASE_IOCTL_KCPU_QUEUE_CREATE(fd fd_bifrost, cmd const[KBASE_IOCTL_KCPU_QUEUE_CREATE], arg ptr[out, kbase_ioctl_kcpu_queue_new])
ioctl$KBASE_IOCTL_CS_EVENT_SIGNAL(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_EVENT_SIGNAL])
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE], arg ptr[in, kbase_ioctl_cs_queue_group_terminate])
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_GROUP_CREATE], arg ptr[inout, kbase_ioctl_cs_queue_group_create])
ioctl$KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6], arg ptr[inout, kbase_ioctl_cs_queue_group_create_1_6])
ioctl$KBASE_IOCTL_CS_QUEUE_TERMINATE(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_TERMINATE], arg ptr[in, kbase_ioctl_cs_queue_terminate])
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER_EX(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_REGISTER_EX], arg ptr[in, kbase_ioctl_cs_queue_register_ex])
ioctl$KBASE_IOCTL_CS_QUEUE_BIND(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_BIND], arg ptr[inout, kbase_ioctl_cs_queue_bind])
ioctl$KBASE_IOCTL_CS_QUEUE_KICK(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_KICK], arg ptr[in, kbase_ioctl_cs_queue_kick])
ioctl$KBASE_IOCTL_CS_QUEUE_REGISTER(fd fd_bifrost, cmd const[KBASE_IOCTL_CS_QUEUE_REGISTER], arg ptr[in, base_ioctl_cs_queue_register])
ioctl$KBASE_IOCTL_VERSION_CHECK_RESERVED(fd fd_bifrost, cmd const[KBASE_IOCTL_VERSION_CHECK_RESERVED], arg ptr[inout, kbase_ioctl_version_check])
ioctl$KBASE_IOCTL_BUFFER_LIVENESS_UPDATE(fd fd_bifrost, cmd const[KBASE_IOCTL_BUFFER_LIVENESS_UPDATE], arg ptr[in, kbase_ioctl_buffer_liveness_update])
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_SETUP(fd fd_bifrost, cmd const[KBASE_IOCTL_KINSTR_PRFCNT_SETUP], arg ptr[inout, kbase_ioctl_kinstr_prfcnt_setup])
ioctl$KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO(fd fd_bifrost, cmd const[KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO], arg ptr[inout, kbase_ioctl_kinstr_prfcnt_enum_info])
ioctl$KBASE_IOCTL_SET_LIMITED_CORE_COUNT(fd fd_bifrost, cmd const[KBASE_IOCTL_SET_LIMITED_CORE_COUNT], arg ptr[in, kbase_ioctl_set_limited_core_count])
ioctl$KBASE_IOCTL_CONTEXT_PRIORITY_CHECK(fd fd_bifrost, cmd const[KBASE_IOCTL_CONTEXT_PRIORITY_CHECK], arg ptr[inout, kbase_ioctl_context_priority_check])
ioctl$KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES(fd fd_hwcnt, cmd const[KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES], arg ptr[out, kbase_hwcnt_reader_metadata_with_cycles])
ioctl$KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES(fd fd_hwcnt, cmd const[KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES], arg ptr[in, kbase_hwcnt_reader_metadata_with_cycles])
ioctl$KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES(fd fd_hwcnt, cmd const[KBASE_HWCNT_READER_GET_API_VERSION], arg ptr[out, kbase_hwcnt_reader_api_version])
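
# Illustration only (a comment; ignored by the descriptions compiler): a
# minimal userspace sketch of the CS queue flow that the
# KBASE_IOCTL_CS_QUEUE_REGISTER / _KICK calls above model, assuming the
# driver's mali_kbase_ioctl.h uapi header is on the include path. In real use
# the queue is also bound to a queue group via KBASE_IOCTL_CS_QUEUE_BIND
# before kicking; the field values below are placeholders.
#
#	#include <linux/types.h>
#	#include <sys/ioctl.h>
#	#include "mali_kbase_ioctl.h"	/* driver uapi header */
#
#	static int cs_queue_register_and_kick(int mali_fd, __u64 ring_gpu_va, __u32 ring_bytes)
#	{
#		struct kbase_ioctl_cs_queue_register reg = {
#			.buffer_gpu_addr = ring_gpu_va,	/* GPU VA of the ring buffer */
#			.buffer_size = ring_bytes,
#			.priority = 0,
#		};
#		if (ioctl(mali_fd, KBASE_IOCTL_CS_QUEUE_REGISTER, &reg))
#			return -1;
#
#		struct kbase_ioctl_cs_queue_kick kick = {
#			.buffer_gpu_addr = ring_gpu_va,
#		};
#		return ioctl(mali_fd, KBASE_IOCTL_CS_QUEUE_KICK, &kick);
#	}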

kbase_hwcnt_reader_metadata_cycles {
	top	int64
	shader_cores	int64
}

kbase_hwcnt_reader_metadata_with_cycles {
	timestamp	int64
	event_id	int32
	buffer_idx	int32
	cycles	kbase_hwcnt_reader_metadata_cycles
}

kbase_hwcnt_reader_api_version {
	version	int32
	features	int32
}

kbase_ioctl_read_user_page {
	offset	flags[user_offsets, int32]
	padding	const[0, int32]
	val_lo	int32	(out_overlay)
	val_hi	int32
}

kbase_ioctl_mem_alloc_ex {
	va_pages	int64
	commit_pages	int64
	extension	int64
	flags	flags[base_mem_alloc_flags, int64]
	fixed_address	int64
	extra	array[const[0, int64], 3]
	out_flags	int64	(out_overlay)
	gpu_va	gpu_addr
}

kbase_ioctl_cs_cpu_queue_info {
	buffer	ptr64[in, array[int8]]
	size	len[buffer, int64]
}

kbase_ioctl_cs_get_glb_iface {
	max_group_num	int32[0:MAX_SUPPORTED_CSGS]
	max_total_stream_num	int32[0:max_supported_streams]
	groups_ptr	ptr64[out, int64]
	streams_ptr	ptr64[out, int64]
	glb_version	int32	(out_overlay)
	features	int32
	group_num	int32
	prfcnt_size	int32
	total_stream_num	int32
	instr_features	int32
}

kbase_ioctl_cs_tiler_heap_term {
	gpu_heap_va	gpu_heap_va
}

kbase_ioctl_cs_tiler_heap_init_1_13 {
	chunk_size	flags[tiler_heap_chunk_sizes, int32]
	initial_chunks	int32
	max_chunks	int32
	target_in_flight	int16
	group_id	int8[0:BASE_MEM_GROUP_COUNT]
	padding	const[0, int8]
	gpu_heap_va	gpu_heap_va	(out_overlay)
	first_chunk_va	int64
}

kbase_ioctl_cs_tiler_heap_init {
	chunk_size	flags[tiler_heap_chunk_sizes, int32]
	initial_chunks	int32
	max_chunks	int32
	target_in_flight	int16
	group_id	int8[0:BASE_MEM_GROUP_COUNT]
	padding	const[0, int8]
	buf_desc_va	int64
	gpu_heap_va	gpu_heap_va	(out_overlay)
	first_chunk_va	int64
}
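
# Illustration only: a rough sketch of the tiler heap lifecycle captured by the
# gpu_heap_va resource, assuming the in/out union layout that the kbase uapi
# headers use for KBASE_IOCTL_CS_TILER_HEAP_INIT (the out_overlay fields above
# mirror that union). The heap VA returned by init is what
# KBASE_IOCTL_CS_TILER_HEAP_TERM consumes; chunk sizes and counts are
# placeholders.
#
#	#include <linux/types.h>
#	#include <sys/ioctl.h>
#	#include "mali_kbase_ioctl.h"
#
#	static void tiler_heap_roundtrip(int mali_fd)
#	{
#		union kbase_ioctl_cs_tiler_heap_init init = {
#			.in.chunk_size = 4096,
#			.in.initial_chunks = 2,
#			.in.max_chunks = 16,
#			.in.target_in_flight = 1,
#		};
#		if (ioctl(mali_fd, KBASE_IOCTL_CS_TILER_HEAP_INIT, &init))
#			return;
#
#		struct kbase_ioctl_cs_tiler_heap_term term = {
#			.gpu_heap_va = init.out.gpu_heap_va,	/* VA reported by init */
#		};
#		ioctl(mali_fd, KBASE_IOCTL_CS_TILER_HEAP_TERM, &term);
#	}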

kbase_ioctl_kcpu_queue_new {
	id	kcpu_queue_id
	pad	array[const[0, int8], 7]
}

kbase_ioctl_kcpu_queue_delete {
	id	kcpu_queue_id
	pad	array[const[0, int8], 7]
}

kbase_ioctl_kcpu_queue_enqueue {
	addr	gpu_addr
	nr_commands	len[addr, int32]
	id	kcpu_queue_id
	padding	array[const[0, int8], 3]
}

kbase_ioctl_cs_queue_terminate {
	buffer_gpu_addr	gpu_addr
}

kbase_ioctl_cs_queue_group_terminate {
	group_handle	cs_queue_group_handle
	padding	array[const[0, int8], 7]
}

kbase_ioctl_cs_queue_group_create {
	tiler_mask	int64
	fragment_mask	int64
	compute_mask	int64
	cs_min	int8
	priority	flags[queue_group_priority, int8]
	tiler_max	int8
	fragment_max	int8
	compute_max	int8
	csi_handlers	flags[csf_csi_flags, int8]
	padding	array[const[0, int8], 2]
	reserved	int64
	group_handle	cs_queue_group_handle	(out_overlay)
	padding_out	array[const[0, int8], 3]
	group_uid	int32
}

kbase_ioctl_cs_queue_group_create_1_6 {
	tiler_mask	int64
	fragment_mask	int64
	compute_mask	int64
	cs_min	int8
	priority	flags[queue_group_priority, int8]
	tiler_max	int8
	fragment_max	int8
	compute_max	int8
	padding	array[const[0, int8], 2]
	reserved	int64
	group_handle	cs_queue_group_handle	(out_overlay)
	padding_out	array[const[0, int8], 3]
	group_uid	int32
}

kbase_ioctl_cs_queue_register_ex {
	buffer_gpu_addr	gpu_addr
	buffer_size	int32
	priority	int8[0:BASE_QUEUE_MAX_PRIORITY]
	padding	array[const[0, int8], 3]
	ex_offset_var_addr	gpu_addr
	ex_buffer_base	gpu_addr
	ex_buffer_size	int32
	ex_event_size	int8
	ex_event_state	int8
	ex_padding	array[const[0, int8], 2]
}

kbase_ioctl_cs_queue_bind {
	buffer_gpu_addr	gpu_addr
	group_handle	cs_queue_group_handle
	csi_index	int8
	padding	array[const[0, int8], 6]
	mmap_handle	int64	(out_overlay)
}

kbase_ioctl_cs_queue_kick {
	buffer_gpu_addr	gpu_addr
}

base_ioctl_cs_queue_register {
	buffer_gpu_addr	gpu_addr
	buffer_size	int32
	priority	int8[0:BASE_QUEUE_MAX_PRIORITY]
	padding	array[const[0, int8], 6]
}

kbase_ioctl_buffer_liveness_update {
	live_ranges_address	ptr64[in, array[kbase_pixel_gpu_slc_liveness_mark]]
	live_ranges_count	len[live_ranges_address, int64]
	buffer_va_address	ptr64[in, array[gpu_addr]]
	buffer_sizes_address	ptr64[in, array[int64]]
	buffer_count	len[buffer_va_address, int64]
}

kbase_pixel_gpu_slc_liveness_mark {
	type	int32
	index	int32
}

kbase_ioctl_kinstr_prfcnt_setup {
	request_item_count	len[requests_ptr, int32]
	request_item_size	int32
	requests_ptr	ptr64[in, array[prfcnt_request_item]]
	prfcnt_metadata_item_size	int32	(out_overlay)
	prfcnt_mmap_size_bytes	int32
}

prfcnt_request_item {
	hdr	prfcnt_request_item_header
	u	prfcnt_request_union
}

prfcnt_request_item_header {
	item_type	flags[prfcnt_request_item_type, int16]
	item_version	const[0, int16]
}

prfcnt_request_union [
	req_mode	prfcnt_request_mode
	req_enable	prfcnt_request_enable
	req_scope	prfcnt_request_scope
] [varlen]

prfcnt_request_mode {
	mode	flags[prfcnt_mode, int8]
	pad	array[const[0, int8], 7]
	mode_config	prfcnt_request_mode_union
}

prfcnt_request_mode_union [
	periodic	periodic_config
]

periodic_config {
	period_ns	int64
}

prfcnt_request_enable {
	block_type	flags[prfcnt_block_type, int8]
	set	flags[prfcnt_set, int8]
	pad	array[const[0, int8], 6]
	enable_mask	array[int64, 2]
}

prfcnt_request_scope {
	scope	flags[prfcnt_scope, int8]
	pad	array[const[0, int8], 7]
}
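
# Illustration only: how a KBASE_IOCTL_KINSTR_PRFCNT_SETUP request list might
# be built in userspace, mirroring the prfcnt_request_* layouts above. Struct
# and member names (hdr/u, req_mode/req_enable, the in/out halves of the setup
# arg) are assumed to match the prfcnt uapi types from
# mali_kbase_hwcnt_reader.h; the item values are placeholders, not a
# known-good configuration.
#
#	#include <stdint.h>
#	#include <sys/ioctl.h>
#	#include "mali_kbase_hwcnt_reader.h"	/* prfcnt_* uapi types */
#	#include "mali_kbase_ioctl.h"
#
#	static int prfcnt_setup_manual(int mali_fd)
#	{
#		struct prfcnt_request_item req[2] = {
#			{
#				.hdr.item_type = PRFCNT_REQUEST_TYPE_MODE,
#				.u.req_mode.mode = PRFCNT_MODE_MANUAL,
#			},
#			{
#				.hdr.item_type = PRFCNT_REQUEST_TYPE_ENABLE,
#				.u.req_enable.block_type = PRFCNT_BLOCK_TYPE_SHADER_CORE,
#				.u.req_enable.set = PRFCNT_SET_PRIMARY,
#				.u.req_enable.enable_mask = { ~0ULL, ~0ULL },
#			},
#		};
#		union kbase_ioctl_kinstr_prfcnt_setup setup = {
#			.in.request_item_count = 2,
#			.in.request_item_size = sizeof(req[0]),
#			.in.requests_ptr = (__u64)(uintptr_t)req,
#		};
#		/* on success the return value is a new kinstr_prfcnt client fd */
#		return ioctl(mali_fd, KBASE_IOCTL_KINSTR_PRFCNT_SETUP, &setup);
#	}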

kbase_ioctl_kinstr_prfcnt_enum_info {
	info_item_size	len[info_list_ptr, int32]
	info_item_count	bytesize[info_list_ptr, int32]
	info_list_ptr	ptr[out, array[prfcnt_enum_item]]
}

prfcnt_enum_item {
	hdr	prfcnt_enum_item_header
	u	prfcnt_enum_union
}

prfcnt_enum_item_header {
	item_type	flags[prfcnt_request_item_type, int16]
	item_version	const[0, int16]
}

prfcnt_enum_union [
	block_counter	prfcnt_enum_block_counter
	request	prfcnt_enum_request
	sample_info	prfcnt_enum_sample_info
] [varlen]

prfcnt_enum_block_counter {
	block_type	flags[prfcnt_block_type, int8]
	set	flags[prfcnt_set, int8]
	pad	array[const[0, int8], 2]
	num_instances	int16
	num_values	int16
	counter_mask	array[int64, 2]
}

prfcnt_enum_request {
	request_item_type	flags[prfcnt_request_enum_type, int16]
	pad	const[0, int16]
	versions_mask	const[0, int32]
}

prfcnt_enum_sample_info {
	num_clock_domains	int32
	pad	const[0, int32]
}

kbase_ioctl_set_limited_core_count {
	max_core_count	int8
}

kbase_ioctl_context_priority_check {
	priority	flags[queue_group_priority, int8]
}

csf_csi_flags = BASE_CSF_TILER_OOM_EXCEPTION_FLAG
user_offsets = LATEST_FLUSH
prfcnt_request_enum_type = PRFCNT_ENUM_TYPE_BLOCK, PRFCNT_ENUM_TYPE_REQUEST, PRFCNT_ENUM_TYPE_SAMPLE_INFO
prfcnt_request_item_type = PRFCNT_REQUEST_TYPE_MODE, PRFCNT_REQUEST_TYPE_ENABLE, PRFCNT_REQUEST_TYPE_SCOPE
prfcnt_scope = PRFCNT_SCOPE_GLOBAL, PRFCNT_SCOPE_RESERVED
prfcnt_set = PRFCNT_SET_PRIMARY, PRFCNT_SET_SECONDARY, PRFCNT_SET_TERTIARY, PRFCNT_SET_RESERVED
prfcnt_block_type = PRFCNT_BLOCK_TYPE_FE, PRFCNT_BLOCK_TYPE_TILER, PRFCNT_BLOCK_TYPE_MEMORY, PRFCNT_BLOCK_TYPE_SHADER_CORE, PRFCNT_BLOCK_TYPE_RESERVED
prfcnt_mode = PRFCNT_MODE_MANUAL, PRFCNT_MODE_PERIODIC, PRFCNT_MODE_RESERVED
tiler_heap_chunk_sizes = 2048, 4096
queue_group_priority = KBASE_QUEUE_GROUP_PRIORITY_REALTIME, KBASE_QUEUE_GROUP_PRIORITY_HIGH, KBASE_QUEUE_GROUP_PRIORITY_MEDIUM, KBASE_QUEUE_GROUP_PRIORITY_LOW, KBASE_QUEUE_GROUP_PRIORITY_COUNT
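
# Illustration only: KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO is typically driven as
# a two-call query. This sketch assumes the common convention that a NULL
# info_list_ptr makes the kernel report the item size and count, and that the
# struct layout matches the kbase_ioctl_kinstr_prfcnt_enum_info description
# above; it is not a verified sequence for every driver version.
#
#	#include <stdint.h>
#	#include <stdlib.h>
#	#include <sys/ioctl.h>
#	#include "mali_kbase_hwcnt_reader.h"
#	#include "mali_kbase_ioctl.h"
#
#	static struct prfcnt_enum_item *prfcnt_enum(int mali_fd, __u32 *count)
#	{
#		struct kbase_ioctl_kinstr_prfcnt_enum_info info = { 0 };
#		/* first call: NULL list pointer, kernel fills size and count */
#		if (ioctl(mali_fd, KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO, &info))
#			return NULL;
#		struct prfcnt_enum_item *items =
#			calloc(info.info_item_count, info.info_item_size);
#		if (!items)
#			return NULL;
#		info.info_list_ptr = (__u64)(uintptr_t)items;
#		/* second call: kernel copies the enumeration items out */
#		if (ioctl(mali_fd, KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO, &info)) {
#			free(items);
#			return NULL;
#		}
#		*count = info.info_item_count;
#		return items;
#	}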