#ifndef _MSM_KGSL_H
#define _MSM_KGSL_H

/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14

/* context flags */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100

#define KGSL_CONTEXT_NO_FAULT_TOLERANCE	0x00000200
#define KGSL_CONTEXT_SYNC		0x00000400
/* bits [12:15] are reserved for future use */
#define KGSL_CONTEXT_TYPE_MASK		0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT		20

#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4

#define KGSL_CONTEXT_INVALID		0xffffffff

/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY	0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP	0x10000000

/* Memory caching hints */
#define KGSL_CACHEMODE_MASK		0x0C000000
#define KGSL_CACHEMODE_SHIFT		26

#define KGSL_CACHEMODE_WRITECOMBINE	0
#define KGSL_CACHEMODE_UNCACHED		1
#define KGSL_CACHEMODE_WRITETHROUGH	2
#define KGSL_CACHEMODE_WRITEBACK	3

/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16

/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE		0x00000000
#define KGSL_FLAGS_SAFEMODE		0x00000001
#define KGSL_FLAGS_INITIALIZED0		0x00000002
#define KGSL_FLAGS_INITIALIZED		0x00000004
#define KGSL_FLAGS_STARTED		0x00000008
#define KGSL_FLAGS_ACTIVE		0x00000010
#define KGSL_FLAGS_RESERVED0		0x00000020
#define KGSL_FLAGS_RESERVED1		0x00000040
#define KGSL_FLAGS_RESERVED2		0x00000080
#define KGSL_FLAGS_SOFT_RESET		0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controlled by a given platform */
#define KGSL_CLK_SRC		0x00000001
#define KGSL_CLK_CORE		0x00000002
#define KGSL_CLK_IFACE		0x00000004
#define KGSL_CLK_MEM		0x00000008
#define KGSL_CLK_MEM_IFACE	0x00000010
#define KGSL_CLK_AXI		0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};

/*
 * Convert a bandwidth value to bytes per second. The argument is
 * parenthesized so that expression arguments (e.g. a + b) expand
 * correctly; the result is unchanged for all plain-value callers.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)

/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_2D0		= 0x00000001,
	KGSL_DEVICE_2D1		= 0x00000002,
	KGSL_DEVICE_MAX		= 0x00000003
};

/* source of a user memory mapping passed to IOCTL_KGSL_MAP_USER_MEM */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000004,
};

/* device description returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/* chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned int gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	unsigned int gmem_sizebytes;
};

/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* byte offset of 'field' within the memstore slot for context 'ctxt_id' */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
	KGSL_PROP_DEVICE_INFO     = 0x00000001,
	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
	KGSL_PROP_DEVICE_POWER    = 0x00000003,
	KGSL_PROP_SHMEM           = 0x00000004,
	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
	KGSL_PROP_MMU_ENABLE      = 0x00000006,
	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
	KGSL_PROP_VERSION         = 0x00000008,
	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
	KGSL_PROP_PWRCTRL         = 0x0000000E,
};

/* mmap information returned for KGSL_PROP_DEVICE_SHADOW */
struct kgsl_shadowprop {
	unsigned int gpuaddr;
	unsigned int size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* driver/device version pair returned for KGSL_PROP_VERSION */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP	0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM	0x1
#define KGSL_PERFCOUNTER_GROUP_PC	0x2
#define KGSL_PERFCOUNTER_GROUP_VFD	0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ	0x4
#define KGSL_PERFCOUNTER_GROUP_VPC	0x5
#define KGSL_PERFCOUNTER_GROUP_TSE	0x6
#define KGSL_PERFCOUNTER_GROUP_RAS	0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE	0x8
#define KGSL_PERFCOUNTER_GROUP_TP	0x9
#define KGSL_PERFCOUNTER_GROUP_SP	0xA
#define KGSL_PERFCOUNTER_GROUP_RB	0xB
#define KGSL_PERFCOUNTER_GROUP_PWR	0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF	0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR	0xE

#define KGSL_PERFCOUNTER_NOT_USED	0xFFFFFFFF

/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned int gpuaddr;
	void *hostptr;
	unsigned int sizedwords;
	unsigned int ctrl;
};

/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
	this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
	this is used to find mmap() offset and sizes for mapping
	struct kgsl_memstore into userspace.
*/
struct kgsl_device_getproperty {
	unsigned int type;
	void *value;
	unsigned int sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* per-context variant of IOCTL_KGSL_DEVICE_WAITTIMESTAMP */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)

/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned int ibdesc_addr;
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned int gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write only ioctl. To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_* values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)

/* add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space */
struct kgsl_map_user_mem {
	int fd;
	unsigned int gpuaddr;   /*output param */
	unsigned int len;
	unsigned int offset;
	unsigned int hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned int gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)

/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned int gpuaddr;   /*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned int gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* inject a user event into the CFF capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};

/* NOTE: shares nr 0x31 with IOCTL_KGSL_TIMESTAMP_EVENT_OLD below; the
 * encoded ioctl values still differ because the argument sizes differ.
 */
#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* GMEM region description used by IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* shadow buffer description used by IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_buffer_desc {
	void *hostptr;
	unsigned int gpuaddr;
	int size;
	unsigned int format;
	unsigned int pitch;
	unsigned int enabled;
};

struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)

/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned int gpuaddr;   /*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation. It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

/* allocate GPU-accessible memory; flags take KGSL_MEM* values */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr;  /*output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

struct kgsl_cff_syncmem {
	unsigned int gpuaddr;
	unsigned int len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;              /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel. Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)

/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	unsigned int size;
	unsigned int mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned int __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)

/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	unsigned int size;
	unsigned int mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned int __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO\
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)

/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned int gpuaddr;
	unsigned int id;
	unsigned int op;
/* private: reserved for future use*/
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)

/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved counter
 *
 * Get an available performance counter from a specified groupid. The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group. An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left. After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, countable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken. This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)

/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Size of the countables array
 * @max_counters: Return total number counters for the group ID
 *
 * Query the available performance counters given a groupid. The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * 'count' entries for the group id. The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to size counters */
	unsigned int *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of groupid/countable pairs; each value field is filled in
 *         with the current counter reading
 * @count: Number of entries in the reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)

/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int *id_list;
	unsigned int count;
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)

/*
 * struct kgsl_cmd_syncpoint_timestamp
 * @context_id: ID of a KGSL context
 * @timestamp: GPU timestamp
 *
 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware
 */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0

struct kgsl_cmd_syncpoint_fence {
	int fd;
};

#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1

/**
 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
 * @type: type of sync point defined here
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 *
 * This structure contains pointers defining a specific command sync point.
 * The pointer and size should point to a type appropriate structure.
 */
struct kgsl_cmd_syncpoint {
	int type;
	void __user *priv;
	unsigned int size;
};

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: reserved flag bits for the command batch (presumably KGSL_CONTEXT_*
 *         values — NOTE(review): confirm against the driver implementation)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware. This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
 * submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied. This entry point is the new and preferred way
 * to submit commands to the GPU.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)

#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
			unsigned long *len);
#else
#define kgsl_gem_obj_addr(...) 0
#endif
#endif
#endif /* _MSM_KGSL_H */