      1 #ifndef _MSM_KGSL_H
      2 #define _MSM_KGSL_H
      3 
      4 /*
      5  * The KGSL version has proven not to be very useful in userspace if features
      6  * are cherry-picked into other trees out of order, so it is frozen as of 3.14.
      7  * It is left here for backwards compatibility and as a reminder that
      8  * software releases are never linear. Also, I like pie.
      9  */
     10 
     11 #define KGSL_VERSION_MAJOR        3
     12 #define KGSL_VERSION_MINOR        14
     13 
     14 /*context flags */
     15 #define KGSL_CONTEXT_SAVE_GMEM		0x00000001
     16 #define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
     17 #define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
     18 #define KGSL_CONTEXT_CTX_SWITCH		0x00000008
     19 #define KGSL_CONTEXT_PREAMBLE		0x00000010
     20 #define KGSL_CONTEXT_TRASH_STATE	0x00000020
     21 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
     22 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
     23 #define KGSL_CONTEXT_END_OF_FRAME	0x00000100
     24 
     25 #define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
     26 #define KGSL_CONTEXT_SYNC               0x00000400
     27 #define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
     28 /* bits [12:15] are reserved for future use */
     29 #define KGSL_CONTEXT_TYPE_MASK          0x01F00000
     30 #define KGSL_CONTEXT_TYPE_SHIFT         20
     31 
     32 #define KGSL_CONTEXT_TYPE_ANY		0
     33 #define KGSL_CONTEXT_TYPE_GL		1
     34 #define KGSL_CONTEXT_TYPE_CL		2
     35 #define KGSL_CONTEXT_TYPE_C2D		3
     36 #define KGSL_CONTEXT_TYPE_RS		4
     37 
     38 #define KGSL_CONTEXT_INVALID 0xffffffff
     39 
     40 /* --- Memory allocation flags --- */
     41 
     42 /* General allocation hints */
     43 #define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
     44 #define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000
     45 
     46 /* Memory caching hints */
     47 #define KGSL_CACHEMODE_MASK 0x0C000000
     48 #define KGSL_CACHEMODE_SHIFT 26
     49 
     50 #define KGSL_CACHEMODE_WRITECOMBINE 0
     51 #define KGSL_CACHEMODE_UNCACHED 1
     52 #define KGSL_CACHEMODE_WRITETHROUGH 2
     53 #define KGSL_CACHEMODE_WRITEBACK 3
     54 
     55 /* Memory types for which allocations are made */
     56 #define KGSL_MEMTYPE_MASK		0x0000FF00
     57 #define KGSL_MEMTYPE_SHIFT		8
     58 
     59 #define KGSL_MEMTYPE_OBJECTANY			0
     60 #define KGSL_MEMTYPE_FRAMEBUFFER		1
     61 #define KGSL_MEMTYPE_RENDERBUFFER		2
     62 #define KGSL_MEMTYPE_ARRAYBUFFER		3
     63 #define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
     64 #define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
     65 #define KGSL_MEMTYPE_TEXTURE			6
     66 #define KGSL_MEMTYPE_SURFACE			7
     67 #define KGSL_MEMTYPE_EGL_SURFACE		8
     68 #define KGSL_MEMTYPE_GL				9
     69 #define KGSL_MEMTYPE_CL				10
     70 #define KGSL_MEMTYPE_CL_BUFFER_MAP		11
     71 #define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
     72 #define KGSL_MEMTYPE_CL_IMAGE_MAP		13
     73 #define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
     74 #define KGSL_MEMTYPE_CL_KERNEL_STACK		15
     75 #define KGSL_MEMTYPE_COMMAND			16
     76 #define KGSL_MEMTYPE_2D				17
     77 #define KGSL_MEMTYPE_EGL_IMAGE			18
     78 #define KGSL_MEMTYPE_EGL_SHADOW			19
     79 #define KGSL_MEMTYPE_MULTISAMPLE		20
     80 #define KGSL_MEMTYPE_KERNEL			255
     81 
     82 /*
     83  * Alignment hint, passed as the power of 2 exponent.
     84  * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
     85  */
     86 #define KGSL_MEMALIGN_MASK		0x00FF0000
     87 #define KGSL_MEMALIGN_SHIFT		16
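
/*
 * Illustrative sketch (not part of this API): the hint fields above can be
 * combined into a single allocation flags word, e.g. for a GPU-read-only
 * texture, write-combined, aligned to 4k (2^12):
 *
 *	unsigned int flags = KGSL_MEMFLAGS_GPUREADONLY
 *		| ((KGSL_CACHEMODE_WRITECOMBINE << KGSL_CACHEMODE_SHIFT)
 *			& KGSL_CACHEMODE_MASK)
 *		| ((KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT)
 *			& KGSL_MEMTYPE_MASK)
 *		| ((12 << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK);
 */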
     88 
     89 /* --- generic KGSL flag values --- */
     90 
     91 #define KGSL_FLAGS_NORMALMODE  0x00000000
     92 #define KGSL_FLAGS_SAFEMODE    0x00000001
     93 #define KGSL_FLAGS_INITIALIZED0 0x00000002
     94 #define KGSL_FLAGS_INITIALIZED 0x00000004
     95 #define KGSL_FLAGS_STARTED     0x00000008
     96 #define KGSL_FLAGS_ACTIVE      0x00000010
     97 #define KGSL_FLAGS_RESERVED0   0x00000020
     98 #define KGSL_FLAGS_RESERVED1   0x00000040
     99 #define KGSL_FLAGS_RESERVED2   0x00000080
    100 #define KGSL_FLAGS_SOFT_RESET  0x00000100
    101 #define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
    102 
    103 /* Clock flags to show which clocks should be controlled by a given platform */
    104 #define KGSL_CLK_SRC	0x00000001
    105 #define KGSL_CLK_CORE	0x00000002
    106 #define KGSL_CLK_IFACE	0x00000004
    107 #define KGSL_CLK_MEM	0x00000008
    108 #define KGSL_CLK_MEM_IFACE 0x00000010
    109 #define KGSL_CLK_AXI	0x00000020
    110 
    111 /* Server Side Sync Timeout in milliseconds */
    112 #define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
    113 
    114 /*
    115  * Reset status values for context
    116  */
    117 enum kgsl_ctx_reset_stat {
    118 	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
    119 	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
    120 	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
    121 	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
    122 };
    123 
    124 #define KGSL_CONVERT_TO_MBPS(val) \
    125 	((val)*1000*1000U)
    126 
    127 /* device id */
    128 enum kgsl_deviceid {
    129 	KGSL_DEVICE_3D0		= 0x00000000,
    130 	KGSL_DEVICE_2D0		= 0x00000001,
    131 	KGSL_DEVICE_2D1		= 0x00000002,
    132 	KGSL_DEVICE_MAX		= 0x00000003
    133 };
    134 
    135 enum kgsl_user_mem_type {
    136 	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
    137 	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
    138 	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
    139 	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
    140 	KGSL_USER_MEM_TYPE_MAX		= 0x00000004,
    141 };
    142 
    143 struct kgsl_devinfo {
    144 
    145 	unsigned int device_id;
    146 	/* chip revision id
    147 	* coreid:8 majorrev:8 minorrev:8 patch:8
    148 	*/
    149 	unsigned int chip_id;
    150 	unsigned int mmu_enabled;
    151 	unsigned int gmem_gpubaseaddr;
    152 	/*
    153 	* This field contains the adreno revision
    154 	* number 200, 205, 220, etc...
    155 	*/
    156 	unsigned int gpu_id;
    157 	unsigned int gmem_sizebytes;
    158 };
    159 
    160 /* this structure defines the region of memory that can be mmap()ed from this
    161    driver. The timestamp fields are volatile because they are written by the
    162    GPU
    163 */
    164 struct kgsl_devmemstore {
    165 	volatile unsigned int soptimestamp;
    166 	unsigned int sbz;
    167 	volatile unsigned int eoptimestamp;
    168 	unsigned int sbz2;
    169 	volatile unsigned int ts_cmp_enable;
    170 	unsigned int sbz3;
    171 	volatile unsigned int ref_wait_ts;
    172 	unsigned int sbz4;
    173 	unsigned int current_context;
    174 	unsigned int sbz5;
    175 };
    176 
    177 #define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
    178 	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
    179 	 offsetof(struct kgsl_devmemstore, field))
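
/*
 * Illustrative sketch (assumes a context id "ctxt_id" obtained from
 * IOCTL_KGSL_DRAWCTXT_CREATE): the byte offset of that context's
 * end-of-pipeline timestamp within the mmap()ed memstore region is
 *
 *	size_t eop_off = KGSL_MEMSTORE_OFFSET(ctxt_id, eoptimestamp);
 *
 * added to the base address obtained by mmap()ing the region described by
 * KGSL_PROP_DEVICE_SHADOW.
 */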
    180 
    181 /* timestamp id*/
    182 enum kgsl_timestamp_type {
    183 	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
    184 	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
    185 	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
    186 };
    187 
    188 /* property types - used with kgsl_device_getproperty */
    189 enum kgsl_property_type {
    190 	KGSL_PROP_DEVICE_INFO     = 0x00000001,
    191 	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
    192 	KGSL_PROP_DEVICE_POWER    = 0x00000003,
    193 	KGSL_PROP_SHMEM           = 0x00000004,
    194 	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
    195 	KGSL_PROP_MMU_ENABLE 	  = 0x00000006,
    196 	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
    197 	KGSL_PROP_VERSION         = 0x00000008,
    198 	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
    199 	KGSL_PROP_PWRCTRL         = 0x0000000E,
    200 	KGSL_PROP_PWR_CONSTRAINT  = 0x00000012,
    201 };
    202 
    203 struct kgsl_shadowprop {
    204 	unsigned int gpuaddr;
    205 	unsigned int size;
    206 	unsigned int flags; /* contains KGSL_FLAGS_ values */
    207 };
    208 
    209 struct kgsl_version {
    210 	unsigned int drv_major;
    211 	unsigned int drv_minor;
    212 	unsigned int dev_major;
    213 	unsigned int dev_minor;
    214 };
    215 
    216 /* Performance counter groups */
    217 
    218 #define KGSL_PERFCOUNTER_GROUP_CP 0x0
    219 #define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
    220 #define KGSL_PERFCOUNTER_GROUP_PC 0x2
    221 #define KGSL_PERFCOUNTER_GROUP_VFD 0x3
    222 #define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
    223 #define KGSL_PERFCOUNTER_GROUP_VPC 0x5
    224 #define KGSL_PERFCOUNTER_GROUP_TSE 0x6
    225 #define KGSL_PERFCOUNTER_GROUP_RAS 0x7
    226 #define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
    227 #define KGSL_PERFCOUNTER_GROUP_TP 0x9
    228 #define KGSL_PERFCOUNTER_GROUP_SP 0xA
    229 #define KGSL_PERFCOUNTER_GROUP_RB 0xB
    230 #define KGSL_PERFCOUNTER_GROUP_PWR 0xC
    231 #define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
    232 #define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
    233 
    234 #define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
    235 #define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
    236 
    237 /* structure holds list of ibs */
    238 struct kgsl_ibdesc {
    239 	unsigned int gpuaddr;
    240 	void *hostptr;
    241 	unsigned int sizedwords;
    242 	unsigned int ctrl;
    243 };
    244 
    245 /* ioctls */
    246 #define KGSL_IOC_TYPE 0x09
    247 
    248 /* get misc info about the GPU
    249    type should be a value from enum kgsl_property_type
    250    value points to a structure that varies based on type
    251    sizebytes is sizeof() that structure
    252    for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
    253    this structure contains hardware versioning info.
    254    for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
    255    this is used to find mmap() offset and sizes for mapping
    256    struct kgsl_devmemstore into userspace.
    257 */
    258 struct kgsl_device_getproperty {
    259 	unsigned int type;
    260 	void  *value;
    261 	unsigned int sizebytes;
    262 };
    263 
    264 #define IOCTL_KGSL_DEVICE_GETPROPERTY \
    265 	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
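
/*
 * Illustrative usage sketch (assumes an open fd on the kgsl device node and
 * <sys/ioctl.h>; error handling omitted):
 *
 *	struct kgsl_devinfo info = { 0 };
 *	struct kgsl_device_getproperty prop = {
 *		.type = KGSL_PROP_DEVICE_INFO,
 *		.value = &info,
 *		.sizebytes = sizeof(info),
 *	};
 *	ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
 *
 * On return info.chip_id and info.gpu_id describe the hardware.
 */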
    266 
    267 /* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
    268  */
    269 
    270 /* block until the GPU has executed past a given timestamp
    271  * timeout is in milliseconds.
    272  */
    273 struct kgsl_device_waittimestamp {
    274 	unsigned int timestamp;
    275 	unsigned int timeout;
    276 };
    277 
    278 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
    279 	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
    280 
    281 struct kgsl_device_waittimestamp_ctxtid {
    282 	unsigned int context_id;
    283 	unsigned int timestamp;
    284 	unsigned int timeout;
    285 };
    286 
    287 #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
    288 	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
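
/*
 * Illustrative sketch (assumes fd, a context id and a timestamp returned by
 * a previous submission; error handling omitted). The timeout is in
 * milliseconds:
 *
 *	struct kgsl_device_waittimestamp_ctxtid wait = {
 *		.context_id = ctxt_id,
 *		.timestamp = ts,
 *		.timeout = 1000,
 *	};
 *	ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
 */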
    289 
    290 /* DEPRECATED: issue indirect commands to the GPU.
    291  * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
    292  * ibaddr and sizedwords must specify a subset of a buffer created
    293  * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
    294  * flags may be a mask of KGSL_CONTEXT_ values
    295  * timestamp is a returned counter value which can be passed to
    296  * other ioctls to determine when the commands have been executed by
    297  * the GPU.
    298  *
    299  * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
    300  * instead
    301  */
    302 struct kgsl_ringbuffer_issueibcmds {
    303 	unsigned int drawctxt_id;
    304 	unsigned int ibdesc_addr;
    305 	unsigned int numibs;
    306 	unsigned int timestamp; /*output param */
    307 	unsigned int flags;
    308 };
    309 
    310 #define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
    311 	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
    312 
    313 /* read the most recently executed timestamp value
    314  * type should be a value from enum kgsl_timestamp_type
    315  */
    316 struct kgsl_cmdstream_readtimestamp {
    317 	unsigned int type;
    318 	unsigned int timestamp; /*output param */
    319 };
    320 
    321 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
    322 	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
    323 
    324 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
    325 	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
    326 
    327 /* free memory when the GPU reaches a given timestamp.
    328  * gpuaddr specifies a memory region created by an
    329  * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
    330  * type should be a value from enum kgsl_timestamp_type
    331  */
    332 struct kgsl_cmdstream_freememontimestamp {
    333 	unsigned int gpuaddr;
    334 	unsigned int type;
    335 	unsigned int timestamp;
    336 };
    337 
    338 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
    339 	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
    340 
    341 /* Previous versions of this header had incorrectly defined
    342    IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
    343    of a write-only ioctl.  To ensure binary compatibility, the following
    344    #define will be used to intercept the incorrect ioctl
    345 */
    346 
    347 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
    348 	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
    349 
    350 /* create a draw context, which is used to preserve GPU state.
    351  * The flags field may contain a mask of KGSL_CONTEXT_* values
    352  */
    353 struct kgsl_drawctxt_create {
    354 	unsigned int flags;
    355 	unsigned int drawctxt_id; /*output param */
    356 };
    357 
    358 #define IOCTL_KGSL_DRAWCTXT_CREATE \
    359 	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
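
/*
 * Illustrative sketch (assumes fd; error handling omitted): create a
 * per-context-timestamp GL context, encoding the context type with
 * KGSL_CONTEXT_TYPE_SHIFT.
 *
 *	struct kgsl_drawctxt_create ctxt = {
 *		.flags = KGSL_CONTEXT_PER_CONTEXT_TS |
 *			(KGSL_CONTEXT_TYPE_GL << KGSL_CONTEXT_TYPE_SHIFT),
 *	};
 *	ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &ctxt);
 *
 * ctxt.drawctxt_id now identifies the new context.
 */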
    360 
    361 /* destroy a draw context */
    362 struct kgsl_drawctxt_destroy {
    363 	unsigned int drawctxt_id;
    364 };
    365 
    366 #define IOCTL_KGSL_DRAWCTXT_DESTROY \
    367 	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
    368 
    369 /* add a block of pmem, fb, ashmem or user allocated address
    370  * into the GPU address space */
    371 struct kgsl_map_user_mem {
    372 	int fd;
    373 	unsigned int gpuaddr;   /*output param */
    374 	unsigned int len;
    375 	unsigned int offset;
    376 	unsigned int hostptr;   /*input param */
    377 	enum kgsl_user_mem_type memtype;
    378 	unsigned int flags;
    379 };
    380 
    381 #define IOCTL_KGSL_MAP_USER_MEM \
    382 	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
    383 
    384 struct kgsl_cmdstream_readtimestamp_ctxtid {
    385 	unsigned int context_id;
    386 	unsigned int type;
    387 	unsigned int timestamp; /*output param */
    388 };
    389 
    390 #define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
    391 	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
    392 
    393 struct kgsl_cmdstream_freememontimestamp_ctxtid {
    394 	unsigned int context_id;
    395 	unsigned int gpuaddr;
    396 	unsigned int type;
    397 	unsigned int timestamp;
    398 };
    399 
    400 #define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
    401 	_IOW(KGSL_IOC_TYPE, 0x17, \
    402 	struct kgsl_cmdstream_freememontimestamp_ctxtid)
    403 
    404 /* add a block of pmem or fb into the GPU address space */
    405 struct kgsl_sharedmem_from_pmem {
    406 	int pmem_fd;
    407 	unsigned int gpuaddr;	/*output param */
    408 	unsigned int len;
    409 	unsigned int offset;
    410 };
    411 
    412 #define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
    413 	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
    414 
    415 /* remove memory from the GPU's address space */
    416 struct kgsl_sharedmem_free {
    417 	unsigned int gpuaddr;
    418 };
    419 
    420 #define IOCTL_KGSL_SHAREDMEM_FREE \
    421 	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
    422 
    423 struct kgsl_cff_user_event {
    424 	unsigned char cff_opcode;
    425 	unsigned int op1;
    426 	unsigned int op2;
    427 	unsigned int op3;
    428 	unsigned int op4;
    429 	unsigned int op5;
    430 	unsigned int __pad[2];
    431 };
    432 
    433 #define IOCTL_KGSL_CFF_USER_EVENT \
    434 	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
    435 
    436 struct kgsl_gmem_desc {
    437 	unsigned int x;
    438 	unsigned int y;
    439 	unsigned int width;
    440 	unsigned int height;
    441 	unsigned int pitch;
    442 };
    443 
    444 struct kgsl_buffer_desc {
    445 	void 			*hostptr;
    446 	unsigned int	gpuaddr;
    447 	int				size;
    448 	unsigned int	format;
    449 	unsigned int  	pitch;
    450 	unsigned int  	enabled;
    451 };
    452 
    453 struct kgsl_bind_gmem_shadow {
    454 	unsigned int drawctxt_id;
    455 	struct kgsl_gmem_desc gmem_desc;
    456 	unsigned int shadow_x;
    457 	unsigned int shadow_y;
    458 	struct kgsl_buffer_desc shadow_buffer;
    459 	unsigned int buffer_id;
    460 };
    461 
    462 #define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
    463     _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
    464 
    465 /* add a block of memory into the GPU address space */
    466 
    467 /*
    468  * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
    469  * use IOCTL_KGSL_GPUMEM_ALLOC instead
    470  */
    471 
    472 struct kgsl_sharedmem_from_vmalloc {
    473 	unsigned int gpuaddr;	/*output param */
    474 	unsigned int hostptr;
    475 	unsigned int flags;
    476 };
    477 
    478 #define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
    479 	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
    480 
    481 /*
    482  * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_SYNC_CACHE which
    483  * supports both directions (flush and invalidate). This code will still
    484  * work, but by definition it will do a flush of the cache which might not be
    485  * what you want to have happen on a buffer following a GPU operation.  It is
    486  * safer to go with IOCTL_KGSL_GPUMEM_SYNC_CACHE
    487  */
    488 
    489 #define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
    490 	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
    491 
    492 struct kgsl_drawctxt_set_bin_base_offset {
    493 	unsigned int drawctxt_id;
    494 	unsigned int offset;
    495 };
    496 
    497 #define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
    498 	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
    499 
    500 enum kgsl_cmdwindow_type {
    501 	KGSL_CMDWINDOW_MIN     = 0x00000000,
    502 	KGSL_CMDWINDOW_2D      = 0x00000000,
    503 	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
    504 	KGSL_CMDWINDOW_MMU     = 0x00000002,
    505 	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
    506 	KGSL_CMDWINDOW_MAX     = 0x000000FF,
    507 };
    508 
    509 /* write to the command window */
    510 struct kgsl_cmdwindow_write {
    511 	enum kgsl_cmdwindow_type target;
    512 	unsigned int addr;
    513 	unsigned int data;
    514 };
    515 
    516 #define IOCTL_KGSL_CMDWINDOW_WRITE \
    517 	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
    518 
    519 struct kgsl_gpumem_alloc {
    520 	unsigned long gpuaddr;
    521 	size_t size;
    522 	unsigned int flags;
    523 };
    524 
    525 #define IOCTL_KGSL_GPUMEM_ALLOC \
    526 	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
    527 
    528 struct kgsl_cff_syncmem {
    529 	unsigned int gpuaddr;
    530 	unsigned int len;
    531 	unsigned int __pad[2]; /* For future binary compatibility */
    532 };
    533 
    534 #define IOCTL_KGSL_CFF_SYNCMEM \
    535 	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
    536 
    537 /*
    538  * A timestamp event allows the user space to register an action following an
    539  * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
    540  * _IOWR to support fences which need to return a fd for the priv parameter.
    541  */
    542 
    543 struct kgsl_timestamp_event {
    544 	int type;                /* Type of event (see list below) */
    545 	unsigned int timestamp;  /* Timestamp to trigger event on */
    546 	unsigned int context_id; /* Context for the timestamp */
    547 	void *priv;              /* Pointer to the event specific blob */
    548 	size_t len;              /* Size of the event specific blob */
    549 };
    550 
    551 #define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
    552 	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
    553 
    554 /* A genlock timestamp event releases an existing lock when the timestamp expires */
    555 
    556 #define KGSL_TIMESTAMP_EVENT_GENLOCK 1
    557 
    558 struct kgsl_timestamp_event_genlock {
    559 	int handle; /* Handle of the genlock lock to release */
    560 };
    561 
    562 /* A fence timestamp event signals a sync fence when the timestamp expires */
    563 
    564 #define KGSL_TIMESTAMP_EVENT_FENCE 2
    565 
    566 struct kgsl_timestamp_event_fence {
    567 	int fence_fd; /* Fence to signal */
    568 };
    569 
    570 /*
    571  * Set a property within the kernel.  Uses the same structure as
    572  * IOCTL_KGSL_DEVICE_GETPROPERTY
    573  */
    574 
    575 #define IOCTL_KGSL_SETPROPERTY \
    576 	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
    577 
    578 #define IOCTL_KGSL_TIMESTAMP_EVENT \
    579 	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
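
/*
 * Illustrative sketch (assumes fd, a context id and a timestamp; error
 * handling omitted): request a sync fence fd that signals when the
 * timestamp expires.
 *
 *	struct kgsl_timestamp_event_fence fence = { .fence_fd = -1 };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_FENCE,
 *		.timestamp = ts,
 *		.context_id = ctxt_id,
 *		.priv = &fence,
 *		.len = sizeof(fence),
 *	};
 *	ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
 *
 * On success fence.fence_fd holds the new fence descriptor.
 */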
    580 
    581 /**
    582  * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
    583  * @id: returned id value for this allocation.
    584  * @flags: mask of KGSL_MEM* values requested and actual flags on return.
    585  * @size: requested size of the allocation and actual size on return.
    586  * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
    587  * @gpuaddr: returned GPU address for the allocation
    588  *
    589  * Allocate memory for access by the GPU. The flags and size fields are echoed
    590  * back by the kernel, so that the caller can know if the request was
    591  * adjusted.
    592  *
    593  * Supported flags:
    594  * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
    595  * KGSL_MEMTYPE*: usage hint for debugging aid
    596  * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
    597  * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
    598  * address will be 0. Calling mmap() will set the GPU address.
    599  */
    600 struct kgsl_gpumem_alloc_id {
    601 	unsigned int id;
    602 	unsigned int flags;
    603 	unsigned int size;
    604 	unsigned int mmapsize;
    605 	unsigned long gpuaddr;
    606 /* private: reserved for future use*/
    607 	unsigned int __pad[2];
    608 };
    609 
    610 #define IOCTL_KGSL_GPUMEM_ALLOC_ID \
    611 	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
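
/*
 * Illustrative sketch (assumes fd; error handling omitted):
 *
 *	struct kgsl_gpumem_alloc_id req = {
 *		.flags = KGSL_MEMFLAGS_USE_CPU_MAP,
 *		.size = 4096,
 *	};
 *	ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
 *
 * The kernel echoes back the (possibly adjusted) size and flags, fills in
 * req.id, and req.mmapsize gives the length to pass to mmap().
 */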
    612 
    613 /**
    614  * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
    615  * @id: GPU allocation id to free
    616  *
    617  * Free an allocation by id, in case a GPU address has not been assigned or
    618  * is unknown. Freeing an allocation by id with this ioctl or by GPU address
    619  * with IOCTL_KGSL_SHAREDMEM_FREE is equivalent.
    620  */
    621 struct kgsl_gpumem_free_id {
    622 	unsigned int id;
    623 /* private: reserved for future use*/
    624 	unsigned int __pad;
    625 };
    626 
    627 #define IOCTL_KGSL_GPUMEM_FREE_ID \
    628 	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
    629 
    630 /**
    631  * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
    632  * @gpuaddr: GPU address to query. Also set on return.
    633  * @id: GPU allocation id to query. Also set on return.
    634  * @flags: returned mask of KGSL_MEM* values.
    635  * @size: returned size of the allocation.
    636  * @mmapsize: returned size to pass to mmap(), which may be larger than 'size'
    637  * @useraddr: returned address of the userspace mapping for this buffer
    638  *
    639  * This ioctl allows querying of all user visible attributes of an existing
    640  * allocation, by either the GPU address or the id returned by a previous
    641  * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
    642  * return all attributes so this ioctl can be used to look them up if needed.
    643  *
    644  */
    645 struct kgsl_gpumem_get_info {
    646 	unsigned long gpuaddr;
    647 	unsigned int id;
    648 	unsigned int flags;
    649 	unsigned int size;
    650 	unsigned int mmapsize;
    651 	unsigned long useraddr;
    652 /* private: reserved for future use*/
    653 	unsigned int __pad[4];
    654 };
    655 
    656 #define IOCTL_KGSL_GPUMEM_GET_INFO\
    657 	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
    658 
    659 /**
    660  * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
    661  * @gpuaddr: GPU address of the buffer to sync.
    662  * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
    663  * @op: a mask of KGSL_GPUMEM_CACHE_* values
    664  *
    665  * Sync the L2 cache for memory headed to and from the GPU - this replaces
    666  * IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
    667  * directions
    668  *
    669  */
    670 struct kgsl_gpumem_sync_cache {
    671 	unsigned int gpuaddr;
    672 	unsigned int id;
    673 	unsigned int op;
    674 /* private: reserved for future use*/
    675 	unsigned int __pad[2]; /* For future binary compatibility */
    676 };
    677 
    678 #define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
    679 #define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
    680 
    681 #define KGSL_GPUMEM_CACHE_INV (1 << 1)
    682 #define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
    683 
    684 #define KGSL_GPUMEM_CACHE_FLUSH \
    685 	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
    686 
    687 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
    688 	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
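
/*
 * Illustrative sketch (assumes fd and a buffer id from
 * IOCTL_KGSL_GPUMEM_ALLOC_ID; error handling omitted): clean the CPU cache
 * before the GPU reads from the buffer.
 *
 *	struct kgsl_gpumem_sync_cache sync = {
 *		.id = buf_id,
 *		.op = KGSL_GPUMEM_CACHE_TO_GPU,
 *	};
 *	ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
 */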
    689 
    690 /**
    691  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
    692  * @groupid: Performance counter group ID
    693  * @countable: Countable to select within the group
    694  * @offset: Return offset of the reserved LO counter
    695  * @offset_hi: Return offset of the reserved HI counter
    696  *
    697  * Get an available performance counter from a specified groupid.  The offset
    698  * of the performance counter will be returned after successfully assigning
    699  * the countable to the counter for the specified group.  An error will be
    700  * returned and an offset of 0 if the groupid is invalid or there are no
    701  * more counters left.  After successfully getting a perfcounter, the user
    702  * must call kgsl_perfcounter_put(groupid, countable) when finished with
    703  * the perfcounter to clear up perfcounter resources.
    704  *
    705  */
    706 struct kgsl_perfcounter_get {
    707 	unsigned int groupid;
    708 	unsigned int countable;
    709 	unsigned int offset;
    710 	unsigned int offset_hi;
    711 /* private: reserved for future use */
    712 	unsigned int __pad; /* For future binary compatibility */
    713 };
    714 
    715 #define IOCTL_KGSL_PERFCOUNTER_GET \
    716 	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
    717 
    718 /**
    719  * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
    720  * @groupid: Performance counter group ID
    721  * @countable: Countable to release within the group
    722  *
    723  * Put an allocated performance counter to allow others to have access to the
    724  * resource that was previously taken.  This is only to be called after
    725  * successfully getting a performance counter from kgsl_perfcounter_get().
    726  *
    727  */
    728 struct kgsl_perfcounter_put {
    729 	unsigned int groupid;
    730 	unsigned int countable;
    731 /* private: reserved for future use */
    732 	unsigned int __pad[2]; /* For future binary compatibility */
    733 };
    734 
    735 #define IOCTL_KGSL_PERFCOUNTER_PUT \
    736 	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
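
/*
 * Illustrative sketch (assumes fd; error handling omitted, and countable 0
 * is an arbitrary example value): reserve a counter in the SP group, then
 * release it when finished.
 *
 *	struct kgsl_perfcounter_get get = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get);
 *
 * get.offset and get.offset_hi now identify the reserved LO/HI counter.
 *
 *	struct kgsl_perfcounter_put put = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
 */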
    737 
    738 /**
    739  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
    740  * @groupid: Performance counter group ID
    741  * @countables: Array used to return the currently active countables
    742  * @count: Number of entries in the countables array
    743  * @max_counters: Return the total number of counters for the group ID
    744  *
    745  * Query the available performance counters given a groupid.  The array
    746  * *countables is used to return the currently active countables.
    747  * The size of the array is passed in so the kernel will write at most
    748  * count entries for the group id.  The total number of available
    749  * counters for the group ID is returned in max_counters.
    750  * If the array or count passed in is invalid, then only the maximum number
    751  * of counters will be returned; no data will be written to *countables.
    752  * If the groupid is invalid an error code will be returned.
    753  *
    754  */
    755 struct kgsl_perfcounter_query {
    756 	unsigned int groupid;
    757 	/* Array to return the current countable for up to size counters */
    758 	unsigned int *countables;
    759 	unsigned int count;
    760 	unsigned int max_counters;
    761 /* private: reserved for future use */
    762 	unsigned int __pad[2]; /* For future binary compatibility */
    763 };
    764 
    765 #define IOCTL_KGSL_PERFCOUNTER_QUERY \
    766 	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
    767 
    768 /**
    769  * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
    770  * @reads: Array of kgsl_perfcounter_read_group entries; each entry supplies
    771  * a groupid/countable pair and returns the value read for that counter
    772  * @count: Number of entries in the reads array
    773  *
    774  * Read the current value of a performance counter for each groupid and
    775  * countable pair supplied in the reads array.
    776  * The value field of each entry is filled in by the kernel.
    777  *
    778  */
    779 
    780 struct kgsl_perfcounter_read_group {
    781 	unsigned int groupid;
    782 	unsigned int countable;
    783 	unsigned long long value;
    784 };
    785 
    786 struct kgsl_perfcounter_read {
    787 	struct kgsl_perfcounter_read_group *reads;
    788 	unsigned int count;
    789 /* private: reserved for future use */
    790 	unsigned int __pad[2]; /* For future binary compatibility */
    791 };
    792 
    793 #define IOCTL_KGSL_PERFCOUNTER_READ \
    794 	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
    795 /*
    796  * struct kgsl_gpumem_sync_cache_bulk - argument to
    797  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
    798  * @id_list: list of GPU buffer ids of the buffers to sync
    799  * @count: number of GPU buffer ids in id_list
    800  * @op: a mask of KGSL_GPUMEM_CACHE_* values
    801  *
    802  * Sync the cache for memory headed to and from the GPU. Certain
    803  * optimizations can be made on the cache operation based on the total
    804  * size of the working set of memory to be managed.
    805  */
    806 struct kgsl_gpumem_sync_cache_bulk {
    807 	unsigned int *id_list;
    808 	unsigned int count;
    809 	unsigned int op;
    810 /* private: reserved for future use */
    811 	unsigned int __pad[2]; /* For future binary compatibility */
    812 };
    813 
    814 #define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
    815 	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
    816 
    817 /*
    818  * struct kgsl_cmd_syncpoint_timestamp
    819  * @context_id: ID of a KGSL context
    820  * @timestamp: GPU timestamp
    821  *
    822  * This structure defines a syncpoint comprising a context/timestamp pair. A
    823  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
    824  * dependencies that must be met before the command can be submitted to the
    825  * hardware
    826  */
    827 struct kgsl_cmd_syncpoint_timestamp {
    828 	unsigned int context_id;
    829 	unsigned int timestamp;
    830 };
    831 
    832 #define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
    833 
    834 struct kgsl_cmd_syncpoint_fence {
    835 	int fd;
    836 };
    837 
    838 #define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
    839 
    840 /**
    841  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
    842  * @type: type of sync point defined here
    843  * @priv: Pointer to the type specific buffer
    844  * @size: Size of the type specific buffer
    845  *
    846  * This structure contains pointers defining a specific command sync point.
    847  * The pointer and size should point to a type appropriate structure.
    848  */
    849 struct kgsl_cmd_syncpoint {
    850 	int type;
    851 	void __user *priv;
    852 	unsigned int size;
    853 };
    854 
    855 /**
    856  * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
    857  * @context_id: KGSL context ID that owns the commands
    858  * @flags: Mask of KGSL_CONTEXT_* flags for the command batch
    859  * @cmdlist: User pointer to a list of kgsl_ibdesc structures
    860  * @numcmds: Number of commands listed in cmdlist
    861  * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
    862  * @numsyncs: Number of sync points listed in synclist
    863  * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
    864  * assigned to the command batch
    865  *
    866  * This structure specifies a command to send to the GPU hardware.  This is
    867  * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
    868  * submit IB lists and it adds sync points to block the IB until the
    869  * dependencies are satisfied.  This entry point is the new and preferred way
    870  * to submit commands to the GPU.
    871  */
    872 
    873 struct kgsl_submit_commands {
    874 	unsigned int context_id;
    875 	unsigned int flags;
    876 	struct kgsl_ibdesc __user *cmdlist;
    877 	unsigned int numcmds;
    878 	struct kgsl_cmd_syncpoint __user *synclist;
    879 	unsigned int numsyncs;
    880 	unsigned int timestamp;
    881 /* private: reserved for future use */
    882 	unsigned int __pad[4];
    883 };
    884 
    885 #define IOCTL_KGSL_SUBMIT_COMMANDS \
    886 	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
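
/*
 * Illustrative sketch (assumes fd, a drawctxt id, and a GPU buffer holding
 * command dwords; names such as cmds_gpuaddr are placeholders; error
 * handling omitted): submit one IB that waits on another context's
 * timestamp.
 *
 *	struct kgsl_ibdesc ib = {
 *		.gpuaddr = cmds_gpuaddr,
 *		.sizedwords = cmds_dwords,
 *	};
 *	struct kgsl_cmd_syncpoint_timestamp dep = {
 *		.context_id = other_ctxt,
 *		.timestamp = other_ts,
 *	};
 *	struct kgsl_cmd_syncpoint sync = {
 *		.type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
 *		.priv = &dep,
 *		.size = sizeof(dep),
 *	};
 *	struct kgsl_submit_commands submit = {
 *		.context_id = ctxt_id,
 *		.cmdlist = &ib,
 *		.numcmds = 1,
 *		.synclist = &sync,
 *		.numsyncs = 1,
 *	};
 *	ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &submit);
 *
 * On return submit.timestamp holds the timestamp assigned to the batch.
 */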
    887 
    888 /**
    889  * struct kgsl_device_constraint - device constraint argument
    890  * @context_id: KGSL context ID
    891  * @type: type of constraint, i.e. pwrlevel/none
    892  * @data: constraint data
    893  * @size: size of the constraint data
    894  */
    895 struct kgsl_device_constraint {
    896 	unsigned int type;
    897 	unsigned int context_id;
    898 	void __user *data;
    899 	size_t size;
    900 };
    901 
    902 /* Constraint Type*/
    903 #define KGSL_CONSTRAINT_NONE 0
    904 #define KGSL_CONSTRAINT_PWRLEVEL 1
    905 
    906 /* PWRLEVEL constraint level*/
    907 /* set to min frequency */
    908 #define KGSL_CONSTRAINT_PWR_MIN    0
    909 /* set to max frequency */
    910 #define KGSL_CONSTRAINT_PWR_MAX    1
    911 
    912 struct kgsl_device_constraint_pwrlevel {
    913 	unsigned int level;
    914 };
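
/*
 * Illustrative sketch (assumes fd and a context id; error handling omitted,
 * and the SETPROPERTY pairing below is an assumption about how the
 * constraint is delivered): request the maximum power level for a context
 * by passing a KGSL_PROP_PWR_CONSTRAINT property through
 * IOCTL_KGSL_SETPROPERTY.
 *
 *	struct kgsl_device_constraint_pwrlevel lvl = {
 *		.level = KGSL_CONSTRAINT_PWR_MAX,
 *	};
 *	struct kgsl_device_constraint constraint = {
 *		.type = KGSL_CONSTRAINT_PWRLEVEL,
 *		.context_id = ctxt_id,
 *		.data = &lvl,
 *		.size = sizeof(lvl),
 *	};
 *	struct kgsl_device_getproperty prop = {
 *		.type = KGSL_PROP_PWR_CONSTRAINT,
 *		.value = &constraint,
 *		.sizebytes = sizeof(constraint),
 *	};
 *	ioctl(fd, IOCTL_KGSL_SETPROPERTY, &prop);
 */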
    915 
    916 #ifdef __KERNEL__
    917 #ifdef CONFIG_MSM_KGSL_DRM
    918 int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
    919 			unsigned long *len);
    920 #else
    921 #define kgsl_gem_obj_addr(...) 0
    922 #endif
    923 #endif
    924 #endif /* _MSM_KGSL_H */
    925