#ifndef _UAPI_MSM_KGSL_H
#define _UAPI_MSM_KGSL_H

/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14

/* context flags */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100

#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
#define KGSL_CONTEXT_SYNC               0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
/* bits [12:15] are reserved for future use */
#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT     12
#define KGSL_CONTEXT_PRIORITY_UNDEF     0

#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20

#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

#define KGSL_CONTEXT_INVALID 0xffffffff

/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000

/* Memory caching hints */
#define KGSL_CACHEMODE_MASK 0x0C000000
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16
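
/*
 * Example (illustrative, not part of the original header): a minimal sketch
 * of how the allocation flag bitfields above might be composed for an
 * allocation request. The chosen values (writeback caching, texture usage
 * hint, 8 KB alignment, GPU read-only) are arbitrary assumptions.
 *
 *	unsigned int flags = 0;
 *
 *	flags |= KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT;
 *	flags |= KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT;
 *	flags |= (13 << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;   // 2^13 = 8 KB
 *	flags |= KGSL_MEMFLAGS_GPUREADONLY;
 */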

enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000004,
};
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)

/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controlled by a given platform */
#define KGSL_CLK_SRC	0x00000001
#define KGSL_CLK_CORE	0x00000002
#define KGSL_CLK_IFACE	0x00000004
#define KGSL_CLK_MEM	0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI	0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};

#define KGSL_CONVERT_TO_MBPS(val) \
	(val*1000*1000U)

/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_2D0		= 0x00000001,
	KGSL_DEVICE_2D1		= 0x00000002,
	KGSL_DEVICE_MAX		= 0x00000003
};

struct kgsl_devinfo {

	unsigned int device_id;
	/* chip revision id
	* coreid:8 majorrev:8 minorrev:8 patch:8
	*/
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	* This field contains the adreno revision
	* number 200, 205, 220, etc...
	*/
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};

/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))
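
/*
 * Example (illustrative, not part of the original header): a sketch of how
 * KGSL_MEMSTORE_OFFSET() might be combined with the shadow region reported
 * by KGSL_PROP_DEVICE_SHADOW to read a context's retired timestamp from an
 * existing mmap() of the memstore. 'memstore_base' (the mapping) and
 * 'ctxt_id' (an existing context id) are assumed to be set up already.
 *
 *	volatile unsigned int *eop;
 *	unsigned int retired;
 *
 *	eop = (volatile unsigned int *)((char *)memstore_base +
 *			KGSL_MEMSTORE_OFFSET(ctxt_id, eoptimestamp));
 *	retired = *eop;
 */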

/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
	KGSL_PROP_DEVICE_INFO     = 0x00000001,
	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
	KGSL_PROP_DEVICE_POWER    = 0x00000003,
	KGSL_PROP_SHMEM           = 0x00000004,
	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
	KGSL_PROP_MMU_ENABLE      = 0x00000006,
	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
	KGSL_PROP_VERSION         = 0x00000008,
	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
	KGSL_PROP_PWRCTRL         = 0x0000000E,
	KGSL_PROP_PWR_CONSTRAINT  = 0x00000012,
};

struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_MAX 0x19

#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE

/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;
	unsigned long __pad;
	size_t sizedwords;
	unsigned int ctrl;
};

/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find mmap() offset and sizes for mapping
   struct kgsl_devmemstore into userspace.
*/
struct kgsl_device_getproperty {
	unsigned int type;
	void __user *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
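
/*
 * Example (illustrative, not part of the original header): a minimal sketch
 * of querying device information. The device node path, includes and error
 * handling are assumptions and are omitted or simplified here.
 *
 *	struct kgsl_devinfo devinfo = { 0 };
 *	struct kgsl_device_getproperty prop = {
 *		.type = KGSL_PROP_DEVICE_INFO,
 *		.value = &devinfo,
 *		.sizebytes = sizeof(devinfo),
 *	};
 *	int fd = open("/dev/kgsl-3d0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop) == 0)
 *		printf("gpu_id %u chip_id 0x%x\n", devinfo.gpu_id, devinfo.chip_id);
 */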

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
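
/*
 * Example (illustrative, not part of the original header): a sketch of
 * blocking until a per-context timestamp retires, assuming 'fd' is the open
 * KGSL device and 'ctxt' and 'ts' came from earlier ioctls.
 *
 *	struct kgsl_device_waittimestamp_ctxtid wait = {
 *		.context_id = ctxt,
 *		.timestamp = ts,
 *		.timeout = 1000,	// milliseconds
 *	};
 *
 *	int ret = ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
 *	// ret != 0 with errno == ETIMEDOUT means the timeout expired first
 */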

/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specifies a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write-only ioctl.  To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask of KGSL_CONTEXT_* values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
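
/*
 * Example (illustrative, not part of the original header): a sketch of
 * creating a draw context with a usage type and priority encoded into the
 * flags, assuming 'fd' is the open KGSL device. The priority value 1 is an
 * arbitrary assumption.
 *
 *	struct kgsl_drawctxt_create create = {
 *		.flags = KGSL_CONTEXT_PREAMBLE | KGSL_CONTEXT_PER_CONTEXT_TS |
 *			(KGSL_CONTEXT_TYPE_GL << KGSL_CONTEXT_TYPE_SHIFT) |
 *			((1 << KGSL_CONTEXT_PRIORITY_SHIFT) &
 *			 KGSL_CONTEXT_PRIORITY_MASK),
 *	};
 *
 *	int ret = ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &create);
 *	// on success create.drawctxt_id holds the new context id
 */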

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)

/* add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space */
struct kgsl_map_user_mem {
	int fd;
	unsigned long gpuaddr;   /*output param */
	size_t len;
	size_t offset;
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
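
/*
 * Example (illustrative, not part of the original header): a sketch of
 * importing an ION buffer into the GPU address space. 'ion_fd', 'buf_len'
 * and 'fd' (the open KGSL device) are assumed to exist already, and
 * use_gpu_address() is a hypothetical helper.
 *
 *	struct kgsl_map_user_mem map = {
 *		.fd = ion_fd,
 *		.len = buf_len,
 *		.offset = 0,
 *		.memtype = KGSL_USER_MEM_TYPE_ION,
 *		.flags = 0,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &map) == 0)
 *		use_gpu_address(map.gpuaddr);
 */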

struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)

/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;	/*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

struct kgsl_buffer_desc {
	void		*hostptr;
	unsigned long	gpuaddr;
	int		size;
	unsigned int	format;
	unsigned int	pitch;
	unsigned int	enabled;
};

struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)

/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void __user *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event signals a sync fence when the timestamp expires */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
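
/*
 * Example (illustrative, not part of the original header): a sketch of
 * asking the kernel to create a fence that signals when a context timestamp
 * retires. 'fd', 'ctxt' and 'ts' are assumed to exist already, and
 * wait_on_fence_fd() is a hypothetical helper.
 *
 *	struct kgsl_timestamp_event_fence fence = { .fence_fd = -1 };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_FENCE,
 *		.timestamp = ts,
 *		.context_id = ctxt,
 *		.priv = &fence,
 *		.len = sizeof(fence),
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event) == 0)
 *		wait_on_fence_fd(fence.fence_fd);
 */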

/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
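
/*
 * Example (illustrative, not part of the original header): a minimal sketch
 * of allocating GPU memory with KGSL_MEMFLAGS_USE_CPU_MAP and mapping it so
 * the CPU mapping address also serves as the GPU address. 'fd' is assumed
 * to be the open KGSL device, and the mmap() offset convention shown
 * (id << 12 on 4 KB pages) is an assumption about this driver version.
 *
 *	struct kgsl_gpumem_alloc_id alloc = {
 *		.flags = KGSL_MEMFLAGS_USE_CPU_MAP |
 *			(KGSL_MEMTYPE_ARRAYBUFFER << KGSL_MEMTYPE_SHIFT),
 *		.size = 4096,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc) == 0) {
 *		void *cpuaddr = mmap(NULL, alloc.mmapsize,
 *				PROT_READ | PROT_WRITE, MAP_SHARED,
 *				fd, (off_t)alloc.id << 12);
 *		// with USE_CPU_MAP the mapping address doubles as the GPU address
 *	}
 */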

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)

/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned long __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)

/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 * @offset: offset into the buffer
 * @length: number of bytes starting from offset to perform
 * the cache operation on
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int op;
	size_t offset;
	size_t length;
};

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
#define KGSL_GPUMEM_CACHE_RANGE (1 << 31U)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
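
/*
 * Example (illustrative, not part of the original header): a sketch of
 * cleaning a sub-range of a buffer after the CPU has written it and before
 * the GPU reads it. 'fd' and 'buf_id' are assumed to exist already; the
 * offset and length values are arbitrary.
 *
 *	struct kgsl_gpumem_sync_cache sync = {
 *		.id = buf_id,
 *		.op = KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_RANGE,
 *		.offset = 0,
 *		.length = 4096,
 *	};
 *
 *	int ret = ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
 */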

/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved LO counter
 * @offset_hi: Return offset of the reserved HI counter
 *
 * Get an available performance counter from a specified groupid.  The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group.  An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left.  After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, countable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
	unsigned int offset_hi;
/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken.  This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)

/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Size of the countables array
 * @max_counters: Return total number of counters for the group ID
 *
 * Query the available performance counters given a groupid.  The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * count entries for the group id.  The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to count counters */
	unsigned int __user *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of groupid/countable pairs; the current value of each
 * counter is returned in the corresponding 'value' field
 * @count: Number of entries in the reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group __user *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
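
/*
 * Example (illustrative, not part of the original header): a sketch of
 * reserving a counter, sampling it, and releasing it. The group/countable
 * pair shown is an arbitrary assumption; 'fd' is the open KGSL device.
 *
 *	struct kgsl_perfcounter_get get = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	struct kgsl_perfcounter_read_group sample = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	struct kgsl_perfcounter_read rd = { .reads = &sample, .count = 1 };
 *	struct kgsl_perfcounter_put put = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get) == 0) {
 *		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &rd);
 *		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
 *	}
 */
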
/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int __user *id_list;
	unsigned int count;
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)

/*
 * struct kgsl_cmd_syncpoint_timestamp
 * @context_id: ID of a KGSL context
 * @timestamp: GPU timestamp
 *
 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware
 */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0

struct kgsl_cmd_syncpoint_fence {
	int fd;
};

#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1

/**
 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
 * @type: type of sync point defined here
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 *
 * This structure contains pointers defining a specific command sync point.
 * The pointer and size should point to a type appropriate structure.
 */
struct kgsl_cmd_syncpoint {
	int type;
	void __user *priv;
	size_t size;
};

/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: mask of KGSL_CONTEXT_* values
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
 * submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied.  This entry point is the new and preferred way
 * to submit commands to the GPU.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
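
/*
 * Example (illustrative, not part of the original header): a sketch of
 * submitting a single IB that must wait on a fence before executing. 'fd',
 * 'ctxt', 'ib_gpuaddr', 'ib_dwords' and 'in_fence_fd' are assumed inputs.
 *
 *	struct kgsl_ibdesc ib = {
 *		.gpuaddr = ib_gpuaddr,
 *		.sizedwords = ib_dwords,
 *	};
 *	struct kgsl_cmd_syncpoint_fence fence = { .fd = in_fence_fd };
 *	struct kgsl_cmd_syncpoint sync = {
 *		.type = KGSL_CMD_SYNCPOINT_TYPE_FENCE,
 *		.priv = &fence,
 *		.size = sizeof(fence),
 *	};
 *	struct kgsl_submit_commands cmds = {
 *		.context_id = ctxt,
 *		.cmdlist = &ib,
 *		.numcmds = 1,
 *		.synclist = &sync,
 *		.numsyncs = 1,
 *	};
 *
 *	int ret = ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &cmds);
 *	// on success cmds.timestamp is the timestamp assigned to this batch
 */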

/**
 * struct kgsl_device_constraint - device constraint argument
 * @context_id: KGSL context ID
 * @type: type of constraint i.e. pwrlevel/none
 * @data: constraint data
 * @size: size of the constraint data
 */
struct kgsl_device_constraint {
	unsigned int type;
	unsigned int context_id;
	void __user *data;
	size_t size;
};

/* Constraint Type */
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* PWRLEVEL constraint level */
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN    0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX    1

struct kgsl_device_constraint_pwrlevel {
	unsigned int level;
};
#endif /* _UAPI_MSM_KGSL_H */