/*
 * include/linux/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_ION_H
#define _LINUX_ION_H

#include <linux/ioctl.h>
#include <linux/types.h>

struct ion_handle;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
 *				 carveout heap, allocations are physically
 *				 contiguous
 * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:	 must be last so device specific heap types
 *				 (e.g. the IOMMU and content-protection
 *				 heaps, defined outside this header —
 *				 presumably in <mach/ion.h>) always land at
 *				 the end of this enum
 * @ION_NUM_HEAPS:		 helper for iterating over heaps; a bit mask
 *				 is used to identify the heaps, so the heap
 *				 id space is bounded (capped at 16 here,
 *				 although a 32-bit mask could hold 32)
 */
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
				 are at the end of this enum */
	ION_NUM_HEAPS = 16,
};

/* Convenience masks: each heap type above occupies one bit of a heap mask. */
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)

/**
 * heap flags - the lower 16 bits are used by core ion, the upper 16
 * bits are reserved for use by the heaps themselves.
 */
#define ION_FLAG_CACHED 1	/* mappings of this buffer should be
				   cached, ion will do cache
				   maintenance when the buffer is
				   mapped for dma */

#ifdef __KERNEL__
#include <linux/err.h>
#include <mach/ion.h>
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's that would have to */
#define ion_phys_addr_t unsigned long
#define ion_virt_addr_t unsigned long

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from ion_heap_type enum
 * @id:		unique identifier for heap.
 *		When allocating (lower numbers
 *		will be allocated from first)
 * @name:	used for debug purposes
 * @base:	base address of heap in physical memory if applicable
 * @size:	size of the heap in bytes if applicable
 * @memory_type: memory type used for the heap
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @extra_data:	extra data specific to each heap type
 * @priv:	heap private data
 */
struct ion_platform_heap {
	enum ion_heap_type type;
	unsigned int id;
	const char *name;
	ion_phys_addr_t base;
	size_t size;
	/* NOTE(review): enum ion_memory_types is not declared in this
	   header — presumably it comes from <mach/ion.h>; verify. */
	enum ion_memory_types memory_type;
	unsigned int has_outer_cache;
	void *extra_data;
	void *priv;
};

/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @nr:		number of structures in the array
 * @request_region: function to be called when the number of allocations
 *		goes from 0 -> 1
 * @release_region: function to be called when the number of allocations
 *		goes from 1 -> 0
 * @setup_region: function to be called upon ion registration
 * @heaps:	array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform
 * device.
 */
struct ion_platform_data {
	unsigned int has_outer_cache;
	int nr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	struct ion_platform_heap *heaps;
};

#ifdef CONFIG_ION

/**
 * ion_reserve() - reserve memory for ion heaps if applicable
 * @data:	platform data specifying starting physical address and
 *		size
 *
 * Calls memblock reserve to set aside memory for heaps that are
 * located at specific memory addresses or of specific sizes not
 * managed by the kernel
 */
void ion_reserve(struct ion_platform_data *data);

/**
 * ion_client_create() - allocate a client and returns it
 * @dev:	the global ion device
 * @heap_mask:	mask of heaps this client can allocate from
 * @name:	used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask, const char *name);

/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 * drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as ion_client_create, return values
 * are the same as ion_client_create.
 */

struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					 const char *name);

/**
 * ion_client_destroy() - frees a client and all its handles
 * @client:	the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client:	the client
 * @len:	size of the allocation
 * @align:	requested allocation alignment, lots of hardware blocks have
 *		alignment requirements of some kind
 * @heap_mask:	mask of heaps to allocate from, if multiple bits are set
 *		heaps will be tried in order from lowest to highest order bit
 * @flags:	heap flags, the low 16 bits are consumed by ion, the high 16
 *		bits are passed on to the respective heap and can be heap
 *		custom
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags);

/**
 * ion_free - free a handle
 * @client:	the client
 * @handle:	the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_phys - returns the physical address and len of a handle
 * @client:	the client
 * @handle:	the handle
 * @addr:	a pointer to put the address in
 * @len:	a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_sg_table should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len);

/**
 * ion_sg_table - return an sg_table describing a handle
 * @client:	the client
 * @handle:	the handle
 *
 * This function returns the sg_table describing
 * a particular ion handle.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle);

/**
 * ion_map_kernel - create mapping for the given handle
 * @client:	the client
 * @handle:	handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.  No mapping flags can be passed
 * through this prototype; the mapping returned is a non-secure uncached
 * mapping.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client:	the client
 * @handle:	handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_share_dma_buf() - given an ion client, create a dma-buf fd
 * @client:	the client
 * @handle:	the handle
 */
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
 * @client:	the client
 * @fd:		the dma-buf fd
 *
 * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
 * import that fd and return a handle representing it.
 * If a dma-buf from
 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);

/**
 * ion_handle_get_flags - get the flags for a given handle
 * @client:	client who allocated the handle
 * @handle:	handle to get the flags
 * @flags:	pointer to store the flags
 *
 * Gets the current flags for a handle.  These flags indicate various options
 * of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags);


/**
 * ion_map_iommu - map the given handle into an iommu
 * @client:	client who allocated the handle
 * @handle:	handle to map
 * @domain_num:	domain number to map to
 * @partition_num: partition number to allocate iova from
 * @align:	alignment for the iova
 * @iova_length: length of iova to map.  If the iova length is
 *		greater than the handle length, the remaining
 *		address space will be mapped to a dummy buffer.
 * @iova:	pointer to store the iova address
 * @buffer_size: pointer to store the size of the buffer
 * @flags:	flags for options to map
 * @iommu_flags: flags specific to the iommu.
 *
 * Maps the handle into the iova space specified via domain number.  Iova
 * will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
		  int domain_num, int partition_num, unsigned long align,
		  unsigned long iova_length, unsigned long *iova,
		  unsigned long *buffer_size,
		  unsigned long flags, unsigned long iommu_flags);


/**
 * ion_handle_get_size - get the allocated size of a given handle
 * @client:	client who allocated the handle
 * @handle:	handle to get the size
 * @size:	pointer to store the size
 *
 * Gives the allocated size of a handle.  Returns 0 on success, negative
 * value on error
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 * @client:	client who allocated the handle
 * @handle:	handle to unmap
 * @domain_num:	domain to unmap from
 * @partition_num: partition to unmap from
 *
 * Decrement the reference count on the iommu mapping.  If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
		     int domain_num, int partition_num);


/**
 * ion_secure_heap - secure a heap
 * @dev:	the ion device containing the heap (note: despite the old
 *		comment naming a client, the parameter is the device)
 * @heap_id:	heap id to secure.
 * @version:	version of content protection
 * @data:	extra data needed for protection
 *
 * Secure a heap
 * Returns 0 on success
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
		    void *data);

/**
 * ion_unsecure_heap - un-secure a heap
 * @dev:	the ion device containing the heap
 * @heap_id:	heap id to un-secure.
357 * @version - version of content protection 358 * @data - extra data needed for protection 359 * 360 * Un-secure a heap 361 * Returns 0 on success 362 */ 363 int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, 364 void *data); 365 366 /** 367 * msm_ion_do_cache_op - do cache operations. 368 * 369 * @client - pointer to ION client. 370 * @handle - pointer to buffer handle. 371 * @vaddr - virtual address to operate on. 372 * @len - Length of data to do cache operation on. 373 * @cmd - Cache operation to perform: 374 * ION_IOC_CLEAN_CACHES 375 * ION_IOC_INV_CACHES 376 * ION_IOC_CLEAN_INV_CACHES 377 * 378 * Returns 0 on success 379 */ 380 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, 381 void *vaddr, unsigned long len, unsigned int cmd); 382 383 #else 384 static inline void ion_reserve(struct ion_platform_data *data) 385 { 386 387 } 388 389 static inline struct ion_client *ion_client_create(struct ion_device *dev, 390 unsigned int heap_mask, const char *name) 391 { 392 return ERR_PTR(-ENODEV); 393 } 394 395 static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask, 396 const char *name) 397 { 398 return ERR_PTR(-ENODEV); 399 } 400 401 static inline void ion_client_destroy(struct ion_client *client) { } 402 403 static inline struct ion_handle *ion_alloc(struct ion_client *client, 404 size_t len, size_t align, 405 unsigned int heap_mask, 406 unsigned int flags) 407 { 408 return ERR_PTR(-ENODEV); 409 } 410 411 static inline void ion_free(struct ion_client *client, 412 struct ion_handle *handle) { } 413 414 415 static inline int ion_phys(struct ion_client *client, 416 struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len) 417 { 418 return -ENODEV; 419 } 420 421 static inline struct sg_table *ion_sg_table(struct ion_client *client, 422 struct ion_handle *handle) 423 { 424 return ERR_PTR(-ENODEV); 425 } 426 427 static inline void *ion_map_kernel(struct ion_client *client, 428 struct ion_handle 
*handle, unsigned long flags) 429 { 430 return ERR_PTR(-ENODEV); 431 } 432 433 static inline void ion_unmap_kernel(struct ion_client *client, 434 struct ion_handle *handle) { } 435 436 static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle) 437 { 438 return -ENODEV; 439 } 440 441 static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) 442 { 443 return ERR_PTR(-ENODEV); 444 } 445 446 static inline int ion_handle_get_flags(struct ion_client *client, 447 struct ion_handle *handle, unsigned long *flags) 448 { 449 return -ENODEV; 450 } 451 452 static inline int ion_map_iommu(struct ion_client *client, 453 struct ion_handle *handle, int domain_num, 454 int partition_num, unsigned long align, 455 unsigned long iova_length, unsigned long *iova, 456 unsigned long *buffer_size, 457 unsigned long flags, 458 unsigned long iommu_flags) 459 { 460 return -ENODEV; 461 } 462 463 static inline void ion_unmap_iommu(struct ion_client *client, 464 struct ion_handle *handle, int domain_num, 465 int partition_num) 466 { 467 return; 468 } 469 470 static inline int ion_secure_heap(struct ion_device *dev, int heap_id, 471 int version, void *data) 472 { 473 return -ENODEV; 474 475 } 476 477 static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id, 478 int version, void *data) 479 { 480 return -ENODEV; 481 } 482 483 static inline int msm_ion_do_cache_op(struct ion_client *client, 484 struct ion_handle *handle, void *vaddr, 485 unsigned long len, unsigned int cmd) 486 { 487 return -ENODEV; 488 } 489 490 #endif /* CONFIG_ION */ 491 #endif /* __KERNEL__ */ 492 493 /** 494 * DOC: Ion Userspace API 495 * 496 * create a client by opening /dev/ion 497 * most operations handled via following ioctls 498 * 499 */ 500 501 /** 502 * struct ion_allocation_data - metadata passed from userspace for allocations 503 * @len: size of the allocation 504 * @align: required alignment of the allocation 505 * @heap_mask: mask of heaps to 
 *		allocate from
 * @flags:	flags passed to heap
 * @handle:	pointer that will be populated with a cookie to use to refer
 *		to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int heap_mask;
	unsigned int flags;
	struct ion_handle *handle;
};

/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle:	a handle
 * @fd:		a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
	struct ion_handle *handle;
	int fd;
};

/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle:	a handle
 */
struct ion_handle_data {
	struct ion_handle *handle;
};

/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:	the custom ioctl function to call
 * @arg:	additional data to pass to the custom ioctl, typically a user
 *		pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};
#define ION_IOC_MAGIC 'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
			    struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the
 * handle field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)

/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
 *
 * Deprecated in favor of using the dma_buf api's correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary should be used after touching a cached buffer from the cpu,
 * this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
#endif /* _LINUX_ION_H */