/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2015 Intel Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _DMA_BUF_UAPI_H_
#define _DMA_BUF_UAPI_H_

#if defined(__linux__)

#include <linux/types.h>

#else /* One of the BSDs */

#include <stdint.h>
#include <sys/types.h>

typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;

#endif

/**
 * struct dma_buf_sync - Synchronize with CPU access.
 *
 * When a DMA buffer is accessed from the CPU via mmap, it is not always
 * possible to guarantee coherency between the CPU-visible map and underlying
 * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
 * any CPU access to give the kernel the chance to shuffle memory around if
 * needed.
 *
 * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
 * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
 * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
 * DMA_BUF_SYNC_END and the same read/write flags.
 *
 * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
 * coherency. It does not prevent other processes or devices from
 * accessing the memory at the same time. If synchronization with a GPU or
 * other device driver is required, it is the client's responsibility to
 * wait for the buffer to be ready for reading or writing before calling this
 * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
 * follow-up work is not submitted to the GPU or other device driver until
 * after this ioctl has been called with DMA_BUF_SYNC_END.
 *
 * If the driver or API with which the client is interacting uses implicit
 * synchronization, waiting for prior work to complete can be done via
 * poll() on the DMA buffer file descriptor. If the driver or API requires
 * explicit synchronization, the client may have to wait on a sync_file or
 * other synchronization primitive outside the scope of the DMA buffer API.
 */
struct dma_buf_sync {
	/**
	 * @flags: Set of access flags
	 *
	 * DMA_BUF_SYNC_START:
	 *     Indicates the start of a map access session.
	 *
	 * DMA_BUF_SYNC_END:
	 *     Indicates the end of a map access session.
	 *
	 * DMA_BUF_SYNC_READ:
	 *     Indicates that the mapped DMA buffer will be read by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_WRITE:
	 *     Indicates that the mapped DMA buffer will be written by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_RW:
	 *     An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
	 */
	__u64 flags;
};

#define DMA_BUF_SYNC_READ	(1 << 0)
#define DMA_BUF_SYNC_WRITE	(2 << 0)
#define DMA_BUF_SYNC_RW		(DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START	(0 << 2)
#define DMA_BUF_SYNC_END	(1 << 2)
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
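/*
 * Illustrative sketch (not part of this uapi): bracketing CPU access to an
 * mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC, as described above. The
 * dmabuf_fd, map, data and len names are hypothetical; the snippet assumes
 * <sys/ioctl.h>, <string.h> and a prior mmap() of the buffer, and
 * DMA_BUF_IOCTL_SYNC is defined at the end of this header.
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
 *	};
 *
 *	// Begin a CPU write session so the kernel can prepare the mapping.
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
 *		return -1;
 *
 *	memcpy(map, data, len);	// CPU access through the mmap()ed view
 *
 *	// End the session with the same read/write flags.
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
 *		return -1;
 */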
#define DMA_BUF_NAME_LEN 32

/**
 * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
 * current set of fences on a dma-buf file descriptor as a sync_file. CPU
 * waits via poll() or other driver-specific mechanisms typically wait on
 * whatever fences are on the dma-buf at the time the wait begins. This
 * is similar except that it takes a snapshot of the current fences on the
 * dma-buf for waiting later instead of waiting immediately. This is
 * useful for modern graphics APIs such as Vulkan which assume an explicit
 * synchronization model but still need to inter-operate with dma-buf.
 *
 * The intended usage pattern is the following:
 *
 * 1. Export a sync_file with flags corresponding to the expected GPU usage
 *    via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
 *
 * 2. Submit rendering work which uses the dma-buf. The work should wait on
 *    the exported sync file before rendering and produce another sync_file
 *    when complete.
 *
 * 3. Import the rendering-complete sync_file into the dma-buf with flags
 *    corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
 *
 * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
 * the above is not a single atomic operation. If userspace wants to ensure
 * ordering via these fences, it is the responsibility of userspace to use
 * locks or other mechanisms to ensure that no other context adds fences or
 * submits work between steps 1 and 3 above.
 */
struct dma_buf_export_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * the returned sync file waits on any writers of the dma-buf to
	 * complete. Waiting on the returned sync file is equivalent to
	 * poll() with POLLIN.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
	 * any users of the dma-buf (read or write) to complete. Waiting
	 * on the returned sync file is equivalent to poll() with POLLOUT.
	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
	 * is equivalent to just DMA_BUF_SYNC_WRITE.
	 */
	__u32 flags;
	/** @fd: Returned sync file descriptor */
	__s32 fd;
};
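/*
 * Illustrative sketch (not part of this uapi): steps 1 and 2 of the usage
 * pattern documented above. The dmabuf_fd name and the
 * submit_gpu_work_waiting_on() helper are hypothetical; the snippet assumes
 * <sys/ioctl.h> and <unistd.h>. DMA_BUF_IOCTL_EXPORT_SYNC_FILE is defined
 * at the end of this header.
 *
 *	struct dma_buf_export_sync_file export_args = {
 *		.flags = DMA_BUF_SYNC_WRITE,	// the GPU work will write the buffer
 *	};
 *
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &export_args) < 0)
 *		return -1;
 *
 *	// export_args.fd now holds a sync_file snapshotting the dma-buf's
 *	// current fences; pass it to the submission as a wait fence, then
 *	// close it once the submission holds its own reference.
 *	submit_gpu_work_waiting_on(export_args.fd);
 *	close(export_args.fd);
 */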
/**
 * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
 * sync_file into a dma-buf for the purposes of implicit synchronization
 * with other dma-buf consumers. This allows clients using explicitly
 * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
 * which expect implicit synchronization such as OpenGL or most media
 * drivers/video.
 */
struct dma_buf_import_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * this inserts the sync_file as a read-only fence. Any subsequent
	 * implicitly synchronized writes to this dma-buf will wait on this
	 * fence but reads will not.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
	 * write fence. All subsequent implicitly synchronized access to
	 * this dma-buf will wait on this fence.
	 */
	__u32 flags;
	/** @fd: Sync file descriptor */
	__s32 fd;
};

#define DMA_BUF_BASE		'b'
#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)

/* 32/64bitness of this uapi was botched in android, there's no difference
 * between them in actual uapi, they're just different numbers.
 */
#define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)

#endif
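/*
 * Illustrative sketch (not part of this uapi): step 3 of the usage pattern
 * documented with struct dma_buf_export_sync_file above. The dmabuf_fd and
 * render_done_fd names are hypothetical; render_done_fd is the sync_file
 * produced by the GPU submission, and the snippet assumes <sys/ioctl.h>
 * and <unistd.h>.
 *
 *	struct dma_buf_import_sync_file import_args = {
 *		.flags = DMA_BUF_SYNC_WRITE,	// the submitted work wrote the buffer
 *		.fd = render_done_fd,
 *	};
 *
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &import_args) < 0)
 *		return -1;
 *
 *	// The fence is now attached to the dma-buf; implicitly synchronized
 *	// consumers (e.g. OpenGL or media drivers) will wait on it.
 *	close(render_done_fd);
 */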