/*
 * Copyright (c) 2020 LK Trusty Authors. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <err.h>
#include <inttypes.h>
#include <kernel/vm.h>
#include <lib/extmem/extmem.h>
#include <sys/types.h>
#include <trace.h>

#define LOCAL_TRACE 0

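/* Get the ext_mem_obj containing @vmm_obj. */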
static struct ext_mem_obj* ext_mem_obj_from_vmm_obj(struct vmm_obj* vmm_obj) {
    return containerof(vmm_obj, struct ext_mem_obj, vmm_obj);
}

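/* Get the ext_mem_obj containing @node. */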
static struct ext_mem_obj* ext_mem_obj_from_bst_node(struct bst_node* node) {
    return containerof(node, struct ext_mem_obj, node);
}

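/* Order tree nodes by object id for bst_insert and bst_search_type. */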
static int ext_mem_obj_cmp(struct bst_node* a_bst, struct bst_node* b_bst) {
    struct ext_mem_obj* a = ext_mem_obj_from_bst_node(a_bst);
    struct ext_mem_obj* b = ext_mem_obj_from_bst_node(b_bst);

    return a->id < b->id ? 1 : a->id > b->id ? -1 : 0;
}

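/*
 * Initialize an ext_mem_obj and take the initial reference on its vmm_obj.
 * match_tag starts at 0; for objects with a nonzero tag it must be set with
 * ext_mem_obj_set_match_tag before the object can be mapped.
 */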
void ext_mem_obj_initialize(struct ext_mem_obj* obj,
                            struct obj_ref* ref,
                            ext_mem_obj_id_t id,
                            uint64_t tag,
                            struct vmm_obj_ops* ops,
                            uint arch_mmu_flags,
                            size_t page_run_count) {
    obj->id = id;
    obj->tag = tag;
    obj->match_tag = 0;
    obj->vmm_obj.ops = ops;
    obj->arch_mmu_flags = arch_mmu_flags;
    obj->page_run_count = page_run_count;
    obj_init(&obj->vmm_obj.obj, ref);
    bst_node_initialize(&obj->node);
}

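/* Insert @obj into the id-keyed tree @objs. */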
bool ext_mem_insert(struct bst_root* objs, struct ext_mem_obj* obj) {
    return bst_insert(objs, &obj->node, ext_mem_obj_cmp);
}

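/* Find the object with id @id in @objs, using a temporary key object. */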
struct ext_mem_obj* ext_mem_lookup(struct bst_root* objs, ext_mem_obj_id_t id) {
    struct ext_mem_obj ref_obj;
    ref_obj.id = id;
    return bst_search_type(objs, &ref_obj, ext_mem_obj_cmp, struct ext_mem_obj,
                           node);
}

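/*
 * Record the tag supplied by the client. ext_mem_obj_check_flags only
 * allows a mapping when this matches the tag the object was created with.
 */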
void ext_mem_obj_set_match_tag(struct vmm_obj* obj, uint64_t match_tag) {
    struct ext_mem_obj* ext_obj = ext_mem_obj_from_vmm_obj(obj);

    ext_obj->match_tag = match_tag;
}

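/*
 * Validate the flags requested for a mapping of an external memory object.
 * Rejects a mismatched tag, requests for more access (write or execute)
 * than the object allows, and callers that specify their own cache or NS
 * attributes. On success the object's arch_mmu_flags are merged into
 * *arch_mmu_flags.
 */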
int ext_mem_obj_check_flags(struct vmm_obj* obj, uint* arch_mmu_flags) {
    struct ext_mem_obj* ext_obj = ext_mem_obj_from_vmm_obj(obj);

    LTRACEF("obj 0x%" PRIx64 ", obj arch_mmu_flags 0x%x, arch_mmu_flags 0x%x\n",
            ext_obj->id, ext_obj->arch_mmu_flags, *arch_mmu_flags);

    if (ext_obj->match_tag != ext_obj->tag) {
        TRACEF("WARNING: tag mismatch: 0x%" PRIx64 " != 0x%" PRIx64 "\n",
               ext_obj->match_tag, ext_obj->tag);
        return ERR_ACCESS_DENIED;
    }

    if (!(*arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO) &&
        (ext_obj->arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO)) {
        TRACEF("rw access denied. arch_mmu_flags=0x%x, ext_obj->flags=0x%x\n",
               *arch_mmu_flags, ext_obj->arch_mmu_flags);
        return ERR_ACCESS_DENIED;
    }

    if (!(*arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) &&
        (ext_obj->arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)) {
        TRACEF("exec access denied. arch_mmu_flags=0x%x, ext_obj->flags=0x%x\n",
               *arch_mmu_flags, ext_obj->arch_mmu_flags);
        return ERR_ACCESS_DENIED;
    }

    /*
     * Memory types must be consistent with external mappings, so don't allow
     * the caller to specify them.
     */
    if (*arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK) {
        TRACEF("cache attributes should come from vmm_obj, not from caller\n");
        return ERR_INVALID_ARGS;
    }

    if (*arch_mmu_flags & ARCH_MMU_FLAG_NS) {
        TRACEF("ARCH_MMU_FLAG_NS should come from vmm_obj, not from caller\n");
        return ERR_INVALID_ARGS;
    }

    *arch_mmu_flags |= ext_obj->arch_mmu_flags;

    return 0;
}

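/*
 * Translate @offset into a physical address by walking the object's page
 * runs. On success, *paddr_size is set to the number of contiguous bytes
 * remaining in the run that contains @offset.
 */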
int ext_mem_obj_get_page(struct vmm_obj* obj,
                         size_t offset,
                         paddr_t* paddr,
                         size_t* paddr_size) {
    struct ext_mem_obj* ext_obj = ext_mem_obj_from_vmm_obj(obj);
    size_t index;
    size_t page_offset;

    LTRACEF("offset %zd page_run_count %zd\n", offset, ext_obj->page_run_count);

    page_offset = offset;
    index = 0;
    while (index < ext_obj->page_run_count &&
           ext_obj->page_runs[index].size <= page_offset) {
        page_offset -= ext_obj->page_runs[index].size;
        index++;
    }

    if (index >= ext_obj->page_run_count) {
        TRACEF("offset %zd out of range index %zd >= %zd\n", offset, index,
               ext_obj->page_run_count);
        return ERR_OUT_OF_RANGE;
    }

    *paddr = ext_obj->page_runs[index].paddr + page_offset;
    *paddr_size = ext_obj->page_runs[index].size - page_offset;
    LTRACEF("offset %zd, index %zd/%zd -> paddr 0x%" PRIxPADDR ", size %zu\n",
            offset, index, ext_obj->page_run_count, *paddr, *paddr_size);

    return 0;
}

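/*
 * Get the vmm_obj for @mem_obj_id from the external memory implementation,
 * set its match tag from @tag, and map it into @aspace. The local reference
 * taken here is dropped after vmm_alloc_obj, which takes its own reference
 * on success.
 */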
status_t ext_mem_map_obj_id(vmm_aspace_t* aspace,
                            const char* name,
                            ext_mem_client_id_t client_id,
                            ext_mem_obj_id_t mem_obj_id,
                            uint64_t tag,
                            size_t offset,
                            size_t size,
                            void** ptr,
                            uint8_t align_log2,
                            uint vmm_flags,
                            uint arch_mmu_flags) {
    status_t err;
    struct vmm_obj* vmm_obj = NULL;
    struct obj_ref vmm_obj_ref = OBJ_REF_INITIAL_VALUE(vmm_obj_ref);

    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));

    err = ext_mem_get_vmm_obj(client_id, mem_obj_id, tag, size + offset,
                              &vmm_obj, &vmm_obj_ref);
    if (err) {
        TRACEF("failed to get object, 0x%" PRIx64 ":0x%" PRIx64
               ", to map for %s\n",
               client_id, mem_obj_id, name);
        return err;
    }

    /* If tag is not 0, match_tag must be set before the object can be mapped */
    ext_mem_obj_set_match_tag(vmm_obj, tag);

    err = vmm_alloc_obj(aspace, name, vmm_obj, offset, size, ptr, align_log2,
                        vmm_flags, arch_mmu_flags);
    vmm_obj_del_ref(vmm_obj, &vmm_obj_ref);
    if (err) {
        TRACEF("failed to map object, 0x%" PRIx64 ":0x%" PRIx64 ", for %s\n",
               client_id, mem_obj_id, name);
        return err;
    }
    LTRACEF("mapped 0x%" PRIx64 ":0x%" PRIx64 " at %p\n", client_id, mem_obj_id,
            *ptr);
    return err;
}