1 /*
2  * Copyright (c) 2019-2020 LK Trusty Authors. All Rights Reserved.
3  * Copyright (c) 2022, Arm Limited. All rights reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files
7  * (the "Software"), to deal in the Software without restriction,
8  * including without limitation the rights to use, copy, modify, merge,
9  * publish, distribute, sublicense, and/or sell copies of the Software,
10  * and to permit persons to whom the Software is furnished to do so,
11  * subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be
14  * included in all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #define LOCAL_TRACE 0
26 
27 #include <assert.h>
28 #include <err.h>
29 #include <interface/arm_ffa/arm_ffa.h>
30 #include <inttypes.h>
31 #include <kernel/mutex.h>
32 #include <kernel/vm.h>
33 #include <lib/arm_ffa/arm_ffa.h>
34 #include <lib/smc/smc.h>
35 #include <lk/init.h>
36 #include <lk/macros.h>
37 #include <string.h>
38 #include <sys/types.h>
39 #include <trace.h>
40 
/* Set to true once arm_ffa_setup() completes without error. */
static bool arm_ffa_init_is_success = false;
/* Our own FF-A partition id, obtained via FFA_ID_GET during setup. */
static uint16_t ffa_local_id;
/* Size in bytes of each rx/tx buffer registered with FFA_RXTX_MAP. */
static size_t ffa_buf_size;
/* Kernel virtual addresses of the tx/rx buffers; NULL until mapped. */
static void* ffa_tx;
static void* ffa_rx;
/* Feature bits discovered through FFA_FEATURES during setup. */
static bool supports_ns_bit = false;
static bool supports_rx_release = false;

/* Serializes all access to the shared rx/tx buffer pair above. */
static mutex_t ffa_rxtx_buffer_lock = MUTEX_INITIAL_VALUE(ffa_rxtx_buffer_lock);
50 
/* Returns true if FF-A was successfully initialized by the init hook. */
bool arm_ffa_is_init(void) {
    return arm_ffa_init_is_success;
}
54 
arm_ffa_call_id_get(uint16_t * id)55 static status_t arm_ffa_call_id_get(uint16_t* id) {
56     struct smc_ret8 smc_ret;
57 
58     smc_ret = smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
59 
60     switch (smc_ret.r0) {
61     case SMC_FC_FFA_SUCCESS:
62     case SMC_FC64_FFA_SUCCESS:
63         if (smc_ret.r2 & ~0xFFFFUL) {
64             TRACEF("Unexpected FFA_ID_GET result: %lx\n", smc_ret.r2);
65             return ERR_NOT_VALID;
66         }
67         *id = (uint16_t)(smc_ret.r2 & 0xFFFF);
68         return NO_ERROR;
69 
70     case SMC_FC_FFA_ERROR:
71         if (smc_ret.r2 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
72             return ERR_NOT_SUPPORTED;
73         } else {
74             TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
75             return ERR_NOT_VALID;
76         }
77 
78     default:
79         TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
80         return ERR_NOT_VALID;
81     }
82 }
83 
arm_ffa_call_version(uint16_t major,uint16_t minor,uint16_t * major_ret,uint16_t * minor_ret)84 static status_t arm_ffa_call_version(uint16_t major,
85                                      uint16_t minor,
86                                      uint16_t* major_ret,
87                                      uint16_t* minor_ret) {
88     struct smc_ret8 smc_ret;
89 
90     uint32_t version = FFA_VERSION(major, minor);
91     /* Bit 31 must be cleared. */
92     ASSERT(!(version >> 31));
93     smc_ret = smc8(SMC_FC_FFA_VERSION, version, 0, 0, 0, 0, 0, 0);
94     if (smc_ret.r0 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
95         return ERR_NOT_SUPPORTED;
96     }
97     *major_ret = FFA_VERSION_TO_MAJOR(smc_ret.r0);
98     *minor_ret = FFA_VERSION_TO_MINOR(smc_ret.r0);
99 
100     return NO_ERROR;
101 }
102 
103 /* TODO: When adding support for FFA version 1.1 feature ids should be added. */
arm_ffa_call_features(ulong id,bool * is_implemented,ffa_features2_t * features2,ffa_features3_t * features3)104 static status_t arm_ffa_call_features(ulong id,
105                                       bool* is_implemented,
106                                       ffa_features2_t* features2,
107                                       ffa_features3_t* features3) {
108     struct smc_ret8 smc_ret;
109 
110     ASSERT(is_implemented);
111 
112     /*
113      * According to the FF-A spec section "Discovery of NS bit usage",
114      * NS_BIT is optionally set by a v1.0 SP such as Trusty, and must
115      * be set by a v1.1+ SP. Here, we set it unconditionally for the
116      * relevant feature.
117      */
118     bool request_ns_bit = (id == SMC_FC_FFA_MEM_RETRIEVE_REQ) ||
119                           (id == SMC_FC64_FFA_MEM_RETRIEVE_REQ);
120     smc_ret = smc8(SMC_FC_FFA_FEATURES, id,
121                    request_ns_bit ? FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT : 0,
122                    0, 0, 0, 0, 0);
123 
124     switch (smc_ret.r0) {
125     case SMC_FC_FFA_SUCCESS:
126     case SMC_FC64_FFA_SUCCESS:
127         *is_implemented = true;
128         if (features2) {
129             *features2 = (ffa_features2_t)smc_ret.r2;
130         }
131         if (features3) {
132             *features3 = (ffa_features3_t)smc_ret.r3;
133         }
134         return NO_ERROR;
135 
136     case SMC_FC_FFA_ERROR:
137         if (smc_ret.r2 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
138             *is_implemented = false;
139             return NO_ERROR;
140         } else {
141             TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
142             return ERR_NOT_VALID;
143         }
144 
145     default:
146         TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
147         return ERR_NOT_VALID;
148     }
149 }
150 
151 /*
152  * Call with ffa_rxtx_buffer_lock acquired and the ffa_tx buffer already
153  * populated with struct ffa_mtd. Transmit in a single fragment.
154  */
arm_ffa_call_mem_retrieve_req(uint32_t * total_len,uint32_t * fragment_len)155 static status_t arm_ffa_call_mem_retrieve_req(uint32_t* total_len,
156                                               uint32_t* fragment_len) {
157     struct smc_ret8 smc_ret;
158     struct ffa_mtd* req = ffa_tx;
159     size_t len;
160 
161     DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
162 
163     len = offsetof(struct ffa_mtd, emad[0]) +
164           req->emad_count * sizeof(struct ffa_emad);
165 
166     smc_ret = smc8(SMC_FC_FFA_MEM_RETRIEVE_REQ, len, len, 0, 0, 0, 0, 0);
167 
168     long error;
169     switch (smc_ret.r0) {
170     case SMC_FC_FFA_MEM_RETRIEVE_RESP:
171         if (total_len) {
172             *total_len = (uint32_t)smc_ret.r1;
173         }
174         if (fragment_len) {
175             *fragment_len = (uint32_t)smc_ret.r2;
176         }
177         return NO_ERROR;
178     case SMC_FC_FFA_ERROR:
179         error = (long)smc_ret.r2;
180         switch (error) {
181         case FFA_ERROR_NOT_SUPPORTED:
182             return ERR_NOT_SUPPORTED;
183         case FFA_ERROR_INVALID_PARAMETERS:
184             return ERR_INVALID_ARGS;
185         case FFA_ERROR_NO_MEMORY:
186             return ERR_NO_MEMORY;
187         case FFA_ERROR_DENIED:
188             return ERR_BAD_STATE;
189         case FFA_ERROR_ABORTED:
190             return ERR_CANCELLED;
191         default:
192             TRACEF("Unknown error: 0x%lx\n", error);
193             return ERR_NOT_VALID;
194         }
195     default:
196         return ERR_NOT_VALID;
197     }
198 }
199 
arm_ffa_call_mem_frag_rx(uint64_t handle,uint32_t offset,uint32_t * fragment_len)200 static status_t arm_ffa_call_mem_frag_rx(uint64_t handle,
201                                          uint32_t offset,
202                                          uint32_t* fragment_len) {
203     struct smc_ret8 smc_ret;
204 
205     DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
206 
207     smc_ret = smc8(SMC_FC_FFA_MEM_FRAG_RX, (uint32_t)handle, handle >> 32,
208                    offset, 0, 0, 0, 0);
209 
210     /* FRAG_RX is followed by FRAG_TX on successful completion. */
211     switch (smc_ret.r0) {
212     case SMC_FC_FFA_MEM_FRAG_TX: {
213         uint64_t handle_out = smc_ret.r1 + ((uint64_t)smc_ret.r2 << 32);
214         if (handle != handle_out) {
215             TRACEF("Handle for response doesn't match the request, %" PRId64
216                    " != %" PRId64,
217                    handle, handle_out);
218             return ERR_NOT_VALID;
219         }
220         *fragment_len = smc_ret.r3;
221         return NO_ERROR;
222     }
223     case SMC_FC_FFA_ERROR:
224         switch ((int)smc_ret.r2) {
225         case FFA_ERROR_NOT_SUPPORTED:
226             return ERR_NOT_SUPPORTED;
227         case FFA_ERROR_INVALID_PARAMETERS:
228             return ERR_INVALID_ARGS;
229         case FFA_ERROR_ABORTED:
230             return ERR_CANCELLED;
231         default:
232             TRACEF("Unexpected error %d\n", (int)smc_ret.r2);
233             return ERR_NOT_VALID;
234         }
235     default:
236         TRACEF("Unexpected function id returned 0x%08lx\n", smc_ret.r0);
237         return ERR_NOT_VALID;
238     }
239 }
240 
arm_ffa_call_mem_relinquish(uint64_t handle,uint32_t flags,uint32_t endpoint_count,const ffa_endpoint_id16_t * endpoints)241 static status_t arm_ffa_call_mem_relinquish(
242         uint64_t handle,
243         uint32_t flags,
244         uint32_t endpoint_count,
245         const ffa_endpoint_id16_t* endpoints) {
246     struct smc_ret8 smc_ret;
247     struct ffa_mem_relinquish_descriptor* req = ffa_tx;
248 
249     if (!req) {
250         TRACEF("ERROR: no FF-A tx buffer\n");
251         return ERR_NOT_CONFIGURED;
252     }
253     ASSERT(endpoint_count <=
254            (ffa_buf_size - sizeof(struct ffa_mem_relinquish_descriptor)) /
255                    sizeof(ffa_endpoint_id16_t));
256 
257     mutex_acquire(&ffa_rxtx_buffer_lock);
258 
259     req->handle = handle;
260     req->flags = flags;
261     req->endpoint_count = endpoint_count;
262 
263     memcpy(req->endpoint_array, endpoints,
264            endpoint_count * sizeof(ffa_endpoint_id16_t));
265 
266     smc_ret = smc8(SMC_FC_FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
267 
268     mutex_release(&ffa_rxtx_buffer_lock);
269 
270     switch (smc_ret.r0) {
271     case SMC_FC_FFA_SUCCESS:
272     case SMC_FC64_FFA_SUCCESS:
273         return NO_ERROR;
274 
275     case SMC_FC_FFA_ERROR:
276         switch ((int)smc_ret.r2) {
277         case FFA_ERROR_NOT_SUPPORTED:
278             return ERR_NOT_SUPPORTED;
279         case FFA_ERROR_INVALID_PARAMETERS:
280             return ERR_INVALID_ARGS;
281         case FFA_ERROR_NO_MEMORY:
282             return ERR_NO_MEMORY;
283         case FFA_ERROR_DENIED:
284             return ERR_BAD_STATE;
285         case FFA_ERROR_ABORTED:
286             return ERR_CANCELLED;
287         default:
288             TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
289             return ERR_NOT_VALID;
290         }
291     default:
292         TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
293         return ERR_NOT_VALID;
294     }
295 }
296 
arm_ffa_call_rxtx_map(paddr_t tx_paddr,paddr_t rx_paddr,size_t page_count)297 static status_t arm_ffa_call_rxtx_map(paddr_t tx_paddr,
298                                       paddr_t rx_paddr,
299                                       size_t page_count) {
300     struct smc_ret8 smc_ret;
301 
302     /* Page count specified in bits [0:5] */
303     ASSERT(page_count);
304     ASSERT(page_count < (1 << 6));
305 
306 #if ARCH_ARM64
307     smc_ret = smc8(SMC_FC64_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0,
308                    0, 0);
309 #else
310     smc_ret = smc8(SMC_FC_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0, 0,
311                    0);
312 #endif
313     switch (smc_ret.r0) {
314     case SMC_FC_FFA_SUCCESS:
315     case SMC_FC64_FFA_SUCCESS:
316         return NO_ERROR;
317 
318     case SMC_FC_FFA_ERROR:
319         switch ((int)smc_ret.r2) {
320         case FFA_ERROR_NOT_SUPPORTED:
321             return ERR_NOT_SUPPORTED;
322         case FFA_ERROR_INVALID_PARAMETERS:
323             return ERR_INVALID_ARGS;
324         case FFA_ERROR_NO_MEMORY:
325             return ERR_NO_MEMORY;
326         case FFA_ERROR_DENIED:
327             return ERR_ALREADY_EXISTS;
328         default:
329             TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
330             return ERR_NOT_VALID;
331         }
332     default:
333         TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
334         return ERR_NOT_VALID;
335     }
336 }
337 
arm_ffa_call_rx_release(void)338 static status_t arm_ffa_call_rx_release(void) {
339     struct smc_ret8 smc_ret;
340 
341     DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
342 
343     smc_ret = smc8(SMC_FC_FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0);
344     switch (smc_ret.r0) {
345     case SMC_FC_FFA_SUCCESS:
346     case SMC_FC64_FFA_SUCCESS:
347         return NO_ERROR;
348 
349     case SMC_FC_FFA_ERROR:
350         switch ((int)smc_ret.r2) {
351         case FFA_ERROR_NOT_SUPPORTED:
352             return ERR_NOT_SUPPORTED;
353         case FFA_ERROR_DENIED:
354             return ERR_BAD_STATE;
355         default:
356             return ERR_NOT_VALID;
357         }
358     default:
359         return ERR_NOT_VALID;
360     }
361 }
362 
arm_ffa_rx_release_is_implemented(bool * is_implemented)363 static status_t arm_ffa_rx_release_is_implemented(bool* is_implemented) {
364     bool is_implemented_val;
365     status_t res = arm_ffa_call_features(SMC_FC_FFA_RX_RELEASE,
366                                          &is_implemented_val, NULL, NULL);
367     if (res != NO_ERROR) {
368         TRACEF("Failed to query for feature FFA_RX_RELEASE, err = %d\n", res);
369         return res;
370     }
371     if (is_implemented) {
372         *is_implemented = is_implemented_val;
373     }
374     return NO_ERROR;
375 }
376 
arm_ffa_rxtx_map_is_implemented(bool * is_implemented,size_t * buf_size_log2)377 static status_t arm_ffa_rxtx_map_is_implemented(bool* is_implemented,
378                                                 size_t* buf_size_log2) {
379     ffa_features2_t features2;
380     bool is_implemented_val = false;
381     status_t res;
382 
383     ASSERT(is_implemented);
384 #if ARCH_ARM64
385     res = arm_ffa_call_features(SMC_FC64_FFA_RXTX_MAP, &is_implemented_val,
386                                 &features2, NULL);
387 #else
388     res = arm_ffa_call_features(SMC_FC_FFA_RXTX_MAP, &is_implemented_val,
389                                 &features2, NULL);
390 #endif
391     if (res != NO_ERROR) {
392         TRACEF("Failed to query for feature FFA_RXTX_MAP, err = %d\n", res);
393         return res;
394     }
395     if (!is_implemented_val) {
396         *is_implemented = false;
397         return NO_ERROR;
398     }
399     if (buf_size_log2) {
400         ulong buf_size_id = features2 & FFA_FEATURES2_RXTX_MAP_BUF_SIZE_MASK;
401         switch (buf_size_id) {
402         case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K:
403             *buf_size_log2 = 12;
404             break;
405         case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_16K:
406             *buf_size_log2 = 14;
407             break;
408         case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_64K:
409             *buf_size_log2 = 16;
410             break;
411         default:
412             TRACEF("Unexpected rxtx buffer size identifier: %lx\n",
413                    buf_size_id);
414             return ERR_NOT_VALID;
415         }
416     }
417 
418     *is_implemented = true;
419     return NO_ERROR;
420 }
421 
arm_ffa_mem_retrieve_req_is_implemented(bool * is_implemented,bool * dyn_alloc_supp,bool * has_ns_bit,size_t * ref_count_num_bits)422 static status_t arm_ffa_mem_retrieve_req_is_implemented(
423         bool* is_implemented,
424         bool* dyn_alloc_supp,
425         bool* has_ns_bit,
426         size_t* ref_count_num_bits) {
427     ffa_features2_t features2;
428     ffa_features3_t features3;
429     bool is_implemented_val = false;
430     status_t res;
431 
432     ASSERT(is_implemented);
433 
434     res = arm_ffa_call_features(SMC_FC_FFA_MEM_RETRIEVE_REQ,
435                                 &is_implemented_val, &features2, &features3);
436     if (res != NO_ERROR) {
437         TRACEF("Failed to query for feature FFA_MEM_RETRIEVE_REQ, err = %d\n",
438                res);
439         return res;
440     }
441     if (!is_implemented_val) {
442         *is_implemented = false;
443         return NO_ERROR;
444     }
445     if (dyn_alloc_supp) {
446         *dyn_alloc_supp = !!(features2 & FFA_FEATURES2_MEM_DYNAMIC_BUFFER);
447     }
448     if (has_ns_bit) {
449         *has_ns_bit = !!(features2 & FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT);
450     }
451     if (ref_count_num_bits) {
452         *ref_count_num_bits =
453                 (features3 & FFA_FEATURES3_MEM_RETRIEVE_REQ_REFCOUNT_MASK) + 1;
454     }
455     *is_implemented = true;
456     return NO_ERROR;
457 }
458 
459 /* Helper function to set up the tx buffer with standard values
460    before calling FFA_MEM_RETRIEVE_REQ. */
arm_ffa_populate_receive_req_tx_buffer(uint16_t sender_id,uint64_t handle,uint64_t tag)461 static void arm_ffa_populate_receive_req_tx_buffer(uint16_t sender_id,
462                                                    uint64_t handle,
463                                                    uint64_t tag) {
464     struct ffa_mtd* req = ffa_tx;
465     DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
466 
467     memset(req, 0, sizeof(struct ffa_mtd));
468 
469     req->sender_id = sender_id;
470     req->handle = handle;
471     /* We must use the same tag as the one used by the sender to retrieve. */
472     req->tag = tag;
473 
474     /*
475      * We only support retrieving memory for ourselves for now.
476      * TODO: Also support stream endpoints. Possibly more than one.
477      */
478     req->emad_count = 1;
479     memset(req->emad, 0, sizeof(struct ffa_emad));
480     req->emad[0].mapd.endpoint_id = ffa_local_id;
481 }
482 
/*
 * Issue the FFA_MEM_RETRIEVE_REQ prepared in the tx buffer and validate the
 * response lengths. Call with ffa_rxtx_buffer_lock held and the tx buffer
 * already populated (see arm_ffa_populate_receive_req_tx_buffer); the
 * response descriptor is left in the rx buffer.
 *
 * NOTE(review): the previous comment here described a malloc'd *desc_buffer
 * out-parameter that no longer exists in this signature.
 *
 * On success *len holds the total descriptor length and *fragment_len (if
 * non-NULL) the first fragment's length, validated to not exceed *len nor
 * the rx buffer size.
 */
static status_t arm_ffa_mem_retrieve(uint16_t sender_id,
                                     uint64_t handle,
                                     uint32_t* len,
                                     uint32_t* fragment_len) {
    status_t res = NO_ERROR;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
    DEBUG_ASSERT(len);

    uint32_t len_out, fragment_len_out;
    res = arm_ffa_call_mem_retrieve_req(&len_out, &fragment_len_out);
    LTRACEF("total_len: %u, fragment_len: %u\n", len_out, fragment_len_out);
    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve request failed, err = %d\n", res);
        return res;
    }
    if (fragment_len_out > len_out) {
        TRACEF("Fragment length larger than total length %u > %u\n",
               fragment_len_out, len_out);
        return ERR_IO;
    }

    /* Check that the first fragment fits in our buffer */
    if (fragment_len_out > ffa_buf_size) {
        TRACEF("Fragment length %u larger than buffer size\n",
               fragment_len_out);
        return ERR_IO;
    }

    if (fragment_len) {
        *fragment_len = fragment_len_out;
    }
    if (len) {
        *len = len_out;
    }

    return NO_ERROR;
}
523 
arm_ffa_mem_address_range_get(struct arm_ffa_mem_frag_info * frag_info,size_t index,paddr_t * addr,size_t * size)524 status_t arm_ffa_mem_address_range_get(struct arm_ffa_mem_frag_info* frag_info,
525                                        size_t index,
526                                        paddr_t* addr,
527                                        size_t* size) {
528     uint32_t page_count;
529     size_t frag_idx;
530 
531     DEBUG_ASSERT(frag_info);
532 
533     if (index < frag_info->start_index ||
534         index >= frag_info->start_index + frag_info->count) {
535         return ERR_OUT_OF_RANGE;
536     }
537 
538     frag_idx = index - frag_info->start_index;
539 
540     page_count = frag_info->address_ranges[frag_idx].page_count;
541     LTRACEF("address %p, page_count 0x%x\n",
542             (void*)frag_info->address_ranges[frag_idx].address,
543             frag_info->address_ranges[frag_idx].page_count);
544     if (page_count < 1 || ((size_t)page_count > (SIZE_MAX / FFA_PAGE_SIZE))) {
545         TRACEF("bad page count 0x%x at %zu\n", page_count, index);
546         return ERR_IO;
547     }
548 
549     if (addr) {
550         *addr = (paddr_t)frag_info->address_ranges[frag_idx].address;
551     }
552     if (size) {
553         *size = page_count * FFA_PAGE_SIZE;
554     }
555 
556     return NO_ERROR;
557 }
558 
/*
 * Start retrieving a memory region shared or lent by sender_id under the
 * given handle and tag (FFA_MEM_RETRIEVE_REQ), validating the response and
 * deriving arch mmu flags from the transaction descriptor.
 *
 * Acquires ffa_rxtx_buffer_lock and leaves it held on success: the
 * descriptors reported through frag_info point directly into the rx
 * buffer, so the caller must call arm_ffa_rx_release() (which drops the
 * lock) when done with them.
 *
 * NOTE(review): the error paths below also return with the lock held —
 * verify that callers release the rx buffer on failure too, otherwise the
 * lock is leaked.
 */
status_t arm_ffa_mem_retrieve_start(uint16_t sender_id,
                                    uint64_t handle,
                                    uint64_t tag,
                                    uint32_t* address_range_count,
                                    uint* arch_mmu_flags,
                                    struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    struct ffa_mtd* mtd;
    struct ffa_emad* emad;
    struct ffa_comp_mrd* comp_mrd;
    uint32_t computed_len;
    uint32_t header_size;

    uint32_t total_len;
    uint32_t fragment_len;

    DEBUG_ASSERT(frag_info);

    mutex_acquire(&ffa_rxtx_buffer_lock);
    arm_ffa_populate_receive_req_tx_buffer(sender_id, handle, tag);
    res = arm_ffa_mem_retrieve(sender_id, handle, &total_len, &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve failed err=%d\n", res);
        return res;
    }

    /* The fragment must at least hold an mtd with one emad entry. */
    if (fragment_len <
        offsetof(struct ffa_mtd, emad) + sizeof(struct ffa_emad)) {
        TRACEF("Fragment too short for memory transaction descriptor\n");
        return ERR_IO;
    }

    mtd = ffa_rx;
    emad = mtd->emad;

    /*
     * We don't retrieve the memory on behalf of anyone else, so we only
     * expect one receiver address range descriptor.
     */
    if (mtd->emad_count != 1) {
        TRACEF("unexpected response count %d != 1\n", mtd->emad_count);
        return ERR_IO;
    }

    /* The composite descriptor must lie fully inside this fragment. */
    LTRACEF("comp_mrd_offset: %u\n", emad->comp_mrd_offset);
    if (emad->comp_mrd_offset + sizeof(*comp_mrd) > fragment_len) {
        TRACEF("Fragment length %u too short for comp_mrd_offset %u\n",
               fragment_len, emad->comp_mrd_offset);
        return ERR_IO;
    }

    comp_mrd = ffa_rx + emad->comp_mrd_offset;

    uint32_t address_range_count_out = comp_mrd->address_range_count;
    frag_info->address_ranges = comp_mrd->address_range_array;
    LTRACEF("address_range_count: %u\n", address_range_count_out);

    /* Cross-check the reported total length against the descriptor counts. */
    computed_len = emad->comp_mrd_offset +
                   offsetof(struct ffa_comp_mrd, address_range_array) +
                   sizeof(struct ffa_cons_mrd) * comp_mrd->address_range_count;
    if (total_len != computed_len) {
        TRACEF("Reported length %u != computed length %u\n", total_len,
               computed_len);
        return ERR_IO;
    }

    header_size = emad->comp_mrd_offset +
                  offsetof(struct ffa_comp_mrd, address_range_array);
    frag_info->count =
            (fragment_len - header_size) / sizeof(struct ffa_cons_mrd);
    LTRACEF("Descriptors in fragment %u\n", frag_info->count);

    /* Reject fragments that end in the middle of a descriptor. */
    if (frag_info->count * sizeof(struct ffa_cons_mrd) + header_size !=
        fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->received_len = fragment_len;
    frag_info->start_index = 0;

    /* Derive the mmu flags for mapping this region from the descriptor. */
    uint arch_mmu_flags_out = 0;

    switch (mtd->flags & FFA_MTD_FLAG_TYPE_MASK) {
    case FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
        /*
         * If memory is shared, assume it is not safe to execute out of. This
         * specifically indicates that another party may have access to the
         * memory.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
        break;
    case FFA_MTD_FLAG_TYPE_LEND_MEMORY:
        break;
    case FFA_MTD_FLAG_TYPE_DONATE_MEMORY:
        TRACEF("Unexpected donate memory transaction type is not supported\n");
        return ERR_NOT_IMPLEMENTED;
    default:
        TRACEF("Unknown memory transaction type: 0x%x\n", mtd->flags);
        return ERR_NOT_VALID;
    }

    /* The NS bit is masked out here; it is handled separately below. */
    switch (mtd->memory_region_attributes & ~FFA_MEM_ATTR_NONSECURE) {
    case FFA_MEM_ATTR_DEVICE_NGNRE:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
        break;
    case FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED;
        break;
    case (FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB | FFA_MEM_ATTR_INNER_SHAREABLE):
        arch_mmu_flags_out |= ARCH_MMU_FLAG_CACHED;
        break;
    default:
        TRACEF("Invalid memory attributes, 0x%x\n",
               mtd->memory_region_attributes);
        return ERR_NOT_VALID;
    }

    if (!(emad->mapd.memory_access_permissions & FFA_MEM_PERM_RW)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_RO;
    }
    if (emad->mapd.memory_access_permissions & FFA_MEM_PERM_NX) {
        /*
         * Don't allow executable mappings if the stage 2 page tables don't
         * allow it. The hardware allows the stage 2 NX bit to only apply to
         * EL1, not EL0, but neither FF-A nor LK can currently express this, so
         * disallow both if FFA_MEM_PERM_NX is set.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (!supports_ns_bit ||
        (mtd->memory_region_attributes & FFA_MEM_ATTR_NONSECURE)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_NS;
        /* Regardless of origin, we don't want to execute out of NS memory. */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (arch_mmu_flags) {
        *arch_mmu_flags = arch_mmu_flags_out;
    }
    if (address_range_count) {
        *address_range_count = address_range_count_out;
    }

    return res;
}
708 
/* This assumes that the fragment is completely composed of memory
   region descriptors (struct ffa_cons_mrd) */
/*
 * Fetch the next fragment of an in-progress memory retrieve and advance
 * frag_info (received_len, start_index, count, address_ranges).
 *
 * Acquires ffa_rxtx_buffer_lock and leaves it held on success; the caller
 * must call arm_ffa_rx_release() when done with the rx buffer contents.
 *
 * NOTE(review): the error paths return with the lock still held, matching
 * the success path — verify callers release the buffer on failure too.
 */
status_t arm_ffa_mem_retrieve_next_frag(
        uint64_t handle,
        struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    uint32_t fragment_len;

    mutex_acquire(&ffa_rxtx_buffer_lock);

    /* received_len doubles as the offset of the next fragment to fetch. */
    res = arm_ffa_call_mem_frag_rx(handle, frag_info->received_len,
                                   &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("Failed to get memory retrieve fragment, err = %d\n", res);
        return res;
    }

    frag_info->received_len += fragment_len;
    frag_info->start_index += frag_info->count;

    frag_info->count = fragment_len / sizeof(struct ffa_cons_mrd);
    /* Reject fragments that end in the middle of a descriptor. */
    if (frag_info->count * sizeof(struct ffa_cons_mrd) != fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->address_ranges = ffa_rx;

    return NO_ERROR;
}
741 
/*
 * Release the rx buffer and drop ffa_rxtx_buffer_lock, which must be held
 * on entry (acquired by arm_ffa_mem_retrieve_start() or
 * arm_ffa_mem_retrieve_next_frag()).
 *
 * If the SPMC does not implement FFA_RX_RELEASE, the SMC is skipped and the
 * function succeeds. NOTE(review): if the SMC unexpectedly reports
 * ERR_NOT_SUPPORTED despite the feature check, the error is only logged and
 * NO_ERROR is returned — confirm this fall-through is intentional.
 */
status_t arm_ffa_rx_release(void) {
    status_t res;
    ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (!supports_rx_release) {
        res = NO_ERROR;
    } else {
        res = arm_ffa_call_rx_release();
    }

    mutex_release(&ffa_rxtx_buffer_lock);

    if (res == ERR_NOT_SUPPORTED) {
        TRACEF("Tried to release rx buffer when the operation is not supported!\n");
    } else if (res != NO_ERROR) {
        TRACEF("Failed to release rx buffer, err = %d\n", res);
        return res;
    }
    return NO_ERROR;
}
762 
arm_ffa_mem_relinquish(uint64_t handle)763 status_t arm_ffa_mem_relinquish(uint64_t handle) {
764     status_t res;
765 
766     /* As flags are set to 0 no request to zero the memory is made */
767     res = arm_ffa_call_mem_relinquish(handle, 0, 1, &ffa_local_id);
768     if (res != NO_ERROR) {
769         TRACEF("Failed to relinquish memory region, err = %d\n", res);
770     }
771 
772     return res;
773 }
774 
/*
 * Probe the FF-A interface and set up the rx/tx buffer pair.
 *
 * Negotiates the interface version, reads our partition id, checks the
 * features this driver depends on, allocates the rx/tx buffers and
 * registers them with FFA_RXTX_MAP. On success the module-level state
 * (ffa_local_id, ffa_buf_size, ffa_tx, ffa_rx, supports_*) is populated.
 */
static status_t arm_ffa_setup(void) {
    status_t res;
    uint16_t ver_major_ret;
    uint16_t ver_minor_ret;
    bool is_implemented;
    size_t buf_size_log2;
    size_t ref_count_num_bits;
    size_t arch_page_count;
    size_t ffa_page_count;
    size_t count;
    paddr_t tx_paddr;
    paddr_t rx_paddr;
    void* tx_vaddr;
    void* rx_vaddr;
    struct list_node page_list = LIST_INITIAL_VALUE(page_list);

    res = arm_ffa_call_version(FFA_CURRENT_VERSION_MAJOR,
                               FFA_CURRENT_VERSION_MINOR, &ver_major_ret,
                               &ver_minor_ret);
    if (res != NO_ERROR) {
        TRACEF("No compatible FF-A version found\n");
        return res;
    } else if (FFA_CURRENT_VERSION_MAJOR != ver_major_ret ||
               FFA_CURRENT_VERSION_MINOR > ver_minor_ret) {
        /* When trusty supports more FF-A versions downgrade may be possible */
        TRACEF("Incompatible FF-A interface version, %" PRIu16 ".%" PRIu16 "\n",
               ver_major_ret, ver_minor_ret);
        return ERR_NOT_SUPPORTED;
    }

    res = arm_ffa_call_id_get(&ffa_local_id);
    if (res != NO_ERROR) {
        TRACEF("Failed to get FF-A partition id (err=%d)\n", res);
        return res;
    }

    /* FFA_RX_RELEASE is optional; remember whether we may call it later. */
    res = arm_ffa_rx_release_is_implemented(&is_implemented);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RX_RELEASE is implemented (err=%d)\n",
               res);
        return res;
    }
    if (is_implemented) {
        supports_rx_release = true;
    } else {
        LTRACEF("FFA_RX_RELEASE is not implemented\n");
    }

    /* FFA_RXTX_MAP is mandatory for this driver. */
    res = arm_ffa_rxtx_map_is_implemented(&is_implemented, &buf_size_log2);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RXTX_MAP is implemented (err=%d)\n", res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_RXTX_MAP is not implemented\n");
        return ERR_NOT_SUPPORTED;
    }

    /* So is FFA_MEM_RETRIEVE_REQ; also record its NS-bit support. */
    res = arm_ffa_mem_retrieve_req_is_implemented(
            &is_implemented, NULL, &supports_ns_bit, &ref_count_num_bits);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_MEM_RETRIEVE_REQ is implemented (err=%d)\n",
               res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_MEM_RETRIEVE_REQ is not implemented\n");
        return ERR_NOT_SUPPORTED;
    }

    if (ref_count_num_bits < 64) {
        /*
         * Expect 64 bit reference count. If we don't have it, future calls to
         * SMC_FC_FFA_MEM_RETRIEVE_REQ can fail if we receive the same handle
         * multiple times. Warn about this, but don't return an error as we only
         * receive each handle once in the typical case.
         */
        TRACEF("Warning FFA_MEM_RETRIEVE_REQ does not have 64 bit reference count (%zu)\n",
               ref_count_num_bits);
    }

    ffa_buf_size = 1U << buf_size_log2;
    ASSERT((ffa_buf_size % FFA_PAGE_SIZE) == 0);

    /* Buffer size in arch pages for the allocator, FF-A pages for the SPMC. */
    arch_page_count = DIV_ROUND_UP(ffa_buf_size, PAGE_SIZE);
    ffa_page_count = ffa_buf_size / FFA_PAGE_SIZE;
    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &tx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate tx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_tx;
    }
    tx_vaddr = paddr_to_kvaddr(tx_paddr);
    ASSERT(tx_vaddr);

    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &rx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate rx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_rx;
    }
    rx_vaddr = paddr_to_kvaddr(rx_paddr);
    ASSERT(rx_vaddr);

    res = arm_ffa_call_rxtx_map(tx_paddr, rx_paddr, ffa_page_count);
    if (res != NO_ERROR) {
        TRACEF("Failed to map tx @ %p, rx @ %p, page count 0x%zx (err=%d)\n",
               (void*)tx_paddr, (void*)rx_paddr, ffa_page_count, res);
        goto err_rxtx_map;
    }

    /* Publish the buffers only after the map call succeeded. */
    ffa_tx = tx_vaddr;
    ffa_rx = rx_vaddr;

    return res;

err_rxtx_map:
err_alloc_rx:
    /* Both allocations share page_list, so this frees tx (and rx if any). */
    pmm_free(&page_list);
err_alloc_tx:
    /* pmm_alloc_contiguous leaves the page list unchanged on failure */

    return res;
}
903 
/*
 * LK init hook: probe and configure the FF-A interface. On failure the
 * error is logged and arm_ffa_is_init() keeps returning false.
 */
static void arm_ffa_init(uint level) {
    status_t res;

    res = arm_ffa_setup();

    if (res == NO_ERROR) {
        arm_ffa_init_is_success = true;
    } else {
        TRACEF("Failed to initialize FF-A (err=%d)\n", res);
    }
}

/* Registered two levels before PLATFORM so platform code can use FF-A. */
LK_INIT_HOOK(arm_ffa_init, arm_ffa_init, LK_INIT_LEVEL_PLATFORM - 2);
917