/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

/* -----------------------------------------------------------------------
 * Compute the bit shift of an affinity level's field within an MPIDR.
 * x0: affinity level (0 - 3). Returns the shift in x0.
 * Levels 0 - 2 occupy consecutive 8-bit fields starting at bit 0; level 3
 * lives in MPIDR bits [39:32], hence the extra increment for level 3.
 * -----------------------------------------------------------------------
 */
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq		// treat level 3 as level 4
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1		// shift = level * 8 (level 3 -> 32)
	ret
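
/*
 * Example (hypothetical caller): convert affinity level 2 into the shift
 * needed to extract that field from an MPIDR held in x4:
 *
 *	mov	x0, #2
 *	bl	get_afflvl_shift	// x0 == 16, i.e. MPIDR bits [23:16]
 *	lsr	x3, x4, x0
 *	and	x3, x3, #MPIDR_AFFLVL_MASK
 */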

/* -----------------------------------------------------------------------
 * Clear the affinity fields below a given affinity level in an MPIDR.
 * x0: MPIDR value, x1: affinity level (0 - 3). Returns the masked MPIDR
 * in x0.
 * -----------------------------------------------------------------------
 */
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq		// treat level 3 as level 4
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2		// bit position of the given level
	lsr	x0, x0, x2		// shift out the lower fields...
	lsl	x0, x0, x2		// ...and restore the remainder
	ret
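
/*
 * Worked example (hypothetical values): with x0 = 0x0102 (Aff1 = 1,
 * Aff0 = 2) and x1 = 1, the shift is 8, so the result in x0 is 0x0100:
 * Aff0 is cleared while Aff1 and above are preserved.
 */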


/* -----------------------------------------------------------------------
 * Perform an exception return using the current ELR and SPSR.
 * -----------------------------------------------------------------------
 */
func eret
	eret


/* -----------------------------------------------------------------------
 * Issue an SMC. The function identifier and arguments are passed through
 * to the SMC handler in x0 - x7, as per the SMC Calling Convention.
 * -----------------------------------------------------------------------
 */
func smc
	smc	#0

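/*
 * Example (hypothetical caller): load the function identifier into x0 and
 * any arguments into x1 - x7 before the call:
 *
 *	mov	x0, #0x84000000		// hypothetical id in the standard service range
 *	mov	x1, #0			// first argument
 *	bl	smc
 */
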
/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
#if ASM_ASSERTION
	tst	x0, #0xf
	ASM_ASSERT(eq)			// assert 16-byte alignment of the base
#endif
	add	x2, x0, x1		// x2 = end of the region
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0		// bytes remaining
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero the trailing bytes one at a time */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret
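
/*
 * Example (hypothetical linker symbols): a typical caller zeroes a
 * linker-defined region whose start is 16-byte aligned:
 *
 *	ldr	x0, =__BSS_START__
 *	ldr	x1, =__BSS_SIZE__
 *	bl	zeromem16
 */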


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ASM_ASSERTION
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)			// assert both addresses are 16-byte aligned
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy the trailing bytes one at a time */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret
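
/*
 * Example (hypothetical linker symbols): copy an initialised data region
 * from ROM to RAM; both addresses must be 16-byte aligned:
 *
 *	ldr	x0, =__DATA_RAM_START__
 *	ldr	x1, =__DATA_ROM_START__
 *	ldr	x2, =__DATA_SIZE__
 *	bl	memcpy16
 */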

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3 (and, in the _icache variant, the instruction
 * cache as well).
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled, without any intervening
 * cacheable data accesses.
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1		// clear the selected enable bits
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all		// ...by set/way (tail call)


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
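
/*
 * Example (hypothetical C caller): both variants follow the AAPCS, so a
 * matching prototype is all a C caller needs, e.g.:
 *
 *	void disable_mmu_el3(void);
 *	...
 *	disable_mmu_el3();	// MMU and D-cache off, D-cache flushed
 */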

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3. Clears the EL3 trap on FP/Advanced SIMD
 * accesses and sets the enable bits in CPACR_EL1.
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS		// enable FP/SIMD access
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1			// clear CPTR_EL3.TFP
	msr	cptr_el3, x0
	isb
	ret
#endif
