// Copyright 2019 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include <cstdint>
#include <cstring>
#include <span>
#include <type_traits>

#include "gtest/gtest.h"
#include "pw_cpu_exception/entry.h"
#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception/support.h"
#include "pw_cpu_exception_cortex_m/cpu_state.h"

namespace pw::cpu_exception {
namespace {

// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// Exception ISR numbers. (ARMv7-M Section B1.5.2)
constexpr uint32_t kHardFaultIsrNum = 0x3u;
constexpr uint32_t kMemFaultIsrNum = 0x4u;
constexpr uint32_t kBusFaultIsrNum = 0x5u;
constexpr uint32_t kUsageFaultIsrNum = 0x6u;

// Masks for individual bits of HFSR. (ARMv7-M Section B3.2.16)
constexpr uint32_t kForcedHardfaultMask = 0x1u << 30;

// Masks for individual bits of CFSR. (ARMv7-M Section B3.2.15)
constexpr uint32_t kUsageFaultStart = 0x1u << 16;
constexpr uint32_t kUnalignedFaultMask = kUsageFaultStart << 8;
constexpr uint32_t kDivByZeroFaultMask = kUsageFaultStart << 9;
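// UFSR (the UsageFault status register) occupies the top 16 bits of CFSR, so
// the UNALIGNED (UFSR bit 8) and DIVBYZERO (UFSR bit 9) flags above land at
// CFSR bits 24 and 25.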

// CCR flags. (ARMv7-M Section B3.2.8)
constexpr uint32_t kUnalignedTrapEnableMask = 0x1u << 3;
constexpr uint32_t kDivByZeroTrapEnableMask = 0x1u << 4;

// Masks for individual bits of SHCSR. (ARMv7-M Section B3.2.13)
constexpr uint32_t kMemFaultEnableMask = 0x1u << 16;
constexpr uint32_t kBusFaultEnableMask = 0x1u << 17;
constexpr uint32_t kUsageFaultEnableMask = 0x1u << 18;

// Bit masks for an exception return value. (ARMv7-M Section B1.5.8)
constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);
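// Bit 4 of EXC_RETURN is clear when the exception pushed an extended stack
// frame that includes FPU state, and set when only a basic frame was pushed.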

// CPACR mask that enables the FPU. (ARMv7-M Section B3.2.20)
constexpr uint32_t kFpuEnableMask = (0xFu << 20);
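// Bits 20-23 of CPACR grant full access to coprocessors CP10 and CP11, which
// the FPU uses.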

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
volatile uint32_t& cortex_m_vtor =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED08u);
volatile uint32_t& cortex_m_ccr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED14u);
volatile uint32_t& cortex_m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
volatile uint32_t& cortex_m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& cortex_m_hfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
volatile uint32_t& cortex_m_cpacr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);

// Begin a critical section that must not be interrupted.
// This function disables interrupts to prevent any sort of context switch until
// the critical section ends. This is done by setting PRIMASK to 1 using the cps
// instruction.
//
// Returns the state of PRIMASK before interrupts were disabled.
inline uint32_t BeginCriticalSection() {
  uint32_t previous_state;
  asm volatile(
      " mrs %[previous_state], primask              \n"
      " cpsid i                                     \n"
      // clang-format off
      : /*output=*/[previous_state]"=r"(previous_state)
      : /*input=*/
      : /*clobbers=*/"memory"
      // clang-format on
  );
  return previous_state;
}

// Ends a critical section.
// Restores the previous state produced by BeginCriticalSection().
// Note: This does not always re-enable interrupts.
inline void EndCriticalSection(uint32_t previous_state) {
  asm volatile(
      // clang-format off
      "msr primask, %0"
      : /*output=*/
      : /*input=*/"r"(previous_state)
      : /*clobbers=*/"memory"
      // clang-format on
  );
}
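
// Illustrative usage of the critical section helpers:
//
//   uint32_t saved_primask = BeginCriticalSection();
//   // ... work that must not be interrupted ...
//   EndCriticalSection(saved_primask);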

void EnableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  cortex_m_cpacr |= kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}

void DisableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  cortex_m_cpacr &= ~kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}

// Counter that is incremented if the test's exception handler correctly handles
// a triggered exception.
size_t exceptions_handled = 0;

// Global variable that triggers a single nested fault on a fault.
bool trigger_nested_fault = false;

// Allow up to kMaxFaultDepth faults before determining the device is
// unrecoverable.
constexpr size_t kMaxFaultDepth = 2;

// Variable to prevent more than kMaxFaultDepth nested crashes.
size_t current_fault_depth = 0;

// Faulting pw_cpu_exception_State is copied here so values can be validated
// after exiting the exception handler.
pw_cpu_exception_State captured_states[kMaxFaultDepth] = {};
pw_cpu_exception_State& captured_state = captured_states[0];

// Flag used to check whether the contents of the std::span match the captured
// state.
bool span_matches = false;

// Variable manipulated by a function that uses floating point, to test that
// exceptions push FPU state correctly.
// Note: don't use double; on a Cortex-M4F with FPv4-SP-D16, GCC generates
// code that uses software floating-point support for double.
volatile float float_test_value;

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack when the faulting context uses the VFP. Has to be
// computed at runtime because it uses values only available at link time.
const float kFloatTestPattern = 12.345f * 67.89f;

volatile float fpu_lhs_val = 12.345f;
volatile float fpu_rhs_val = 67.89f;

// This macro provides a calculation that equals kFloatTestPattern.
#define _PW_TEST_FPU_OPERATION (fpu_lhs_val * fpu_rhs_val)
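// Because fpu_lhs_val and fpu_rhs_val are volatile, the compiler cannot
// constant-fold this product; a real floating-point multiply is emitted at
// runtime.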

// Magic pattern to help identify if the exception handler's
// pw_cpu_exception_State pointer was pointing to captured CPU state that was
// pushed onto the stack.
constexpr uint32_t kMagicPattern = 0xDEADBEEF;

// This pattern serves a purpose similar to kMagicPattern, but is used for
// testing a nested fault to ensure both pw_cpu_exception_State objects are
// correctly captured.
constexpr uint32_t kNestedMagicPattern = 0x900DF00D;

// The manually captured PC won't exactly match the faulting PC. This is the
// maximum tolerated distance between the two for the test to pass.
constexpr int32_t kMaxPcDistance = 4;

// In-memory interrupt service routine vector table.
using InterruptVectorTable = std::aligned_storage_t<512, 512>;
InterruptVectorTable ram_vector_table;
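// Illustrative compile-time check: VTOR requires the vector table base to be
// aligned to a power of two at least as large as the table itself.
static_assert(alignof(InterruptVectorTable) == 512,
              "ram_vector_table must be 512-byte aligned");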

// Forward declaration of the exception handler.
void TestingExceptionHandler(pw_cpu_exception_State*);

// Populate the device's registers with testable values, then trigger an
// exception.
void BeginBaseFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction divides by zero.
      " udiv r1, r1, r1                                       \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
}

// Populate the device's registers with testable values, then trigger an
// exception.
void BeginNestedFaultTest() {
  // Make sure unaligned accesses cause a fault.
  cortex_m_ccr |= kUnalignedTrapEnableMask;
  volatile uint32_t magic = kNestedMagicPattern;
  asm volatile(
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction does an unaligned read.
      " ldrh r1, [%[magic_addr], 1]                           \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic), [magic_addr]"r"(&magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );
}

// Populate the device's registers with testable values, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for PSP.
void BeginBaseFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0}                                             \n"
      " mov r0, %[magic]                                      \n"
      " mov r1, #0                                            \n"
      " mov r2, pc                                            \n"
      " mov r3, lr                                            \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r1, r1, r1                                       \n"
      " pop {r0}                                              \n"
      // clang-format off
      : /*output=*/
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r1", "r2", "r3"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);
}

// Populate some of the extended set of captured registers, then trigger an
// exception.
void BeginExtendedFaultTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      " mov r4, %[magic]                                      \n"
      " mov r5, #0                                            \n"
      " mov r11, %[magic]                                     \n"
      " mrs %[local_msp], msp                                 \n"
      " mrs %[local_psp], psp                                 \n"
      // This instruction divides by zero.
      " udiv r5, r5, r5                                       \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was not set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);

  // Check that the captured stack pointers match the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}

// Populate some of the extended set of captured registers, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for PSP.
void BeginExtendedFaultUnalignedStackTest() {
  // Make sure divide by zero causes a fault.
  cortex_m_ccr |= kDivByZeroTrapEnableMask;
  uint32_t magic = kMagicPattern;
  volatile uint32_t local_msp = 0;
  volatile uint32_t local_psp = 0;
  asm volatile(
      // Push one register to cause $sp to be no longer 8-byte aligned,
      // assuming it started 8-byte aligned as expected.
      " push {r0}                                             \n"
      " mov r4, %[magic]                                      \n"
      " mov r5, #0                                            \n"
      " mov r11, %[magic]                                     \n"
      " mrs %[local_msp], msp                                 \n"
      " mrs %[local_psp], psp                                 \n"
      // This instruction divides by zero. Our fault handler should
      // ultimately advance the pc to the pop instruction.
      " udiv r5, r5, r5                                       \n"
      " pop {r0}                                              \n"
      // clang-format off
      : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
      : /*input=*/[magic]"r"(magic)
      : /*clobbers=*/"r0", "r4", "r5", "r11", "memory"
      // clang-format on
  );

  // Check that the stack align bit was set.
  EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
            kPsrExtraStackAlignBit);

  // Check that the captured stack pointers match the ones in the context of
  // the fault.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
}

void InstallVectorTableEntries() {
  uint32_t prev_state = BeginCriticalSection();
  // If the vector table is already installed, there's nothing to do.
  if (cortex_m_vtor == reinterpret_cast<uint32_t>(&ram_vector_table)) {
    EndCriticalSection(prev_state);
    return;
  }
  // Copy the table to the new location, since it's not guaranteed that the
  // original one is writable.
  std::memcpy(&ram_vector_table,
              reinterpret_cast<uint32_t*>(cortex_m_vtor),
              sizeof(ram_vector_table));

  // Override exception handling vector table entries.
  uint32_t* exception_entry_addr =
      reinterpret_cast<uint32_t*>(pw_cpu_exception_Entry);
  uint32_t** interrupts = reinterpret_cast<uint32_t**>(&ram_vector_table);
  interrupts[kHardFaultIsrNum] = exception_entry_addr;
  interrupts[kMemFaultIsrNum] = exception_entry_addr;
  interrupts[kBusFaultIsrNum] = exception_entry_addr;
  interrupts[kUsageFaultIsrNum] = exception_entry_addr;

  // Update the Vector Table Offset Register (VTOR) to point to the new table.
  cortex_m_vtor = reinterpret_cast<uint32_t>(&ram_vector_table);
  EndCriticalSection(prev_state);
}
357 
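// Without these enable bits set in SHCSR, MemManage, BusFault, and UsageFault
// would escalate to HardFault rather than use their dedicated handlers.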
void EnableAllFaultHandlers() {
  cortex_m_shcsr |=
      kMemFaultEnableMask | kBusFaultEnableMask | kUsageFaultEnableMask;
}

void Setup(bool use_fpu) {
  if (use_fpu) {
    EnableFpu();
  } else {
    DisableFpu();
  }
  pw_cpu_exception_SetHandler(TestingExceptionHandler);
  EnableAllFaultHandlers();
  InstallVectorTableEntries();
  exceptions_handled = 0;
  current_fault_depth = 0;
  captured_state = {};
  float_test_value = 0.0f;
  trigger_nested_fault = false;
}

TEST(FaultEntry, BasicFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, BasicUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginBaseFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_state.base.pc - captured_state.base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
            static_cast<uint32_t>(captured_state.base.lr));
}

TEST(FaultEntry, ExtendedFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, ExtendedUnalignedStackFault) {
  Setup(/*use_fpu=*/false);
  BeginExtendedFaultUnalignedStackTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
}

TEST(FaultEntry, NestedFault) {
  // Due to the way nesting is handled, captured_states[0] is the nested fault,
  // since that fault must be handled *FIRST*. Only after it is handled can the
  // original fault be correctly handled (captured into captured_states[1]).

  Setup(/*use_fpu=*/false);
  trigger_nested_fault = true;
  BeginBaseFaultTest();
  ASSERT_EQ(exceptions_handled, 2u);

  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r0), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  int32_t captured_pc_distance =
      captured_states[1].base.pc - captured_states[1].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r3),
            static_cast<uint32_t>(captured_states[1].base.lr));

  // NESTED STATE
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r0),
            kNestedMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r1), 0u);
  // PC is manually saved in r2 before the exception occurs (where PC is also
  // stored). Ensure these numbers are within a reasonable distance.
  captured_pc_distance =
      captured_states[0].base.pc - captured_states[0].base.r2;
  EXPECT_LT(captured_pc_distance, kMaxPcDistance);
  EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r3),
            static_cast<uint32_t>(captured_states[0].base.lr));
}

// TODO(pwbug/17): Replace when Pigweed config system is added.
// Disable tests that rely on hardware FPU if this module wasn't built with
// hardware FPU support.
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1

// Populate some of the extended set of captured registers, then trigger an
// exception. This function uses floating point to validate that float context
// is pushed correctly.
void BeginExtendedFaultFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultTest();
}

// Populate some of the extended set of captured registers, then trigger an
// exception.
// This version causes the stack to not be 8-byte aligned initially, testing
// the fault handler's correction for PSP.
// This function uses floating point to validate that float context is pushed
// correctly.
void BeginExtendedFaultUnalignedStackFloatTest() {
  float_test_value = _PW_TEST_FPU_OPERATION;
  BeginExtendedFaultUnalignedStackTest();
}

TEST(FaultEntry, FloatFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

TEST(FaultEntry, FloatUnalignedStackFault) {
  Setup(/*use_fpu=*/true);
  BeginExtendedFaultUnalignedStackFloatTest();
  ASSERT_EQ(exceptions_handled, 1u);
  ASSERT_TRUE(span_matches);
  const CortexMExtraRegisters& extended_registers = captured_state.extended;
  // captured_state values must be cast since they're in a packed struct.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);

  // Check expected values for this crash.
  EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
            static_cast<uint32_t>(kDivByZeroFaultMask));
  EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);

  // Check that FPU state was pushed during the exception.
  EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);

  // Check that float_test_value is correct.
  EXPECT_EQ(float_test_value, kFloatTestPattern);
}

#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1

void TestingExceptionHandler(pw_cpu_exception_State* state) {
  if (++current_fault_depth > kMaxFaultDepth) {
    volatile bool loop = true;
    while (loop) {
      // Hit unexpected nested crash, prevent further nesting.
    }
  }

  if (trigger_nested_fault) {
    // Disable nesting before triggering the nested fault to prevent infinite
    // recursive crashes.
    trigger_nested_fault = false;
    BeginNestedFaultTest();
  }
  // Logging may require the FPU (there are FPU instructions in vsnprintf()),
  // so re-enable it as soon as possible.
  EnableFpu();

  // Disable traps. These must be disabled before any EXPECT, as memcpy() can
  // perform unaligned operations.
  cortex_m_ccr &= ~kUnalignedTrapEnableMask;
  cortex_m_ccr &= ~kDivByZeroTrapEnableMask;

  // Clear HFSR forced (nested) hard fault mask if set. This will only be
  // set by the nested fault test.
  EXPECT_EQ(state->extended.hfsr, cortex_m_hfsr);
  if (cortex_m_hfsr & kForcedHardfaultMask) {
    cortex_m_hfsr = kForcedHardfaultMask;
  }

  if (cortex_m_cfsr & kUnalignedFaultMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Clear the sticky unaligned-fault status bit to "handle" the exception
    // (CFSR bits are write-one-to-clear).
    cortex_m_cfsr = kUnalignedFaultMask;
    exceptions_handled++;
    return;
  } else if (cortex_m_cfsr & kDivByZeroFaultMask) {
    // Copy captured state to check later.
    std::memcpy(&captured_states[exceptions_handled],
                state,
                sizeof(pw_cpu_exception_State));

    // Ensure the std::span representation of the state compares equal.
    std::span<const uint8_t> state_span = RawFaultingCpuState(*state);
    EXPECT_EQ(state_span.size(), sizeof(pw_cpu_exception_State));
    span_matches =
        std::memcmp(state, state_span.data(), state_span.size()) == 0;

    // Clear the sticky divide-by-zero status bit to "handle" the exception
    // (CFSR bits are write-one-to-clear).
    cortex_m_cfsr = kDivByZeroFaultMask;
    exceptions_handled++;
    return;
  }

  EXPECT_EQ(state->extended.shcsr, cortex_m_shcsr);

  // If an unexpected exception occurred, just enter an infinite loop.
  while (true) {
  }
}

}  // namespace
}  // namespace pw::cpu_exception