/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

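//
// Data exchanged between the BSP and the APs through the buffer at the AP
// startup vector; the layout must match what the AP startup assembly code expects.
//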
typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

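//
// Describes the AP startup (rendezvous funnel) code provided by the assembly
// module: its base address, total size, and the offsets of the mode-switch
// entry points and of the jump instructions that must be fixed up after the
// code is copied into low memory.
//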
typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

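//
// Size and base of a region below the legacy VGA range (0xA0000) that can hold
// code the APs execute while still in real mode.
//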
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)
#define MSR_SPIN_LOCK_INIT_NUM 15

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;
UINTN                        mMsrCount = 0;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock, or NULL if no lock exists for MsrIndex.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;

  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return &mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    NewMsrSpinLockCount;

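  //
  // Allocate the spin lock array on first use.
  //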
  if (mMsrSpinLocks == NULL) {
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
    }
  }
}

/**
  Sync up the MTRR values for all processors.

  @param MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register settings after an INIT signal,
      // there is no need to restore MSRs in the register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Acquire the spin lock so that read-modify-write of package/core scope
        // MSRs stays atomic when multiple processors program them in parallel.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

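  //
  // Re-program the local APIC to virtual wire mode and disable its LVT
  // interrupts before restoring the saved register settings.
  //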
  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

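  //
  // Find the register table for this CPU and restore its settings.
  //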
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

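  //
  // Fill in the remaining exchange data consumed by the AP startup code:
  // the AP stack location, the startup buffer address, and the BSP's CR3
  // value for paging.
  //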
  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It performs the first-time
  microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

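  //
  // The BSP does not take part in the rendezvous, so wait for NumberOfCpus - 1
  // APs and point them at the pre-SMBASE-relocation AP procedure.
  //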
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to the data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

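  //
  // Wake the APs again for the post-SMBASE-relocation restore; the BSP only
  // waits, so NumberOfCpus - 1 processors must check in.
  //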
  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when the APs were woken up in EarlyInitializeCpu;
  // re-initialize it to the original stack base address.
  //
  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction  = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}