EFR32 Mighty Gecko 13 Software Documentation  efr32mg13-doc-5.1.2
em_core.c
/***************************************************************************//**
 * @file em_core.c
 * @brief Core interrupt handling API.
 ******************************************************************************/

#include "em_core.h"
#include "em_assert.h"

#if defined(EMLIB_USER_CONFIG)
#include "emlib_config.h"
#endif

/*******************************************************************************
 *********************************   DEFINES   ********************************
 ******************************************************************************/

#if !defined(CORE_ATOMIC_BASE_PRIORITY_LEVEL)
// BASEPRI priority level used inside ATOMIC sections when CORE_ATOMIC_METHOD
// is CORE_ATOMIC_METHOD_BASEPRI. Interrupts with a priority value greater than
// or equal to this level are disabled inside ATOMIC sections.
#define CORE_ATOMIC_BASE_PRIORITY_LEVEL  3
#endif

#if !defined(CORE_ATOMIC_METHOD)
// Method used for interrupt disable/enable inside ATOMIC sections:
// CORE_ATOMIC_METHOD_PRIMASK or CORE_ATOMIC_METHOD_BASEPRI.
#define CORE_ATOMIC_METHOD  CORE_ATOMIC_METHOD_PRIMASK
#endif

#if !defined(CORE_INTERRUPT_ENTRY)
// Some RTOSes must be notified on interrupt entry (and exit).
// Use this macro at the start of all your interrupt handlers.
// Reimplement the macro in emlib_config.h to suit the needs of your RTOS
// (an example reimplementation sketch follows these macro definitions).
#define CORE_INTERRUPT_ENTRY()
#endif

#if !defined(CORE_INTERRUPT_EXIT)
// Use this macro at the end of all your interrupt handlers.
#define CORE_INTERRUPT_EXIT()
#endif

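// Illustrative sketch (not part of the original file): how an RTOS port might
// provide these hooks from emlib_config.h. The hook functions myRtosIsrEnter()
// and myRtosIsrExit() are hypothetical placeholders for the RTOS's own
// ISR-entry/exit notification calls.
//
//   // In emlib_config.h (built with EMLIB_USER_CONFIG defined):
//   #define CORE_INTERRUPT_ENTRY()   myRtosIsrEnter()
//   #define CORE_INTERRUPT_EXIT()    myRtosIsrExit()
//
//   // In each interrupt handler:
//   void TIMER0_IRQHandler(void)
//   {
//     CORE_INTERRUPT_ENTRY();
//     // ... service the interrupt ...
//     CORE_INTERRUPT_EXIT();
//   }
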
// Compile-time sanity check.
#if (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_PRIMASK) \
  && (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_BASEPRI)
#error "em_core: Undefined ATOMIC IRQ handling strategy."
#endif

/*******************************************************************************
 *******************************   FUNCTIONS   ********************************
 ******************************************************************************/

/***************************************************************************//**
 * @brief
 *   Disable interrupts.
 *
 *   Disables all interrupts by setting PRIMASK.
 ******************************************************************************/
SL_WEAK void CORE_CriticalDisableIrq(void)
{
  __disable_irq();
}

/***************************************************************************//**
 * @brief
 *   Enable interrupts.
 *
 *   Enables interrupts by clearing PRIMASK.
 ******************************************************************************/
SL_WEAK void CORE_CriticalEnableIrq(void)
{
  __enable_irq();
}

/***************************************************************************//**
 * @brief
 *   Enter a CRITICAL section.
 *
 *   Saves the current PRIMASK state and disables all interrupts.
 ******************************************************************************/
SL_WEAK CORE_irqState_t CORE_EnterCritical(void)
{
  CORE_irqState_t irqState = __get_PRIMASK();
  __disable_irq();
  return irqState;
}

/***************************************************************************//**
 * @brief
 *   Exit a CRITICAL section.
 *
 *   Re-enables interrupts if they were enabled on entry to the section.
 ******************************************************************************/
SL_WEAK void CORE_ExitCritical(CORE_irqState_t irqState)
{
  if (irqState == 0) {
    __enable_irq();
  }
}

/***************************************************************************//**
 * @brief
 *   Brief interrupt enable/disable sequence to allow handling of
 *   pending interrupts.
 ******************************************************************************/
SL_WEAK void CORE_YieldCritical(void)
{
  if (__get_PRIMASK() & 1) {
    __enable_irq();
    __disable_irq();
  }
}

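// Usage sketch (illustrative, not part of the original file): protect a shared
// variable with a CRITICAL section. The variable sharedCounter and the function
// incrementSharedCounter() are hypothetical application code.
//
//   #include "em_core.h"
//
//   static volatile uint32_t sharedCounter = 0;
//
//   void incrementSharedCounter(void)
//   {
//     CORE_irqState_t irqState = CORE_EnterCritical(); // Disable all interrupts (PRIMASK).
//     sharedCounter++;                                 // Cannot be interrupted by any ISR.
//     CORE_ExitCritical(irqState);                     // Restore the previous interrupt state.
//   }
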
/***************************************************************************//**
 * @brief
 *   Disable interrupts.
 *
 *   Disables interrupts using BASEPRI or PRIMASK depending on
 *   CORE_ATOMIC_METHOD (ATOMIC section).
 ******************************************************************************/
SL_WEAK void CORE_AtomicDisableIrq(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS));
#else
  __disable_irq();
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}

/***************************************************************************//**
 * @brief
 *   Enable interrupts.
 ******************************************************************************/
SL_WEAK void CORE_AtomicEnableIrq(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  __set_BASEPRI(0);
#else
  __enable_irq();
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}

/***************************************************************************//**
 * @brief
 *   Enter an ATOMIC section.
 *
 *   Saves the current interrupt disable state and disables interrupts using
 *   BASEPRI or PRIMASK depending on CORE_ATOMIC_METHOD.
 ******************************************************************************/
SL_WEAK CORE_irqState_t CORE_EnterAtomic(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  CORE_irqState_t irqState = __get_BASEPRI();
  __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS));
  return irqState;
#else
  CORE_irqState_t irqState = __get_PRIMASK();
  __disable_irq();
  return irqState;
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}

/***************************************************************************//**
 * @brief
 *   Exit an ATOMIC section.
 ******************************************************************************/
SL_WEAK void CORE_ExitAtomic(CORE_irqState_t irqState)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  __set_BASEPRI(irqState);
#else
  if (irqState == 0) {
    __enable_irq();
  }
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}

/***************************************************************************//**
 * @brief
 *   Brief interrupt enable/disable sequence to allow handling of
 *   pending interrupts.
 ******************************************************************************/
SL_WEAK void CORE_YieldAtomic(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  CORE_irqState_t basepri = __get_BASEPRI();
  if (basepri >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS))) {
    __set_BASEPRI(0);
    __set_BASEPRI(basepri);
  }
#else
  if (__get_PRIMASK() & 1) {
    __enable_irq();
    __disable_irq();
  }
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}

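// Usage sketch (illustrative, not part of the original file): an ATOMIC section
// behaves like a CRITICAL section when CORE_ATOMIC_METHOD_PRIMASK is used; with
// CORE_ATOMIC_METHOD_BASEPRI, interrupts with a priority value lower than
// CORE_ATOMIC_BASE_PRIORITY_LEVEL (i.e., more urgent) are still served.
// fifoHead and fifoAdvance() are hypothetical application code.
//
//   #include "em_core.h"
//
//   static volatile uint32_t fifoHead = 0;
//
//   void fifoAdvance(void)
//   {
//     CORE_irqState_t irqState = CORE_EnterAtomic();
//     fifoHead = (fifoHead + 1) & 0x0F;   // Update shared state without preemption.
//     CORE_ExitAtomic(irqState);
//   }
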
/***************************************************************************//**
 * @brief
 *   Enter a NVIC mask section.
 *
 *   Saves the current NVIC interrupt enable state and disables the interrupts
 *   given in the disable mask.
 ******************************************************************************/
void CORE_EnterNvicMask(CORE_nvicMask_t *nvicState,
                        const CORE_nvicMask_t *disable)
{
  CORE_CRITICAL_SECTION(
    *nvicState = *(CORE_nvicMask_t*)&NVIC->ICER[0];
    *(CORE_nvicMask_t*)&NVIC->ICER[0] = *disable;
    )
}

/***************************************************************************//**
 * @brief
 *   Disable NVIC interrupts.
 ******************************************************************************/
void CORE_NvicDisableMask(const CORE_nvicMask_t *disable)
{
  CORE_CRITICAL_SECTION(
    *(CORE_nvicMask_t*)&NVIC->ICER[0] = *disable;
    )
}

/***************************************************************************//**
 * @brief
 *   Set current NVIC interrupt enable mask.
 ******************************************************************************/
void CORE_NvicEnableMask(const CORE_nvicMask_t *enable)
{
  CORE_CRITICAL_SECTION(
    *(CORE_nvicMask_t*)&NVIC->ISER[0] = *enable;
    )
}

/***************************************************************************//**
 * @brief
 *   Brief NVIC interrupt enable/disable sequence to allow handling of
 *   pending interrupts.
 ******************************************************************************/
void CORE_YieldNvicMask(const CORE_nvicMask_t *enable)
{
  CORE_nvicMask_t nvicMask;

  // Get the current NVIC enable mask.
  CORE_CRITICAL_SECTION(
    nvicMask = *(CORE_nvicMask_t*)&NVIC->ISER[0];
    )

  // Make a mask with bits set for those interrupts that are currently
  // disabled but are set in the enable mask.
#if (CORE_NVIC_REG_WORDS == 1)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];

  if (nvicMask.a[0] != 0) {

#elif (CORE_NVIC_REG_WORDS == 2)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[1] &= enable->a[1];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
  nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];

  if ((nvicMask.a[0] != 0) || (nvicMask.a[1] != 0)) {

#elif (CORE_NVIC_REG_WORDS == 3)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[1] &= enable->a[1];
  nvicMask.a[2] &= enable->a[2];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
  nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];
  nvicMask.a[2] = ~nvicMask.a[2] & enable->a[2];

  if ((nvicMask.a[0] != 0) || (nvicMask.a[1] != 0) || (nvicMask.a[2] != 0)) {
#endif

    // Enable previously disabled interrupts.
    *(CORE_nvicMask_t*)&NVIC->ISER[0] = nvicMask;

    // Disable those interrupts again.
    *(CORE_nvicMask_t*)&NVIC->ICER[0] = nvicMask;
  }
}

/***************************************************************************//**
 * @brief
 *   Utility function to set an IRQn bit in a NVIC enable/disable mask.
 ******************************************************************************/
void CORE_NvicMaskSetIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
{
  EFM_ASSERT((irqN >= 0) && (irqN < EXT_IRQ_COUNT));
  mask->a[irqN >> 5] |= 1 << (irqN & 0x1F);
}

/***************************************************************************//**
 * @brief
 *   Utility function to clear an IRQn bit in a NVIC enable/disable mask.
 ******************************************************************************/
void CORE_NvicMaskClearIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
{
  EFM_ASSERT((irqN >= 0) && (irqN < EXT_IRQ_COUNT));
  mask->a[irqN >> 5] &= ~(1 << (irqN & 0x1F));
}

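// Usage sketch (illustrative, not part of the original file): build a NVIC mask
// covering two interrupt sources and disable only those while the rest of the
// system keeps running. TIMER0_IRQn and USART0_RX_IRQn are device IRQ numbers
// assumed here for illustration; updateSharedDriverState() is hypothetical.
//
//   #include "em_core.h"
//
//   void updateSharedDriverState(void)
//   {
//     CORE_nvicMask_t mask = { { 0 } };
//     CORE_nvicMask_t nvicState;
//
//     CORE_NvicMaskSetIRQ(TIMER0_IRQn, &mask);
//     CORE_NvicMaskSetIRQ(USART0_RX_IRQn, &mask);
//
//     CORE_EnterNvicMask(&nvicState, &mask); // Disable only the masked IRQs.
//     // ... touch state shared with the TIMER0 and USART0 RX handlers ...
//     CORE_NvicEnableMask(&nvicState);       // Re-enable what was enabled before.
//   }
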
/***************************************************************************//**
 * @brief
 *   Check whether the current CPU operation mode is handler mode.
 ******************************************************************************/
SL_WEAK bool CORE_InIrqContext(void)
{
  return (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0;
}

/***************************************************************************//**
 * @brief
 *   Check whether a specific interrupt is disabled or blocked.
 ******************************************************************************/
SL_WEAK bool CORE_IrqIsBlocked(IRQn_Type irqN)
{
  uint32_t irqPri, activeIrq;

#if (__CORTEX_M >= 3)
  uint32_t basepri;

  EFM_ASSERT((irqN >= MemoryManagement_IRQn) && (irqN < EXT_IRQ_COUNT));
#else
  EFM_ASSERT((irqN >= SVCall_IRQn) && (irqN < EXT_IRQ_COUNT));
#endif

  if (__get_PRIMASK() & 1) {
    return true;                      // All IRQs are disabled.
  }

  if (CORE_NvicIRQDisabled(irqN)) {
    return true;                      // The IRQ in question is disabled.
  }

  irqPri = NVIC_GetPriority(irqN);
#if (__CORTEX_M >= 3)
  basepri = __get_BASEPRI();
  if ((basepri != 0)
      && (irqPri >= (basepri >> (8 - __NVIC_PRIO_BITS)))) {
    return true;                      // The IRQ in question has too low
  }                                   // priority vs. BASEPRI.
#endif

  // Check if we are already in an interrupt handler. If so, only an interrupt
  // with higher priority (lower priority value) can preempt.
  activeIrq = (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) >> SCB_ICSR_VECTACTIVE_Pos;
  if ((activeIrq != 0)
      && (irqPri >= NVIC_GetPriority((IRQn_Type)(activeIrq - 16)))) {
    return true;                      // The IRQ in question has too low
  }                                   // priority vs. the currently active IRQ.

  return false;
}

/***************************************************************************//**
 * @brief
 *   Check whether interrupts are disabled.
 ******************************************************************************/
SL_WEAK bool CORE_IrqIsDisabled(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_PRIMASK)
  return (__get_PRIMASK() & 1) == 1;

#elif (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  return ((__get_PRIMASK() & 1) == 1)
         || (__get_BASEPRI() >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL
                                 << (8 - __NVIC_PRIO_BITS)));
#endif
}

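// Usage sketch (illustrative, not part of the original file): guard against
// deadlock before busy-waiting on a flag that an interrupt handler is expected
// to set. transferDone is a hypothetical flag set by a hypothetical LDMA ISR;
// LDMA_IRQn is the corresponding device IRQ number.
//
//   #include "em_core.h"
//
//   extern volatile bool transferDone;
//
//   bool waitForTransfer(void)
//   {
//     if (CORE_IrqIsDisabled() || CORE_IrqIsBlocked(LDMA_IRQn)) {
//       return false;                // The ISR can never run; don't spin forever.
//     }
//     while (!transferDone) {
//       // Wait for the interrupt handler to set the flag.
//     }
//     return true;
//   }
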
/***************************************************************************//**
 * @brief
 *   Get the current NVIC enable mask state.
 ******************************************************************************/
void CORE_GetNvicEnabledMask(CORE_nvicMask_t *mask)
{
  CORE_CRITICAL_SECTION(
    *mask = *(CORE_nvicMask_t*)&NVIC->ISER[0];
    )
}

/***************************************************************************//**
 * @brief
 *   Get the NVIC disable state for a given mask.
 ******************************************************************************/
bool CORE_GetNvicMaskDisableState(const CORE_nvicMask_t *mask)
{
  CORE_nvicMask_t nvicMask;

  CORE_CRITICAL_SECTION(
    nvicMask = *(CORE_nvicMask_t*)&NVIC->ISER[0];
    )

#if (CORE_NVIC_REG_WORDS == 1)
  return (mask->a[0] & nvicMask.a[0]) == 0;

#elif (CORE_NVIC_REG_WORDS == 2)
  return ((mask->a[0] & nvicMask.a[0]) == 0)
         && ((mask->a[1] & nvicMask.a[1]) == 0);

#elif (CORE_NVIC_REG_WORDS == 3)
  return ((mask->a[0] & nvicMask.a[0]) == 0)
         && ((mask->a[1] & nvicMask.a[1]) == 0)
         && ((mask->a[2] & nvicMask.a[2]) == 0);
#endif
}

/***************************************************************************//**
 * @brief
 *   Check whether a NVIC interrupt is disabled.
 ******************************************************************************/
bool CORE_NvicIRQDisabled(IRQn_Type irqN)
{
  CORE_nvicMask_t *mask;

  EFM_ASSERT((irqN >= 0) && (irqN < EXT_IRQ_COUNT));
  mask = (CORE_nvicMask_t*)&NVIC->ISER[0];
  return (mask->a[irqN >> 5] & (1 << (irqN & 0x1F))) == 0;
}

/***************************************************************************//**
 * @brief
 *   Utility function to get the handler for a specific interrupt.
 ******************************************************************************/
void *CORE_GetNvicRamTableHandler(IRQn_Type irqN)
{
  EFM_ASSERT((irqN >= -16) && (irqN < EXT_IRQ_COUNT));
  return (void*)(((uint32_t*)SCB->VTOR)[irqN + 16]);
}

/***************************************************************************//**
 * @brief
 *   Utility function to set the handler for a specific interrupt.
 ******************************************************************************/
void CORE_SetNvicRamTableHandler(IRQn_Type irqN, void *handler)
{
  EFM_ASSERT((irqN >= -16) && (irqN < EXT_IRQ_COUNT));
  ((uint32_t*)SCB->VTOR)[irqN + 16] = (uint32_t)handler;
}

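// Usage sketch (illustrative, not part of the original file): install an
// application handler in a RAM-based vector table at run time. Assumes
// SCB->VTOR already points to a table in RAM (see CORE_InitNvicVectorTable()
// below); myTimerHandler() and TIMER0_IRQn are placeholders for illustration.
//
//   void myTimerHandler(void);
//
//   void installTimerHandler(void)
//   {
//     void *previous = CORE_GetNvicRamTableHandler(TIMER0_IRQn);
//     (void)previous;                 // Could be saved and restored later.
//     CORE_SetNvicRamTableHandler(TIMER0_IRQn, (void*)myTimerHandler);
//     NVIC_ClearPendingIRQ(TIMER0_IRQn);
//     NVIC_EnableIRQ(TIMER0_IRQn);
//   }
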
/***************************************************************************//**
 * @brief
 *   Initialize an interrupt vector table by copying table entries from a
 *   source table to a target table.
 ******************************************************************************/
void CORE_InitNvicVectorTable(uint32_t *sourceTable,
                              uint32_t sourceSize,
                              uint32_t *targetTable,
                              uint32_t targetSize,
                              void *defaultHandler,
                              bool overwriteActive)
{
  uint32_t i;

  // Assert on a non-SRAM-based target table.
  EFM_ASSERT(((uint32_t)targetTable >= RAM_MEM_BASE)
             && ((uint32_t)targetTable < (RAM_MEM_BASE + RAM_MEM_SIZE)));

  // Assert if misaligned with respect to the VTOR register implementation.
#if defined(SCB_VTOR_TBLBASE_Msk)
  EFM_ASSERT(((uint32_t)targetTable & ~(SCB_VTOR_TBLOFF_Msk
                                        | SCB_VTOR_TBLBASE_Msk)) == 0);
#else
  EFM_ASSERT(((uint32_t)targetTable & ~SCB_VTOR_TBLOFF_Msk) == 0);
#endif

  // Assert if misaligned with respect to the vector table size.
  // The vector table address must be aligned to its size rounded up to the
  // nearest power of two.
  EFM_ASSERT(((uint32_t)targetTable
              & ((1 << (32 - __CLZ((targetSize * 4) - 1))) - 1)) == 0);

  for (i = 0; i < targetSize; i++) {
    if (overwriteActive) {                  // Overwrite target entries?
      if (i < sourceSize) {                 //   targetSize <= sourceSize
        targetTable[i] = sourceTable[i];
      } else {                              //   targetSize > sourceSize
        targetTable[i] = (uint32_t)defaultHandler;
      }
    } else {                                // Only overwrite target entries which are 0.
      if (i < sourceSize) {                 //   targetSize <= sourceSize
        if (targetTable[i] == 0) {
          targetTable[i] = sourceTable[i];
        }
      } else {                              //   targetSize > sourceSize
        if (targetTable[i] == 0) {
          targetTable[i] = (uint32_t)defaultHandler;
        }
      }
    }
  }
  SCB->VTOR = (uint32_t)targetTable;
}
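
// Usage sketch (illustrative, not part of the original file): copy the flash
// vector table into a suitably aligned RAM table at startup and retarget VTOR.
// The flash table symbol __Vectors, the handler Default_Handler(), the table
// name ramVectorTable and the GCC-style alignment attribute are assumptions for
// illustration; 512-byte alignment covers tables of up to 128 entries.
//
//   #include "em_core.h"
//
//   #define VECTOR_TABLE_ENTRIES  (16 + EXT_IRQ_COUNT)
//
//   extern uint32_t __Vectors[];    // Vector table placed in flash by startup code.
//   void Default_Handler(void);
//
//   static uint32_t ramVectorTable[VECTOR_TABLE_ENTRIES]
//                   __attribute__((aligned(512)));
//
//   void moveVectorTableToRam(void)
//   {
//     CORE_InitNvicVectorTable(__Vectors, VECTOR_TABLE_ENTRIES,
//                              ramVectorTable, VECTOR_TABLE_ENTRIES,
//                              (void*)Default_Handler, true);
//   }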