TNeo  BETA v1.08-11-g97e5a6d
tn_arch_pic32.h
Go to the documentation of this file.
1 /*******************************************************************************
2  *
3  * TNeo: real-time kernel initially based on TNKernel
4  *
5  * TNKernel: copyright 2004, 2013 Yuri Tiomkin.
6  * PIC32-specific routines: copyright 2013, 2014 Anders Montonen.
7  * TNeo: copyright 2014 Dmitry Frank.
8  *
9  * TNeo was born as a thorough review and re-implementation of
10  * TNKernel. The new kernel has well-formed code, inherited bugs are fixed
11  * as well as new features being added, and it is tested carefully with
12  * unit-tests.
13  *
14  * API is changed somewhat, so it's not 100% compatible with TNKernel,
15  * hence the new name: TNeo.
16  *
17  * Permission to use, copy, modify, and distribute this software in source
18  * and binary forms and its documentation for any purpose and without fee
19  * is hereby granted, provided that the above copyright notice appear
20  * in all copies and that both that copyright notice and this permission
21  * notice appear in supporting documentation.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE DMITRY FRANK AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY FRANK OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  ******************************************************************************/
36 
37 /**
38  *
39  * \file
40  *
41  * PIC32 architecture-dependent routines
42  *
43  */
44 
45 #ifndef _TN_ARCH_PIC32_H
46 #define _TN_ARCH_PIC32_H
47 
48 
49 /*******************************************************************************
50  * INCLUDED FILES
51  ******************************************************************************/
52 
53 #include "../../core/tn_cfg_dispatch.h"
54 
//-- include macros for atomic access to structure bit fields so that
//   the application can use them too.
57 #include "tn_arch_pic32_bfa.h"
58 
59 
60 
61 #ifdef __cplusplus
62 extern "C" { /*}*/
63 #endif
64 
65 
66 /*******************************************************************************
67  * PROTECTED GLOBAL DATA
68  ******************************************************************************/
69 
/// Current interrupt nesting count. Incremented on entry and decremented on
/// exit by the `tn_p32_soft_isr()` and `tn_p32_srs_isr()` wrapper macros;
/// the stack pointer is swapped only on the outermost transitions
/// (0 -> 1 on entry, 1 -> 0 on exit).
extern volatile int tn_p32_int_nest_count;

/// Saved task stack pointer. Needed when switching stack pointer from
/// task stack to interrupt stack (written by the ISR wrappers when the
/// nesting count becomes 1).
extern void *tn_p32_user_sp;

/// Saved ISR stack pointer. Needed when switching stack pointer from
/// interrupt stack to task stack (restored by the ISR wrappers when the
/// nesting count drops back to 0).
extern void *tn_p32_int_sp;
81 
82 
83 
84 
85 
86 
87 
88 
89 #ifndef DOXYGEN_SHOULD_SKIP_THIS
90 
//-- Sentinel stored in the int-status variable by TN_INTSAVE_DATA before
//   any state is saved; lets the debug build detect "restore without save".
#define _TN_PIC32_INTSAVE_DATA_INVALID 0xffffffff

#if TN_DEBUG
//-- Debug check used by TN_INT_RESTORE(): fatal error if the status
//   variable still holds the sentinel, i.e. interrupts were never
//   saved/disabled before the restore.
# define _TN_PIC32_INTSAVE_CHECK() \
{ \
   if (TN_INTSAVE_VAR == _TN_PIC32_INTSAVE_DATA_INVALID){ \
      _TN_FATAL_ERROR(""); \
   } \
}
#else
# define _TN_PIC32_INTSAVE_CHECK() /* nothing */
#endif
103 
104 /**
105  * FFS - find first set bit. Used in `_find_next_task_to_run()` function.
106  * Say, for `0xa8` it should return `3`.
107  *
108  * May be not defined: in this case, naive algorithm will be used.
109  */
110 #define _TN_FFS(x) (32 - __builtin_clz((x) & (0 - (x))))
111 
112 /**
113  * Used by the kernel as a signal that something really bad happened.
114  * Indicates TNeo bugs as well as illegal kernel usage
115  * (e.g. sleeping in the idle task callback)
116  *
117  * Typically, set to assembler instruction that causes debugger to halt.
118  */
119 #define _TN_FATAL_ERRORF(error_msg, ...) \
120  {__asm__ volatile(" sdbbp 0"); __asm__ volatile ("nop");}
121 
122 /**
123  * \def TN_ARCH_STK_ATTR_BEFORE
124  *
125  * Compiler-specific attribute that should be placed **before** declaration of
126  * array used for stack. It is needed because there are often additional
127  * restrictions applied to alignment of stack, so, to meet them, stack arrays
128  * need to be declared with these macros.
129  *
130  * @see TN_ARCH_STK_ATTR_AFTER
131  */
132 
133 /**
134  * \def TN_ARCH_STK_ATTR_AFTER
135  *
136  * Compiler-specific attribute that should be placed **after** declaration of
137  * array used for stack. It is needed because there are often additional
138  * restrictions applied to alignment of stack, so, to meet them, stack arrays
139  * need to be declared with these macros.
140  *
141  * @see TN_ARCH_STK_ATTR_BEFORE
142  */
143 
144 #if defined (__XC32)
145 # define TN_ARCH_STK_ATTR_BEFORE
146 # define TN_ARCH_STK_ATTR_AFTER __attribute__((aligned(0x8)))
147 #else
148 # error "Unknown compiler"
149 #endif
150 
151 /**
152  * Minimum task's stack size, in words, not in bytes; includes a space for
153  * context plus for parameters passed to task's body function.
154  */
155 #define TN_MIN_STACK_SIZE (36 + _TN_STACK_OVERFLOW_SIZE_ADD)
156 
157 /**
158  * Width of `int` type.
159  */
160 #define TN_INT_WIDTH 32
161 
162 /**
163  * Unsigned integer type whose size is equal to the size of CPU register.
164  * Typically it's plain `unsigned int`.
165  */
166 typedef unsigned int TN_UWord;
167 
168 /**
169  * Unsigned integer type that is able to store pointers.
170  * We need it because some platforms don't define `uintptr_t`.
171  * Typically it's `unsigned int`.
172  */
173 typedef unsigned int TN_UIntPtr;
174 
175 /**
176  * Maximum number of priorities available, this value usually matches
177  * `#TN_INT_WIDTH`.
178  *
179  * @see TN_PRIORITIES_CNT
180  */
181 #define TN_PRIORITIES_MAX_CNT TN_INT_WIDTH
182 
183 /**
184  * Value for infinite waiting, usually matches `ULONG_MAX`,
185  * because `#TN_TickCnt` is declared as `unsigned long`.
186  */
187 #define TN_WAIT_INFINITE (TN_TickCnt)0xFFFFFFFF
188 
189 /**
190  * Value for initializing the task's stack
191  */
192 #define TN_FILL_STACK_VAL 0xFEEDFACE
193 
194 
195 
196 
197 /**
198  * Variable name that is used for storing interrupts state
199  * by macros TN_INTSAVE_DATA and friends
200  */
201 #define TN_INTSAVE_VAR tn_save_status_reg
202 
203 /**
204  * Declares variable that is used by macros `TN_INT_DIS_SAVE()` and
205  * `TN_INT_RESTORE()` for storing status register value.
206  *
207  * It is good idea to initially set it to some invalid value,
208  * and if TN_DEBUG is non-zero, check it in TN_INT_RESTORE().
209  * Then, we can catch bugs if someone tries to restore interrupts status
210  * without saving it first.
211  *
212  * @see `TN_INT_DIS_SAVE()`
213  * @see `TN_INT_RESTORE()`
214  */
215 #define TN_INTSAVE_DATA \
216  TN_UWord TN_INTSAVE_VAR = _TN_PIC32_INTSAVE_DATA_INVALID;
217 
218 /**
219  * The same as `#TN_INTSAVE_DATA` but for using in ISR together with
220  * `TN_INT_IDIS_SAVE()`, `TN_INT_IRESTORE()`.
221  *
222  * @see `TN_INT_IDIS_SAVE()`
223  * @see `TN_INT_IRESTORE()`
224  */
225 #define TN_INTSAVE_DATA_INT TN_INTSAVE_DATA
226 
227 /**
228  * \def TN_INT_DIS_SAVE()
229  *
230  * Disable interrupts and return previous value of status register,
231  * atomically. Similar `tn_arch_sr_save_int_dis()`, but implemented
232  * as a macro, so it is potentially faster.
233  *
234  * Uses `#TN_INTSAVE_DATA` as a temporary storage.
235  *
236  * @see `#TN_INTSAVE_DATA`
237  * @see `tn_arch_sr_save_int_dis()`
238  */
239 
240 /**
241  * \def TN_INT_RESTORE()
242  *
243  * Restore previously saved status register.
244  * Similar to `tn_arch_sr_restore()`, but implemented as a macro,
245  * so it is potentially faster.
246  *
247  * Uses `#TN_INTSAVE_DATA` as a temporary storage.
248  *
249  * @see `#TN_INTSAVE_DATA`
250  * @see `tn_arch_sr_save_int_dis()`
251  */
252 
253 #ifdef __mips16
254 # define TN_INT_DIS_SAVE() TN_INTSAVE_VAR = tn_arch_sr_save_int_dis()
255 # define TN_INT_RESTORE() _TN_PIC32_INTSAVE_CHECK(); \
256  tn_arch_sr_restore(TN_INTSAVE_VAR)
257 #else
258 # define TN_INT_DIS_SAVE() __asm__ __volatile__( \
259  "di %0; ehb" \
260  : "=d" (TN_INTSAVE_VAR) \
261  )
262 # define TN_INT_RESTORE() _TN_PIC32_INTSAVE_CHECK(); \
263  __builtin_mtc0(12, 0, TN_INTSAVE_VAR)
264 #endif
265 
266 /**
267  * The same as `TN_INT_DIS_SAVE()` but for using in ISR.
268  *
269  * Uses `#TN_INTSAVE_DATA_INT` as a temporary storage.
270  *
271  * @see `#TN_INTSAVE_DATA_INT`
272  */
273 #define TN_INT_IDIS_SAVE() TN_INT_DIS_SAVE()
274 
275 /**
276  * The same as `TN_INT_RESTORE()` but for using in ISR.
277  *
278  * Uses `#TN_INTSAVE_DATA_INT` as a temporary storage.
279  *
280  * @see `#TN_INTSAVE_DATA_INT`
281  */
282 #define TN_INT_IRESTORE() TN_INT_RESTORE()
283 
284 /**
285  * Returns nonzero if interrupts are disabled, zero otherwise.
286  */
287 #define TN_IS_INT_DISABLED() ((__builtin_mfc0(12, 0) & 1) == 0)
288 
289 /**
290  * Pend context switch from interrupt.
291  */
292 #define _TN_CONTEXT_SWITCH_IPEND_IF_NEEDED() \
293  _tn_context_switch_pend_if_needed()
294 
295 /**
296  * Converts size in bytes to size in `#TN_UWord`.
297  * For 32-bit platforms, we should shift it by 2 bit to the right;
298  * for 16-bit platforms, we should shift it by 1 bit to the right.
299  */
300 #define _TN_SIZE_BYTES_TO_UWORDS(size_in_bytes) ((size_in_bytes) >> 2)
301 
302 #if TN_FORCED_INLINE
303 # define _TN_INLINE inline __attribute__ ((always_inline))
304 #else
305 # define _TN_INLINE inline
306 #endif
307 
308 #define _TN_STATIC_INLINE static _TN_INLINE
309 
310 #define _TN_VOLATILE_WORKAROUND /* nothing */
311 
312 #define _TN_ARCH_STACK_PT_TYPE _TN_ARCH_STACK_PT_TYPE__FULL
313 #define _TN_ARCH_STACK_DIR _TN_ARCH_STACK_DIR__DESC
314 
315 #endif //-- DOXYGEN_SHOULD_SKIP_THIS
316 
317 
318 
319 
320 
321 
322 
323 
324 
325 
326 // ---------------------------------------------------------------------------
327 
328 /**
329  * Interrupt handler wrapper macro for software context saving.
330  *
331  * Usage looks like the following:
332  *
333  * tn_p32_soft_isr(_TIMER_1_VECTOR)
334  * {
335  * INTClearFlag(INT_T1);
336  *
337  * //-- do something useful
338  * }
339  *
340  * Note that you should not use `__ISR(_TIMER_1_VECTOR)` macro for that.
341  *
342  * @param vec interrupt vector number, such as `_TIMER_1_VECTOR`, etc.
343  */
344 #define tn_p32_soft_isr(vec) \
345 __attribute__((__noinline__)) void _func##vec(void); \
346 void __attribute__((naked, nomips16)) \
347  __attribute__((vector(vec))) \
348  _isr##vec(void) \
349 { \
350  asm volatile(".set push"); \
351  asm volatile(".set mips32r2"); \
352  asm volatile(".set nomips16"); \
353  asm volatile(".set noreorder"); \
354  asm volatile(".set noat"); \
355  \
356  asm volatile("rdpgpr $sp, $sp"); \
357  \
358  /* Increase interrupt nesting count */ \
359  asm volatile("lui $k0, %hi(tn_p32_int_nest_count)"); \
360  asm volatile("lw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
361  asm volatile("addiu $k1, $k1, 1"); \
362  asm volatile("sw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
363  asm volatile("ori $k0, $zero, 1"); \
364  asm volatile("bne $k1, $k0, 1f"); \
365  \
366  /* Swap stack pointers if nesting count is one */ \
367  asm volatile("lui $k0, %hi(tn_p32_user_sp)"); \
368  asm volatile("sw $sp, %lo(tn_p32_user_sp)($k0)"); \
369  asm volatile("lui $k0, %hi(tn_p32_int_sp)"); \
370  asm volatile("lw $sp, %lo(tn_p32_int_sp)($k0)"); \
371  \
372  asm volatile("1:"); \
373  /* Save context on stack */ \
374  asm volatile("addiu $sp, $sp, -92"); \
375  asm volatile("mfc0 $k1, $14"); /* c0_epc*/ \
376  asm volatile("mfc0 $k0, $12, 2"); /* c0_srsctl*/ \
377  asm volatile("sw $k1, 84($sp)"); \
378  asm volatile("sw $k0, 80($sp)"); \
379  asm volatile("mfc0 $k1, $12"); /* c0_status*/ \
380  asm volatile("sw $k1, 88($sp)"); \
381  \
382  /* Enable nested interrupts */ \
383  asm volatile("mfc0 $k0, $13"); /* c0_cause*/ \
384  asm volatile("ins $k1, $zero, 1, 15"); \
385  asm volatile("ext $k0, $k0, 10, 6"); \
386  asm volatile("ins $k1, $k0, 10, 6"); \
387  asm volatile("mtc0 $k1, $12"); /* c0_status*/ \
388  \
389  /* Save caller-save registers on stack */ \
390  asm volatile("sw $ra, 76($sp)"); \
391  asm volatile("sw $t9, 72($sp)"); \
392  asm volatile("sw $t8, 68($sp)"); \
393  asm volatile("sw $t7, 64($sp)"); \
394  asm volatile("sw $t6, 60($sp)"); \
395  asm volatile("sw $t5, 56($sp)"); \
396  asm volatile("sw $t4, 52($sp)"); \
397  asm volatile("sw $t3, 48($sp)"); \
398  asm volatile("sw $t2, 44($sp)"); \
399  asm volatile("sw $t1, 40($sp)"); \
400  asm volatile("sw $t0, 36($sp)"); \
401  asm volatile("sw $a3, 32($sp)"); \
402  asm volatile("sw $a2, 28($sp)"); \
403  asm volatile("sw $a1, 24($sp)"); \
404  asm volatile("sw $a0, 20($sp)"); \
405  asm volatile("sw $v1, 16($sp)"); \
406  asm volatile("sw $v0, 12($sp)"); \
407  asm volatile("sw $at, 8($sp)"); \
408  asm volatile("mfhi $v0"); \
409  asm volatile("mflo $v1"); \
410  asm volatile("sw $v0, 4($sp)"); \
411  \
412  /* Call ISR */ \
413  asm volatile("la $t0, _func"#vec); \
414  asm volatile("jalr $t0"); \
415  asm volatile("sw $v1, 0($sp)"); \
416  \
417  /* Restore registers */ \
418  asm volatile("lw $v1, 0($sp)"); \
419  asm volatile("lw $v0, 4($sp)"); \
420  asm volatile("mtlo $v1"); \
421  asm volatile("mthi $v0"); \
422  asm volatile("lw $at, 8($sp)"); \
423  asm volatile("lw $v0, 12($sp)"); \
424  asm volatile("lw $v1, 16($sp)"); \
425  asm volatile("lw $a0, 20($sp)"); \
426  asm volatile("lw $a1, 24($sp)"); \
427  asm volatile("lw $a2, 28($sp)"); \
428  asm volatile("lw $a3, 32($sp)"); \
429  asm volatile("lw $t0, 36($sp)"); \
430  asm volatile("lw $t1, 40($sp)"); \
431  asm volatile("lw $t2, 44($sp)"); \
432  asm volatile("lw $t3, 48($sp)"); \
433  asm volatile("lw $t4, 52($sp)"); \
434  asm volatile("lw $t5, 56($sp)"); \
435  asm volatile("lw $t6, 60($sp)"); \
436  asm volatile("lw $t7, 64($sp)"); \
437  asm volatile("lw $t8, 68($sp)"); \
438  asm volatile("lw $t9, 72($sp)"); \
439  asm volatile("lw $ra, 76($sp)"); \
440  \
441  asm volatile("di"); \
442  asm volatile("ehb"); \
443  \
444  /* Restore context */ \
445  asm volatile("lw $k0, 84($sp)"); \
446  asm volatile("mtc0 $k0, $14"); /* c0_epc */ \
447  asm volatile("lw $k0, 80($sp)"); \
448  asm volatile("mtc0 $k0, $12, 2"); /* c0_srsctl */ \
449  asm volatile("addiu $sp, $sp, 92"); \
450  \
451  /* Decrease interrupt nesting count */ \
452  asm volatile("lui $k0, %hi(tn_p32_int_nest_count)"); \
453  asm volatile("lw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
454  asm volatile("addiu $k1, $k1, -1"); \
455  asm volatile("sw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
456  asm volatile("bne $k1, $zero, 1f"); \
457  asm volatile("lw $k1, -4($sp)"); \
458  \
459  /* Swap stack pointers if nesting count is zero */ \
460  asm volatile("lui $k0, %hi(tn_p32_int_sp)"); \
461  asm volatile("sw $sp, %lo(tn_p32_int_sp)($k0)"); \
462  asm volatile("lui $k0, %hi(tn_p32_user_sp)"); \
463  asm volatile("lw $sp, %lo(tn_p32_user_sp)($k0)"); \
464  \
465  asm volatile("1:"); \
466  asm volatile("wrpgpr $sp, $sp"); \
467  asm volatile("mtc0 $k1, $12"); /* c0_status */ \
468  asm volatile("eret"); \
469  \
470  asm volatile(".set pop"); \
471  \
472 } __attribute((__noinline__)) void _func##vec(void)
473 
474 
475 
476 
477 /**
478  * Interrupt handler wrapper macro for shadow register context saving.
479  *
480  * Usage looks like the following:
481  *
482  * tn_p32_srs_isr(_INT_UART_1_VECTOR)
483  * {
484  * INTClearFlag(INT_U1);
485  *
486  * //-- do something useful
487  * }
488  *
489  * Note that you should not use `__ISR(_INT_UART_1_VECTOR)` macro for that.
490  *
491  * @param vec interrupt vector number, such as `_TIMER_1_VECTOR`, etc.
492  */
493 #define tn_p32_srs_isr(vec) \
494 __attribute__((__noinline__)) void _func##vec(void); \
495 void __attribute__((naked, nomips16)) \
496  __attribute__((vector(vec))) \
497  _isr##vec(void) \
498 { \
499  asm volatile(".set push"); \
500  asm volatile(".set mips32r2"); \
501  asm volatile(".set nomips16"); \
502  asm volatile(".set noreorder"); \
503  asm volatile(".set noat"); \
504  \
505  asm volatile("rdpgpr $sp, $sp"); \
506  \
507  /* Increase interrupt nesting count */ \
508  asm volatile("lui $k0, %hi(tn_p32_int_nest_count)"); \
509  asm volatile("lw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
510  asm volatile("addiu $k1, $k1, 1"); \
511  asm volatile("sw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
512  asm volatile("ori $k0, $zero, 1"); \
513  asm volatile("bne $k1, $k0, 1f"); \
514  \
515  /* Swap stack pointers if nesting count is one */ \
516  asm volatile("lui $k0, %hi(tn_p32_user_sp)"); \
517  asm volatile("sw $sp, %lo(tn_p32_user_sp)($k0)"); \
518  asm volatile("lui $k0, %hi(tn_p32_int_sp)"); \
519  asm volatile("lw $sp, %lo(tn_p32_int_sp)($k0)"); \
520  \
521  asm volatile("1:"); \
522  /* Save context on stack */ \
523  asm volatile("addiu $sp, $sp, -20"); \
524  asm volatile("mfc0 $k1, $14"); /* c0_epc */ \
525  asm volatile("mfc0 $k0, $12, 2"); /* c0_srsctl */ \
526  asm volatile("sw $k1, 12($sp)"); \
527  asm volatile("sw $k0, 8($sp)"); \
528  asm volatile("mfc0 $k1, $12"); /* c0_status */ \
529  asm volatile("sw $k1, 16($sp)"); \
530  \
531  /* Enable nested interrupts */ \
532  asm volatile("mfc0 $k0, $13"); /* c0_cause */ \
533  asm volatile("ins $k1, $zero, 1, 15"); \
534  asm volatile("ext $k0, $k0, 10, 6"); \
535  asm volatile("ins $k1, $k0, 10, 6"); \
536  asm volatile("mtc0 $k1, $12"); /* c0_status */ \
537  \
538  /* Save caller-save registers on stack */ \
539  asm volatile("mfhi $v0"); \
540  asm volatile("mflo $v1"); \
541  asm volatile("sw $v0, 4($sp)"); \
542  \
543  /* Call ISR */ \
544  asm volatile("la $t0, _func"#vec); \
545  asm volatile("jalr $t0"); \
546  asm volatile("sw $v1, 0($sp)"); \
547  \
548  /* Restore registers */ \
549  asm volatile("lw $v1, 0($sp)"); \
550  asm volatile("lw $v0, 4($sp)"); \
551  asm volatile("mtlo $v1"); \
552  asm volatile("mthi $v0"); \
553  \
554  asm volatile("di"); \
555  asm volatile("ehb"); \
556  \
557  /* Restore context */ \
558  asm volatile("lw $k0, 12($sp)"); \
559  asm volatile("mtc0 $k0, $14"); /* c0_epc */ \
560  asm volatile("lw $k0, 8($sp)"); \
561  asm volatile("mtc0 $k0, $12, 2"); /* c0_srsctl */ \
562  asm volatile("addiu $sp, $sp, 20"); \
563  \
564  /* Decrease interrupt nesting count */ \
565  asm volatile("lui $k0, %hi(tn_p32_int_nest_count)"); \
566  asm volatile("lw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
567  asm volatile("addiu $k1, $k1, -1"); \
568  asm volatile("sw $k1, %lo(tn_p32_int_nest_count)($k0)"); \
569  asm volatile("bne $k1, $zero, 1f"); \
570  asm volatile("lw $k1, -4($sp)"); \
571  \
572  /* Swap stack pointers if nesting count is zero */ \
573  asm volatile("lui $k0, %hi(tn_p32_int_sp)"); \
574  asm volatile("sw $sp, %lo(tn_p32_int_sp)($k0)"); \
575  asm volatile("lui $k0, %hi(tn_p32_user_sp)"); \
576  asm volatile("lw $sp, %lo(tn_p32_user_sp)($k0)"); \
577  \
578  asm volatile("1:"); \
579  asm volatile("wrpgpr $sp, $sp"); \
580  asm volatile("mtc0 $k1, $12"); /* c0_status */ \
581  asm volatile("eret"); \
582  \
583  asm volatile(".set pop"); \
584  \
585 } __attribute((__noinline__)) void _func##vec(void)
586 
587 
588 /**
589  * For compatibility with old projects, old name of `tn_p32_soft_isr()` macro
590  * is kept; please don't use it in new code.
591  */
592 #define tn_soft_isr tn_p32_soft_isr
593 
594 /**
595  * For compatibility with old projects, old name of `tn_p32_srs_isr()` macro
596  * is kept; please don't use it in new code.
597  */
598 #define tn_srs_isr tn_p32_srs_isr
599 
600 #ifdef __cplusplus
601 } /* extern "C" */
602 #endif
603 
604 #endif // _TN_ARCH_PIC32_H
605 
void * tn_p32_user_sp
saved task stack pointer.
volatile int tn_p32_int_nest_count
current interrupt nesting count.
void * tn_p32_int_sp
saved ISR stack pointer.
unsigned int TN_UIntPtr
Unsigned integer type that is able to store pointers.
Atomic bit-field access macros for PIC24/dsPIC.
unsigned int TN_UWord
Unsigned integer type whose size is equal to the size of CPU register.