.section .shared
.global _isr_stubs
_isr_stubs:
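/* 256 stubs, one per interrupt vector. each stub is padded to exactly 8
 * bytes (the 0xCC fill is int3), so _isr_stage2 can recover the vector
 * number from the call's return address */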
.set i, 0
.rept 256
.set _stub_start, .
cli
.if i != 8 && i != 10 && i != 11 && i != 12 && i != 13 && i != 14 && i != 17 && i != 21 && i != 29 && i != 30
/* the CPU pushed no error code for this vector - push a dummy value so the
 * stack layout is the same for every vector */
push %rbx
.endif
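/* at this point every vector has the same stack layout:
 * error code (or the dummy %rbx) on top of the IRET frame */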
call _isr_stage2
/* the call's return address must stay inside this stub's 8-byte slot,
 * otherwise the vector computation in _isr_stage2 is off by one */
.if . - _stub_start >= 8
.error "isr stubs over maximum size"
.abort
.endif
.align 8, 0xCC
.set i, i + 1
.endr
_isr_stage2:
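/* common tail for all 256 stubs: save the remaining registers, switch to
 * kernel paging and a bigger stack, call isr_stage3, then undo everything
 * and return with iretq */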
/* pushal order without %esp, followed by %r8-%r15: 15 registers in total */
push %rax
push %rcx
push %rdx
push %rbx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
// TODO FXSAVE might be required on interrupts too?
// on interrupts - no
// on syscalls - yes
// have fun figuring that one out
// basically if you're context switching, FXSAVE
/* first argument: vector nr. computed from the return address at offset
* 15*8 = 120 */
mov 120(%rsp), %rdi
sub $_isr_stubs, %rdi
shr $3, %rdi
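/* each stub occupies exactly 8 bytes, so the offset divided by 8 is the
 * vector number */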
/* second argument: IRET stack frame */
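/* 136 = 15*8 pushed registers + 8 call return address + 8 error code */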
lea 136(%rsp), %rsi
// load kernel paging
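// this code keeps executing across the CR3 switch, so it (and the stack in
// use here) must be mapped at the same addresses under both the old CR3 and
// pml4_identity - presumably that's what the .shared section is for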
mov %cr3, %rbx
push %rbx
mov $pml4_identity, %rbx
mov %rbx, %cr3
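// switch to a bigger stack for the stage-3 handler; %rbp (already saved
// above) holds the old %rsp so it can be restored after the call.
// (assumption: _isr_big_stack is defined elsewhere, in kernel-mapped memory)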
mov %rsp, %rbp
mov $_isr_big_stack, %rsp
call isr_stage3
mov %rbp, %rsp
pop %rax // restore old cr3
mov %rax, %cr3
// restore registers
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rbx
pop %rdx
pop %rcx
pop %rax
add $16, %rsp /* skip return address from call and the error code */
iretq
.align 8
// TODO stack overflow check
.skip 256 // seems to be enough
.global _isr_mini_stack
_isr_mini_stack:
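// the stack grows down from this label into the 256 bytes reserved above.
// assuming the CPU enters interrupts on this stack (e.g. via the TSS), the
// most it ever holds before the switch to _isr_big_stack is
// 5*8 (IRET frame) + 8 (error code) + 8 (call return) + 15*8 (GPRs) + 8 (saved cr3) = 184 bytes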