 * The legacy x87 FPU state format, as saved by the FSAVE and
 * restored by the FRSTOR instructions:
12 u32 cwd; /* FPU Control Word */
13 u32 swd; /* FPU Status Word */
14 u32 twd; /* FPU Tag Word */
15 u32 fip; /* FPU IP Offset */
16 u32 fcs; /* FPU IP Selector */
17 u32 foo; /* FPU Operand Pointer Offset */
18 u32 fos; /* FPU Operand Pointer Selector */
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	/* Software status information [not touched by FSAVE]: */
 * The legacy fx SSE/MMX FPU state format, as saved by the FXSAVE and
 * restored by the FXRSTOR instructions. It's similar to the FSAVE
 * format, but differs in some areas, plus has extensions at
 * the end for the XMM registers.
34 u16 cwd; /* Control Word */
35 u16 swd; /* Status Word */
36 u16 twd; /* Tag Word */
37 u16 fop; /* Last Instruction Opcode */
40 u64 rip; /* Instruction Pointer */
41 u64 rdp; /* Data Pointer */
44 u32 fip; /* FPU IP Offset */
45 u32 fcs; /* FPU IP Selector */
46 u32 foo; /* FPU Operand Offset */
47 u32 fos; /* FPU Operand Selector */
50 u32 mxcsr; /* MXCSR Register State */
51 u32 mxcsr_mask; /* MXCSR Mask */
	/* 8*16 bytes for each FP-reg = 128 bytes: */
	/* 16*16 bytes for each XMM-reg = 256 bytes: */
66 } __attribute__((aligned(16)));
/* Default value for fxregs_state.mxcsr: all exceptions masked, round-to-nearest, no flags set */
#define MXCSR_DEFAULT		0x1f80
 * Software based FPU emulation state. This is arbitrary really,
 * it matches the x87 format to make it easier to understand:
	/* 8*10 bytes for each FP-reg = 80 bytes: */
91 struct math_emu_info *info;
 * List of XSAVE features Linux knows about:
	 * Values above here are "legacy states".
	 * Those below are "extended states".
/* Single-feature bitmasks, one bit per XFEATURE_* enum value: */
#define XFEATURE_MASK_FP		(1 << XFEATURE_FP)
#define XFEATURE_MASK_SSE		(1 << XFEATURE_SSE)
#define XFEATURE_MASK_YMM		(1 << XFEATURE_YMM)
#define XFEATURE_MASK_BNDREGS		(1 << XFEATURE_BNDREGS)
#define XFEATURE_MASK_BNDCSR		(1 << XFEATURE_BNDCSR)
#define XFEATURE_MASK_OPMASK		(1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256		(1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM		(1 << XFEATURE_Hi16_ZMM)

/* Composite masks for commonly-grouped features: */
#define XFEATURE_MASK_FPSSE		(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK \
					 | XFEATURE_MASK_ZMM_Hi256 \
					 | XFEATURE_MASK_Hi16_ZMM)

/* First feature number beyond the legacy FP/SSE states: */
#define FIRST_EXTENDED_XFEATURE		XFEATURE_YMM
 * There are 16x 256-bit AVX registers named YMM0-YMM15.
 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
 * and are stored in 'struct fxregs_state::xmm_space[]'.
 * The high 128 bits are stored here:
 *    16x 128 bits == 256 bytes.
/* Intel MPX support: */
155 struct bndreg bndreg[4];
156 struct bndcsr bndcsr;
159 struct xstate_header {
163 } __attribute__((packed));
 * This is our most modern FPU state format, as saved by the XSAVE
 * and restored by the XRSTOR instructions.
 * It consists of a legacy fxregs portion, an xstate header and
 * subsequent areas as defined by the xstate header. Not all CPUs
 * support all the extensions, so the size of the extended area
 * can vary quite a bit between CPUs.
175 struct fxregs_state i387;
176 struct xstate_header header;
177 u8 extended_state_area[0];
178 } __attribute__ ((packed, aligned (64)));
 * This is a union of all the possible FPU state formats
 * put together, so that we can pick the right one at runtime.
 * The size of the structure is determined by the largest
 * member - which is the xsave area. The padding is there
 * to ensure that statically-allocated task_structs (just
 * the init_task today) have enough space.
190 struct fregs_state fsave;
191 struct fxregs_state fxsave;
192 struct swregs_state soft;
193 struct xregs_state xsave;
194 u8 __padding[PAGE_SIZE];
 * Highest level per task FPU state data structure that
 * contains the FPU register state plus various FPU
	 * Records the last CPU on which this context was loaded into
	 * FPU registers. (In the lazy-restore case we might be
	 * able to reuse FPU registers across multiple context switches
	 * this way, if no intermediate task used the FPU.)
	 * A value of -1 is used to indicate that the FPU state in context
	 * memory is newer than the FPU state in registers, and that the
	 * FPU state should be reloaded next time the task is run.
215 unsigned int last_cpu;
	 * This flag indicates whether this context is active: if the task
	 * is not running then we can restore from this context, if the task
	 * is running then we should save into this context.
224 unsigned char fpstate_active;
	 * This flag determines whether a given context is actively
	 * loaded into the FPU's registers and that those registers
	 * represent the task's current FPU state.
	 * Note the interaction with fpstate_active:
	 * # task does not use the FPU:
	 *   fpstate_active == 0
	 * # task uses the FPU and regs are active:
	 *   fpstate_active == 1 && fpregs_active == 1
	 * # the regs are inactive but still match fpstate:
	 *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
	 * The third state is what we use for the lazy restore optimization
	 * on lazy-switching CPUs.
247 unsigned char fpregs_active;
	 * This counter contains the number of consecutive context switches
	 * during which the FPU stays used. If this is over a threshold, the
	 * lazy FPU restore logic becomes eager, to save the trap overhead.
	 * This is an unsigned char so that after 256 iterations the counter
	 * wraps and the context switch behavior turns lazy again; this is to
	 * deal with bursty apps that only use the FPU for a short time:
259 unsigned char counter;
	 * In-memory copy of all FPU registers that we save/restore
	 * over context switches. If the task is using the FPU then
	 * the registers in the FPU are more recent than this state
	 * copy. If the task context-switches away then they get
	 * saved here and represent the FPU state.
	 * After context switches there may be a (short) time period
	 * during which the in-FPU hardware registers are unchanged
	 * and still perfectly match this state, if the tasks
	 * scheduled afterwards are not using the FPU.
	 * This is the 'lazy restore' window of optimization, which
	 * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
	 * We detect whether a subsequent task uses the FPU via setting
	 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
	 * During this window, if the task gets scheduled again, we
	 * might be able to skip having to do a restore from this
	 * memory buffer to the hardware registers - at the cost of
	 * incurring the overhead of #NM fault traps.
	 * Note that on modern CPUs that support the XSAVEOPT (or other
	 * optimized XSAVE instructions), we don't use #NM traps anymore,
	 * as the hardware can track whether FPU registers need saving
	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
	 * logic, which unconditionally saves/restores all FPU state
	 * across context switches. (if FPU state exists.)
292 union fpregs_state state;
	 * WARNING: 'state' is dynamically-sized. Do not put
	 * anything after it here.
299 #endif /* _ASM_X86_FPU_H */