qemu
454 lines · 13.6 KB
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation of Linux signals
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */
7
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/loongarch/internals.h"
#include "target/loongarch/vec.h"
#include "vdso-asmoffset.h"
16
17/* FP context was used */
18#define SC_USED_FP (1 << 0)
19
20struct target_sigcontext {
21abi_ulong sc_pc;
22abi_ulong sc_regs[32];
23abi_uint sc_flags;
24abi_ulong sc_extcontext[0] QEMU_ALIGNED(16);
25};
26
27QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext);
28QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc)
29!= offsetof_sigcontext_pc);
30QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs)
31!= offsetof_sigcontext_gr);
32
33#define FPU_CTX_MAGIC 0x46505501
34#define FPU_CTX_ALIGN 8
35struct target_fpu_context {
36abi_ulong regs[32];
37abi_ulong fcc;
38abi_uint fcsr;
39} QEMU_ALIGNED(FPU_CTX_ALIGN);
40
41QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs)
42!= offsetof_fpucontext_fr);
43
44#define LSX_CTX_MAGIC 0x53580001
45#define LSX_CTX_ALIGN 16
46struct target_lsx_context {
47abi_ulong regs[2 * 32];
48abi_ulong fcc;
49abi_uint fcsr;
50} QEMU_ALIGNED(LSX_CTX_ALIGN);
51
52#define LASX_CTX_MAGIC 0x41535801
53#define LASX_CTX_ALIGN 32
54struct target_lasx_context {
55abi_ulong regs[4 * 32];
56abi_ulong fcc;
57abi_uint fcsr;
58} QEMU_ALIGNED(LASX_CTX_ALIGN);
59
60#define CONTEXT_INFO_ALIGN 16
61struct target_sctx_info {
62abi_uint magic;
63abi_uint size;
64abi_ulong padding;
65} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);
66
67QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info);
68
69struct target_ucontext {
70abi_ulong tuc_flags;
71abi_ptr tuc_link;
72target_stack_t tuc_stack;
73target_sigset_t tuc_sigmask;
74uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
75struct target_sigcontext tuc_mcontext;
76};
77
78struct target_rt_sigframe {
79struct target_siginfo rs_info;
80struct target_ucontext rs_uc;
81};
82
83QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
84!= sizeof_rt_sigframe);
85QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext)
86!= offsetof_sigcontext);
87
88/*
89* These two structures are not present in guest memory, are private
90* to the signal implementation, but are largely copied from the
91* kernel's signal implementation.
92*/
93struct ctx_layout {
94void *haddr;
95abi_ptr gaddr;
96unsigned int size;
97};
98
99struct extctx_layout {
100unsigned long size;
101unsigned int flags;
102struct ctx_layout fpu;
103struct ctx_layout lsx;
104struct ctx_layout lasx;
105struct ctx_layout end;
106};
107
108static abi_ptr extframe_alloc(struct extctx_layout *extctx,
109struct ctx_layout *sctx, unsigned size,
110unsigned align, abi_ptr orig_sp)
111{
112abi_ptr sp = orig_sp;
113
114sp -= sizeof(struct target_sctx_info) + size;
115align = MAX(align, CONTEXT_INFO_ALIGN);
116sp = ROUND_DOWN(sp, align);
117sctx->gaddr = sp;
118
119size = orig_sp - sp;
120sctx->size = size;
121extctx->size += size;
122
123return sp;
124}
125
126static abi_ptr setup_extcontext(CPULoongArchState *env,
127struct extctx_layout *extctx, abi_ptr sp)
128{
129memset(extctx, 0, sizeof(struct extctx_layout));
130
131/* Grow down, alloc "end" context info first. */
132sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);
133
134/* For qemu, there is no lazy fp context switch, so fp always present. */
135extctx->flags = SC_USED_FP;
136
137if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
138sp = extframe_alloc(extctx, &extctx->lasx,
139sizeof(struct target_lasx_context), LASX_CTX_ALIGN, sp);
140} else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
141sp = extframe_alloc(extctx, &extctx->lsx,
142sizeof(struct target_lsx_context), LSX_CTX_ALIGN, sp);
143} else {
144sp = extframe_alloc(extctx, &extctx->fpu,
145sizeof(struct target_fpu_context), FPU_CTX_ALIGN, sp);
146}
147
148return sp;
149}
150
151static void setup_sigframe(CPULoongArchState *env,
152struct target_sigcontext *sc,
153struct extctx_layout *extctx)
154{
155struct target_sctx_info *info;
156int i;
157
158__put_user(extctx->flags, &sc->sc_flags);
159__put_user(env->pc, &sc->sc_pc);
160__put_user(0, &sc->sc_regs[0]);
161for (i = 1; i < 32; ++i) {
162__put_user(env->gpr[i], &sc->sc_regs[i]);
163}
164
165/*
166* Set extension context
167*/
168
169if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
170struct target_lasx_context *lasx_ctx;
171info = extctx->lasx.haddr;
172
173__put_user(LASX_CTX_MAGIC, &info->magic);
174__put_user(extctx->lasx.size, &info->size);
175
176lasx_ctx = (struct target_lasx_context *)(info + 1);
177
178for (i = 0; i < 32; ++i) {
179__put_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
180__put_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
181__put_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
182__put_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
183}
184__put_user(read_fcc(env), &lasx_ctx->fcc);
185__put_user(env->fcsr0, &lasx_ctx->fcsr);
186} else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
187struct target_lsx_context *lsx_ctx;
188info = extctx->lsx.haddr;
189
190__put_user(LSX_CTX_MAGIC, &info->magic);
191__put_user(extctx->lsx.size, &info->size);
192
193lsx_ctx = (struct target_lsx_context *)(info + 1);
194
195for (i = 0; i < 32; ++i) {
196__put_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
197__put_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
198}
199__put_user(read_fcc(env), &lsx_ctx->fcc);
200__put_user(env->fcsr0, &lsx_ctx->fcsr);
201} else {
202struct target_fpu_context *fpu_ctx;
203info = extctx->fpu.haddr;
204
205__put_user(FPU_CTX_MAGIC, &info->magic);
206__put_user(extctx->fpu.size, &info->size);
207
208fpu_ctx = (struct target_fpu_context *)(info + 1);
209
210for (i = 0; i < 32; ++i) {
211__put_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
212}
213__put_user(read_fcc(env), &fpu_ctx->fcc);
214__put_user(env->fcsr0, &fpu_ctx->fcsr);
215}
216
217/*
218* Set end context
219*/
220info = extctx->end.haddr;
221__put_user(0, &info->magic);
222__put_user(0, &info->size);
223}
224
225static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
226{
227memset(extctx, 0, sizeof(*extctx));
228
229while (1) {
230abi_uint magic, size;
231
232if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
233return false;
234}
235
236switch (magic) {
237case 0: /* END */
238extctx->end.gaddr = frame;
239extctx->end.size = size;
240extctx->size += size;
241return true;
242
243case FPU_CTX_MAGIC:
244if (size < (sizeof(struct target_sctx_info) +
245sizeof(struct target_fpu_context))) {
246return false;
247}
248extctx->fpu.gaddr = frame;
249extctx->fpu.size = size;
250extctx->size += size;
251break;
252case LSX_CTX_MAGIC:
253if (size < (sizeof(struct target_sctx_info) +
254sizeof(struct target_lsx_context))) {
255return false;
256}
257extctx->lsx.gaddr = frame;
258extctx->lsx.size = size;
259extctx->size += size;
260break;
261case LASX_CTX_MAGIC:
262if (size < (sizeof(struct target_sctx_info) +
263sizeof(struct target_lasx_context))) {
264return false;
265}
266extctx->lasx.gaddr = frame;
267extctx->lasx.size = size;
268extctx->size += size;
269break;
270default:
271return false;
272}
273
274frame += size;
275}
276}
277
278static void restore_sigframe(CPULoongArchState *env,
279struct target_sigcontext *sc,
280struct extctx_layout *extctx)
281{
282int i;
283abi_ulong fcc;
284
285__get_user(env->pc, &sc->sc_pc);
286for (i = 1; i < 32; ++i) {
287__get_user(env->gpr[i], &sc->sc_regs[i]);
288}
289
290if (extctx->lasx.haddr) {
291struct target_lasx_context *lasx_ctx =
292extctx->lasx.haddr + sizeof(struct target_sctx_info);
293
294for (i = 0; i < 32; ++i) {
295__get_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
296__get_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
297__get_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
298__get_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
299}
300__get_user(fcc, &lasx_ctx->fcc);
301write_fcc(env, fcc);
302__get_user(env->fcsr0, &lasx_ctx->fcsr);
303restore_fp_status(env);
304} else if (extctx->lsx.haddr) {
305struct target_lsx_context *lsx_ctx =
306extctx->lsx.haddr + sizeof(struct target_sctx_info);
307
308for (i = 0; i < 32; ++i) {
309__get_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
310__get_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
311}
312__get_user(fcc, &lsx_ctx->fcc);
313write_fcc(env, fcc);
314__get_user(env->fcsr0, &lsx_ctx->fcsr);
315restore_fp_status(env);
316} else if (extctx->fpu.haddr) {
317struct target_fpu_context *fpu_ctx =
318extctx->fpu.haddr + sizeof(struct target_sctx_info);
319
320for (i = 0; i < 32; ++i) {
321__get_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
322}
323__get_user(fcc, &fpu_ctx->fcc);
324write_fcc(env, fcc);
325__get_user(env->fcsr0, &fpu_ctx->fcsr);
326restore_fp_status(env);
327}
328}
329
330/*
331* Determine which stack to use.
332*/
333static abi_ptr get_sigframe(struct target_sigaction *ka,
334CPULoongArchState *env,
335struct extctx_layout *extctx)
336{
337abi_ulong sp;
338
339sp = target_sigsp(get_sp_from_cpustate(env), ka);
340sp = ROUND_DOWN(sp, 16);
341sp = setup_extcontext(env, extctx, sp);
342sp -= sizeof(struct target_rt_sigframe);
343
344assert(QEMU_IS_ALIGNED(sp, 16));
345
346return sp;
347}
348
349void setup_rt_frame(int sig, struct target_sigaction *ka,
350target_siginfo_t *info,
351target_sigset_t *set, CPULoongArchState *env)
352{
353struct target_rt_sigframe *frame;
354struct extctx_layout extctx;
355abi_ptr frame_addr;
356int i;
357
358frame_addr = get_sigframe(ka, env, &extctx);
359trace_user_setup_rt_frame(env, frame_addr);
360
361frame = lock_user(VERIFY_WRITE, frame_addr,
362sizeof(*frame) + extctx.size, 0);
363if (!frame) {
364force_sigsegv(sig);
365return;
366}
367
368if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
369extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
370extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
371} else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
372extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
373extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
374} else {
375extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
376extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
377}
378
379frame->rs_info = *info;
380
381__put_user(0, &frame->rs_uc.tuc_flags);
382__put_user(0, &frame->rs_uc.tuc_link);
383target_save_altstack(&frame->rs_uc.tuc_stack, env);
384
385setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);
386
387for (i = 0; i < TARGET_NSIG_WORDS; i++) {
388__put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
389}
390
391env->gpr[4] = sig;
392env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
393env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
394env->gpr[3] = frame_addr;
395env->gpr[1] = default_rt_sigreturn;
396
397env->pc = ka->_sa_handler;
398unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
399}
400
401long do_rt_sigreturn(CPULoongArchState *env)
402{
403struct target_rt_sigframe *frame;
404struct extctx_layout extctx;
405abi_ulong frame_addr;
406sigset_t blocked;
407
408frame_addr = env->gpr[3];
409trace_user_do_rt_sigreturn(env, frame_addr);
410
411if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
412goto badframe;
413}
414
415frame = lock_user(VERIFY_READ, frame_addr,
416sizeof(*frame) + extctx.size, 1);
417if (!frame) {
418goto badframe;
419}
420
421if (extctx.lasx.gaddr) {
422extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
423} else if (extctx.lsx.gaddr) {
424extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
425} else if (extctx.fpu.gaddr) {
426extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
427}
428
429target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
430set_sigmask(&blocked);
431
432restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);
433
434target_restore_altstack(&frame->rs_uc.tuc_stack, env);
435
436unlock_user(frame, frame_addr, 0);
437return -QEMU_ESIGRETURN;
438
439badframe:
440force_sig(TARGET_SIGSEGV);
441return -QEMU_ESIGRETURN;
442}
443
444void setup_sigtramp(abi_ulong sigtramp_page)
445{
446uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
447assert(tramp != NULL);
448
449__put_user(0x03822c0b, tramp + 0); /* ori a7, zero, 0x8b */
450__put_user(0x002b0000, tramp + 1); /* syscall 0 */
451
452default_rt_sigreturn = sigtramp_page;
453unlock_user(tramp, sigtramp_page, 8);
454}
455