/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/timer.hpp"
#include "signals_posix.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>

#define REG_FP 29
#define REG_LR 30

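// Return an address within the current C stack frame. Note that
// __builtin_frame_address(0) yields this function's frame address rather than
// the exact sp, which is close enough for the callers of this helper
// (stack bounds and alignment checks, error reporting).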
NOINLINE address os::current_stack_pointer() {
  return (address)__builtin_frame_address(0);
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) 0xffffffffffff;
}

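// Accessors for the register state saved in a Linux/AArch64 signal context.
// uc_mcontext holds pc, sp and the general-purpose registers x0-x30; the frame
// pointer and link register are regs[REG_FP] and regs[REG_LR] (29 and 30 above).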
address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->uc_mcontext.pc;
}

void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
  uc->uc_mcontext.pc = (intptr_t)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.sp;
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.regs[REG_FP];
}

address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {

  address epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != nullptr) {
    epc = os::Posix::ucontext_get_pc(uc);
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    epc = nullptr;
    if (ret_sp) *ret_sp = (intptr_t *)nullptr;
    if (ret_fp) *ret_fp = (intptr_t *)nullptr;
  }

  return epc;
}

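// Build a frame object from a signal context. If the faulting pc is not
// readable (e.g. a call through a corrupted function pointer), fall back to
// the compiled-frame layout, where LR still holds the return address.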
frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  if (!is_readable_pointer(epc)) {
    // Try to recover from calling into bad memory
    // Assume new frame has not been set up, the same as
    // compiled frame stack bang
    return fetch_compiled_frame_from_context(ucVoid);
  }
  return frame(sp, fp, epc);
}

frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  // In compiled code, the stack banging is performed before LR
  // has been saved in the frame. LR is live, and SP and FP
  // belong to the caller.
  intptr_t* fp = os::Linux::ucontext_get_fp(uc);
  intptr_t* sp = os::Linux::ucontext_get_sp(uc);
  address pc = (address)(uc->uc_mcontext.regs[REG_LR]
                         - NativeInstruction::instruction_size);
  return frame(sp, fp, pc);
}

// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
// The "Procedure Call Standard for the Arm 64-bit Architecture" doesn't
// specify a location for the frame record within a stack frame (6.4.6).
// GCC currently chooses to save it at the top of the frame (lowest address).
// This means that using fr->sender_sp() to set the caller's frame _unextended_sp,
// as we do in x86, is wrong. Using fr->link() instead only makes sense for
// native frames. Setting a correct value for _unextended_sp is important
// if this value is later used to get that frame's caller. This will happen
// if we end up calling frame::sender_for_compiled_frame(), which will be the
// case if the _pc is associated with a CodeBlob that has a _frame_size > 0
// (nmethod, runtime stub, safepoint stub, etc).
frame os::get_sender_for_C_frame(frame* fr) {
  address pc = fr->sender_pc();
  CodeBlob* cb = CodeCache::find_blob(pc);
  bool use_codeblob = cb != nullptr && cb->frame_size() > 0;
  assert(!use_codeblob || !Interpreter::contains(pc), "should not be an interpreter frame");
  intptr_t* sender_sp = use_codeblob ? (fr->link() + frame::metadata_words - cb->frame_size()) : fr->link();
  return frame(sender_sp, sender_sp, fr->link(), pc, cb, true /* allow_cb_null */);
}

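// Build a frame describing this function's own activation, then step out one
// level so the result describes our caller (the frame of whoever called
// os::current_frame()).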
NOINLINE frame os::current_frame() {
  intptr_t *fp = *(intptr_t **)__builtin_frame_address(0);
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

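// Platform-specific part of the HotSpot signal handler. Decides whether a trap
// raised in Java or VM code (SIGSEGV, SIGBUS, SIGILL, SIGTRAP, SIGFPE) can be
// resolved by redirecting the thread to a runtime stub: stack overflow handling,
// safepoint polls, implicit null checks, implicit divide-by-zero, not-entrant
// nmethod traps and unsafe memory accesses. Returns true if the signal was
// handled here, false to let the generic handler continue.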
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  /*
  NOTE: does not seem to work on linux.
  if (info == nullptr || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = nullptr;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
  */
  // decide if this trap can be handled by a stub
  address stub = nullptr;

  address pc = nullptr;

  //%note os_trap_1
  if (info != nullptr && uc != nullptr && thread != nullptr) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
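    // (The kernel may report si_addr with the top/tag byte cleared; if bit 55
    // of the reported address is set, restore 0xFF in bits 56-63 so the address
    // is canonical again before it is range-checked below.)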
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob(pc);
        nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
        bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
        if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_memory_access) {
            next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);

        ShouldNotReachHere();

      } else if (sig == SIGFPE &&
                 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeMemoryAccess::contains_pc(pc)) {
        next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

  if (stub != nullptr) {
    // save all thread context in case we need to restore it
    if (thread != nullptr) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}

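// FPU state hooks. AArch64 has no x87-style control word to set up per thread;
// these are no-ops here, presumably relying on the default FPCR settings
// provided by the kernel.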
void os::Linux::init_thread_fpu_state(void) {
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::_java_thread_min_stack_allowed = 72 * K;
size_t os::_vm_internal_thread_min_stack_allowed = 72 * K;

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
  return s;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == nullptr) return;

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
  for (int r = 0; r < 31; r++) {
    st->print_cr("R%d=" INTPTR_FORMAT, r, (uintptr_t)uc->uc_mcontext.regs[r]);
  }
  st->cr();
}

void os::print_tos_pc(outputStream *st, const void *context) {
  if (context == nullptr) return;

  const ucontext_t* uc = (const ucontext_t*)context;

  address sp = (address)os::Linux::ucontext_get_sp(uc);
  print_tos(st, sp);
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::fetch_frame_from_context(uc).pc();
  print_instructions(st, pc);
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context, int& continuation) {
  const int register_count = 32 /* r0-r31 */;
  int n = continuation;
  assert(n >= 0 && n <= register_count, "Invalid continuation value");
  if (context == nullptr || n == register_count) {
    return;
  }

  const ucontext_t *uc = (const ucontext_t*)context;
  while (n < register_count) {
    // Update continuation with next index before printing location
    continuation = n + 1;
    st->print("R%-2d=", n);
    print_location(st, uc->uc_mcontext.regs[n]);
    ++n;
  }
}

void os::setup_fpu() {
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif

int os::extra_bang_size_in_bytes() {
  // AArch64 does not require the additional stack bang.
  return 0;
}

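// Copy a single 64-bit element. On AArch64 an aligned 64-bit load/store is
// single-copy atomic, so a plain jlong assignment is sufficient to move one
// element without tearing.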
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
  *(jlong *) dst = *(const jlong *) src;
}

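// SpinPause dispatches to a generated stub so that the spin-wait hint actually
// emitted (e.g. ISB, YIELD or a plain NOP) can be chosen at VM startup for the
// current hardware rather than being fixed at build time.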
410extern "C" {
411int SpinPause() {
412using spin_wait_func_ptr_t = void (*)();
413spin_wait_func_ptr_t func = CAST_TO_FN_PTR(spin_wait_func_ptr_t, StubRoutines::aarch64::spin_wait());
414assert(func != nullptr, "StubRoutines::aarch64::spin_wait must not be null.");
415(*func)();
416// If StubRoutines::aarch64::spin_wait consists of only a RET,
417// SpinPause can be considered as implemented. There will be a sequence
418// of instructions for:
419// - call of SpinPause
420// - load of StubRoutines::aarch64::spin_wait stub pointer
421// - indirect call of the stub
422// - return from the stub
423// - return from SpinPause
424// So '1' always is returned.
425return 1;
426}
427
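  // Element-wise conjoint (possibly overlapping) copies. The copy direction is
  // chosen from the relative order of 'from' and 'to' so overlapping ranges are
  // handled correctly, and each element is moved with a single aligned access
  // so concurrent readers do not see torn elements.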
  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
    if (from > to) {
      const jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
    if (from > to) {
      const jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }

  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
    if (from > to) {
      const jlong *end = from + count;
      while (from < end)
        atomic_copy64(from++, to++);
    }
    else if (from < to) {
      const jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        atomic_copy64(from--, to--);
    }
  }

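  // The arrayof variants operate on HeapWord-aligned array data and simply
  // defer to memmove; 'count' is in elements, hence the scaling to bytes
  // (* 2, * 4, * 8) for the wider element types.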
  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};