jdk

Форк
0
/
os_linux_riscv.cpp 
487 строк · 16.2 Кб
1
/*
2
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
3
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
4
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
 *
6
 * This code is free software; you can redistribute it and/or modify it
7
 * under the terms of the GNU General Public License version 2 only, as
8
 * published by the Free Software Foundation.
9
 *
10
 * This code is distributed in the hope that it will be useful, but WITHOUT
11
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13
 * version 2 for more details (a copy is included in the LICENSE file that
14
 * accompanied this code).
15
 *
16
 * You should have received a copy of the GNU General Public License version
17
 * 2 along with this work; if not, write to the Free Software Foundation,
18
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
 *
20
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
 * or visit www.oracle.com if you need additional information or have any
22
 * questions.
23
 *
24
 */
25

26
// no precompiled headers
27
#include "asm/macroAssembler.hpp"
28
#include "classfile/vmSymbols.hpp"
29
#include "code/codeCache.hpp"
30
#include "code/nativeInst.hpp"
31
#include "code/vtableStubs.hpp"
32
#include "interpreter/interpreter.hpp"
33
#include "jvm.h"
34
#include "memory/allocation.inline.hpp"
35
#include "os_linux.hpp"
36
#include "os_posix.hpp"
37
#include "prims/jniFastGetField.hpp"
38
#include "prims/jvm_misc.hpp"
39
#include "runtime/arguments.hpp"
40
#include "runtime/frame.inline.hpp"
41
#include "runtime/globals.hpp"
42
#include "runtime/interfaceSupport.inline.hpp"
43
#include "runtime/java.hpp"
44
#include "runtime/javaCalls.hpp"
45
#include "runtime/javaThread.hpp"
46
#include "runtime/mutexLocker.hpp"
47
#include "runtime/osThread.hpp"
48
#include "runtime/safepointMechanism.hpp"
49
#include "runtime/sharedRuntime.hpp"
50
#include "runtime/stubRoutines.hpp"
51
#include "runtime/timer.hpp"
52
#include "signals_posix.hpp"
53
#include "utilities/debug.hpp"
54
#include "utilities/events.hpp"
55
#include "utilities/vmError.hpp"
56

57
// put OS-includes here
58
# include <dlfcn.h>
59
# include <fpu_control.h>
60
# include <errno.h>
61
# include <pthread.h>
62
# include <signal.h>
63
# include <stdio.h>
64
# include <stdlib.h>
65
# include <sys/mman.h>
66
# include <sys/resource.h>
67
# include <sys/socket.h>
68
# include <sys/stat.h>
69
# include <sys/time.h>
70
# include <sys/types.h>
71
# include <sys/utsname.h>
72
# include <sys/wait.h>
73
# include <poll.h>
74
# include <pwd.h>
75
# include <ucontext.h>
76
# include <unistd.h>
77

78
// Indices into uc_mcontext.__gregs for the saved x1 (ra, the link
// register) and x8 (s0, the frame pointer) on linux-riscv64.
#define REG_LR       1
#define REG_FP       8
80

81
// Returns this function's own frame address as an approximation of the
// caller's stack pointer; NOINLINE keeps the frame distinct.
NOINLINE address os::current_stack_pointer() {
  void* own_frame = __builtin_frame_address(0);
  return (address)own_frame;
}
84

85
// Returns a value guaranteed never to look like an address returned by
// reserve_memory — not even in its subfields (as defined by the CPU
// immediate fields, if the CPU splits constants across multiple
// instructions).
char* os::non_memory_address_word() {
  return reinterpret_cast<char*>(0xffffffffffff);
}
92

93
// Extracts the program counter from a signal-time ucontext.
address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
  return (address)ctx->uc_mcontext.__gregs[REG_PC];
}
96

97
void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
98
  uc->uc_mcontext.__gregs[REG_PC] = (intptr_t)pc;
99
}
100

101
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
102
  return (intptr_t*)uc->uc_mcontext.__gregs[REG_SP];
103
}
104

105
intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
106
  return (intptr_t*)uc->uc_mcontext.__gregs[REG_FP];
107
}
108

109
// Pulls pc/sp/fp out of a signal-time ucontext. Either out-parameter may
// be null when the caller does not need that value; when ucVoid itself is
// null, the outputs are set to null and a null pc is returned.
address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc == nullptr) {
    // No context available: report everything as unknown.
    if (ret_sp != nullptr) { *ret_sp = nullptr; }
    if (ret_fp != nullptr) { *ret_fp = nullptr; }
    return nullptr;
  }

  if (ret_sp != nullptr) { *ret_sp = os::Linux::ucontext_get_sp(uc); }
  if (ret_fp != nullptr) { *ret_fp = os::Linux::ucontext_get_fp(uc); }
  return os::Posix::ucontext_get_pc(uc);
}
134

135
// Reconstructs the caller's frame at a stack-bang fault in compiled code.
// The banging happens before RA has been saved in the new frame, so RA is
// live and SP and FP still belong to the caller.
frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  intptr_t* sender_sp = os::Linux::ucontext_get_sp(uc);
  intptr_t* sender_fp = os::Linux::ucontext_get_fp(uc);
  // Step back one instruction from RA so the pc lands inside the call site.
  intptr_t ra = uc->uc_mcontext.__gregs[REG_LR];
  address sender_pc = (address)(ra - NativeInstruction::instruction_size);
  return frame(sender_sp, sender_fp, sender_pc);
}
146

147
// Builds a frame from a signal-time ucontext, falling back to the
// compiled-frame shape when the recorded pc is not readable.
frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp = nullptr;
  intptr_t* fp = nullptr;
  address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  if (is_readable_pointer(epc)) {
    return frame(sp, fp, epc);
  }
  // Try to recover from calling into bad memory: assume the new frame has
  // not been set up yet, the same situation as a compiled-frame stack bang.
  return fetch_compiled_frame_from_context(ucVoid);
}
159

160
// By default, gcc always saves frame pointer rfp on this stack. This
161
// may get turned off by -fomit-frame-pointer.
162
frame os::get_sender_for_C_frame(frame* fr) {
163
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
164
}
165

166
// Describes this function's own activation record and returns its sender,
// or an empty frame when the C stack is not walkable from here.
NOINLINE frame os::current_frame() {
  intptr_t** frame_addr = (intptr_t**)__builtin_frame_address(0);
  if (frame_addr == nullptr) {
    ShouldNotReachHere();
    return frame();
  }
  frame myframe((intptr_t*)os::current_stack_pointer(),
                frame_addr[frame::link_offset],
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  }
  return os::get_sender_for_C_frame(&myframe);
}
183

184
// Utility functions
185
// Platform (RISC-V) part of HotSpot signal handling: decides whether the
// signal 'sig', delivered with fault info 'info' and context 'uc' on
// 'thread', can be handled by redirecting execution to a runtime stub.
// Returns true when the signal was consumed (stack-overflow recovery
// succeeded, or the context's pc was rewritten to a stub); false to let
// the generic POSIX signal machinery continue.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  // decide if this trap can be handled by a stub
  address stub = nullptr;

  address pc = nullptr;

  //%note os_trap_1
  if (info != nullptr && uc != nullptr && thread != nullptr) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        // May set 'stub' as a side effect; returning true means recovered.
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Fault on the safepoint polling page: divert to the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob(pc);
        nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
        bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
        if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
          // Resume after the faulting access instead of killing the VM.
          address next_pc = Assembler::locate_next_instruction(pc);
          if (is_unsafe_memory_access) {
            next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);

        ShouldNotReachHere();
      } else if (sig == SIGFPE  &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        // Implicit arithmetic exception (division by zero).
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
                sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
                thread->doing_unsafe_access()) {
      // Unsafe access while in VM/native state: skip past the faulting access.
      address next_pc = Assembler::locate_next_instruction(pc);
      if (UnsafeMemoryAccess::contains_pc(pc)) {
        next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr_slow = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr_slow != (address)-1) {
        stub = addr_slow;
      }
    }
  }

  if (stub != nullptr) {
    // save all thread context in case we need to restore it
    if (thread != nullptr) {
      thread->set_saved_exception_pc(pc);
    }

    // Redirect execution to the stub by rewriting the context's pc.
    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}
303

304
// No per-thread FPU state initialization is needed on RISC-V;
// intentionally a no-op.
void os::Linux::init_thread_fpu_state(void) {
}
306

307
// RISC-V has no x87-style FPU control word; always reports 0.
int os::Linux::get_fpu_control_word(void) {
  return 0;
}
310

311
// RISC-V has no x87-style FPU control word; intentionally a no-op.
void os::Linux::set_fpu_control_word(int fpu_control) {
}
313

314
////////////////////////////////////////////////////////////////////////////////
315
// thread stack
316

317
// Minimum usable stack sizes required to get to user code, per thread
// type. Space for HotSpot guard pages is added later.
size_t os::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::_java_thread_min_stack_allowed = 72 * K;
size_t os::_vm_internal_thread_min_stack_allowed = 72 * K;
322

323
// return default stack size for thr_type
324
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
325
  // default stack size (compiler thread needs larger stack)
326
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
327
  return s;
328
}
329

330
/////////////////////////////////////////////////////////////////////////////
331
// helper functions for fatal error handler
332

333
// Register names (ABI mnemonics) for the fatal error handler, indexed to
// match uc_mcontext.__gregs: slot 0 holds the pc, slots 1..31 the integer
// registers x1..x31 (see print_context / print_register_info below).
static const char* reg_abi_names[] = {
  "pc",
  "x1(ra)", "x2(sp)", "x3(gp)", "x4(tp)",
  "x5(t0)", "x6(t1)", "x7(t2)",
  "x8(s0)", "x9(s1)",
  "x10(a0)", "x11(a1)", "x12(a2)", "x13(a3)", "x14(a4)", "x15(a5)", "x16(a6)", "x17(a7)",
  "x18(s2)", "x19(s3)", "x20(s4)", "x21(s5)", "x22(s6)", "x23(s7)", "x24(s8)", "x25(s9)", "x26(s10)", "x27(s11)",
  "x28(t3)", "x29(t4)","x30(t5)", "x31(t6)"
};
342

343
// Dumps all 32 saved context slots (pc plus x1-x31) for the fatal error
// handler; a null context prints nothing.
void os::print_context(outputStream *st, const void *context) {
  if (context == nullptr) return;

  const ucontext_t* uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
  for (int i = 0; i < 32; i++) {
    uintptr_t value = (uintptr_t)uc->uc_mcontext.__gregs[i];
    st->print_cr("%-*.*s=" INTPTR_FORMAT, 8, 8, reg_abi_names[i], value);
  }
  st->cr();
}
354

355
// Prints the words at the top of the faulting stack, then the
// instructions around the faulting pc, for the fatal error handler.
void os::print_tos_pc(outputStream *st, const void *context) {
  if (context == nullptr) return;

  const ucontext_t* uc = (const ucontext_t*)context;

  print_tos(st, (address)os::Linux::ucontext_get_sp(uc));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  print_instructions(st, os::fetch_frame_from_context(uc).pc());
  st->cr();
}
371

372
// Prints the location description of each saved register. 'continuation'
// records progress so that, if printing one register crashes, the error
// handler can re-enter and resume with the next one.
void os::print_register_info(outputStream *st, const void *context, int& continuation) {
  const int register_count = 32;
  const int start = continuation;
  assert(start >= 0 && start <= register_count, "Invalid continuation value");
  if (context == nullptr || start == register_count) {
    return;
  }

  const ucontext_t* uc = (const ucontext_t*)context;
  for (int i = start; i < register_count; i++) {
    // Update continuation with next index before printing location
    continuation = i + 1;
    st->print("%-8.8s=", reg_abi_names[i]);
    print_location(st, uc->uc_mcontext.__gregs[i]);
  }
}
389

390
// Nothing to configure for the RISC-V FPU; intentionally empty.
void os::setup_fpu() {
}
392

393
#ifndef PRODUCT
394
void os::verify_stack_alignment() {
395
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
396
}
397
#endif
398

399
// RISC-V requires no extra stack bang beyond what shared code already does.
int os::extra_bang_size_in_bytes() {
  return 0;
}
402

403
// Copies a single 64-bit value from src to dst as one jlong load/store.
// NOTE(review): atomicity presumably relies on naturally-aligned 64-bit
// accesses being single ld/sd instructions on RV64 — assumes src and dst
// are 8-byte aligned; confirm for all callers.
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
  *(jlong *) dst = *(const jlong *) src;
}
406

407
extern "C" {
408
  int SpinPause() {
409
    if (UseZihintpause) {
410
      // PAUSE is encoded as a FENCE instruction with pred=W, succ=0, fm=0, rd=x0, and rs1=x0.
411
      // To do: __asm__ volatile("pause " : : : );
412
      // Since we're currently not passing '-march=..._zihintpause' to the compiler,
413
      // it will not recognize the "pause" instruction, hence the hard-coded instruction.
414
      __asm__ volatile(".word 0x0100000f  " : : : );
415
      return 1;
416
    }
417
    return 0;
418
  }
419

420
  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
421
    if (from > to) {
422
      const jshort *end = from + count;
423
      while (from < end) {
424
        *(to++) = *(from++);
425
      }
426
    } else if (from < to) {
427
      const jshort *end = from;
428
      from += count - 1;
429
      to   += count - 1;
430
      while (from >= end) {
431
        *(to--) = *(from--);
432
      }
433
    }
434
  }
435
  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
436
    if (from > to) {
437
      const jint *end = from + count;
438
      while (from < end) {
439
        *(to++) = *(from++);
440
      }
441
    } else if (from < to) {
442
      const jint *end = from;
443
      from += count - 1;
444
      to   += count - 1;
445
      while (from >= end) {
446
        *(to--) = *(from--);
447
      }
448
    }
449
  }
450

451
  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
452
    if (from > to) {
453
      const jlong *end = from + count;
454
      while (from < end) {
455
        atomic_copy64(from++, to++);
456
      }
457
    } else if (from < to) {
458
      const jlong *end = from;
459
      from += count - 1;
460
      to   += count - 1;
461
      while (from >= end) {
462
        atomic_copy64(from--, to--);
463
      }
464
    }
465
  }
466

467
  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
468
                                    HeapWord* to,
469
                                    size_t    count) {
470
    memmove(to, from, count);
471
  }
472
  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
473
                                      HeapWord* to,
474
                                      size_t    count) {
475
    memmove(to, from, count * 2);
476
  }
477
  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
478
                                    HeapWord* to,
479
                                    size_t    count) {
480
    memmove(to, from, count * 4);
481
  }
482
  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
483
                                     HeapWord* to,
484
                                     size_t    count) {
485
    memmove(to, from, count * 8);
486
  }
487
};
488

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.