/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
26
#include "code/codeCache.hpp"
27
#include "compiler/compilerDefinitions.inline.hpp"
28
#include "interpreter/invocationCounter.hpp"
30
#include "runtime/arguments.hpp"
31
#include "runtime/continuation.hpp"
32
#include "runtime/flags/jvmFlag.hpp"
33
#include "runtime/flags/jvmFlagAccess.hpp"
34
#include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
35
#include "runtime/flags/jvmFlagLimit.hpp"
36
#include "runtime/globals.hpp"
37
#include "runtime/globals_extension.hpp"
38
#include "utilities/defaultStream.hpp"
40
// Printable names for the CompilerType enum values, indexed by CompilerType.
// NOTE(review): the initializer list was lost in a bad merge/extraction;
// reconstructed to match the standard entries — confirm against upstream.
const char* compilertype2name_tab[compiler_number_of_types] = {
  "",
  "c1",
  "c2",
  "jvmci"
};
// Global compilation mode; starts at NORMAL and is refined in initialize().
CompilationModeFlag::Mode CompilationModeFlag::_mode = CompilationModeFlag::Mode::NORMAL;
49
static void print_mode_unavailable(const char* mode_name, const char* reason) {
50
warning("%s compilation mode unavailable because %s.", mode_name, reason);
53
bool CompilationModeFlag::initialize() {
55
// During parsing we want to be very careful not to use any methods of CompilerConfig that depend on
56
// CompilationModeFlag.
57
if (CompilationMode != nullptr) {
58
if (strcmp(CompilationMode, "default") == 0 || strcmp(CompilationMode, "normal") == 0) {
59
assert(_mode == Mode::NORMAL, "Precondition");
60
} else if (strcmp(CompilationMode, "quick-only") == 0) {
61
if (!CompilerConfig::has_c1()) {
62
print_mode_unavailable("quick-only", "there is no c1 present");
64
_mode = Mode::QUICK_ONLY;
66
} else if (strcmp(CompilationMode, "high-only") == 0) {
67
if (!CompilerConfig::has_c2() && !CompilerConfig::is_jvmci_compiler()) {
68
print_mode_unavailable("high-only", "there is no c2 or jvmci compiler present");
70
_mode = Mode::HIGH_ONLY;
72
} else if (strcmp(CompilationMode, "high-only-quick-internal") == 0) {
73
if (!CompilerConfig::has_c1() || !CompilerConfig::is_jvmci_compiler()) {
74
print_mode_unavailable("high-only-quick-internal", "there is no c1 and jvmci compiler present");
76
_mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
84
// Now that the flag is parsed, we can use any methods of CompilerConfig.
86
if (CompilerConfig::is_c1_simple_only()) {
87
_mode = Mode::QUICK_ONLY;
88
} else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
89
_mode = Mode::HIGH_ONLY;
90
} else if (CompilerConfig::is_jvmci_compiler_enabled() && CompilerConfig::is_c1_enabled() && !TieredCompilation) {
91
warning("Disabling tiered compilation with non-native JVMCI compiler is not recommended, "
92
"disabling intermediate compilation levels instead. ");
93
_mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
99
void CompilationModeFlag::print_error() {
100
jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', available modes are:", CompilationMode);
102
if (CompilerConfig::has_c1()) {
103
jio_fprintf(defaultStream::error_stream(), "%s quick-only", comma ? "," : "");
106
if (CompilerConfig::has_c2() || CompilerConfig::has_jvmci()) {
107
jio_fprintf(defaultStream::error_stream(), "%s high-only", comma ? "," : "");
110
if (CompilerConfig::has_c1() && CompilerConfig::has_jvmci()) {
111
jio_fprintf(defaultStream::error_stream(), "%s high-only-quick-internal", comma ? "," : "");
114
jio_fprintf(defaultStream::error_stream(), "\n");
117
// Returns threshold scaled with CompileThresholdScaling
118
intx CompilerConfig::scaled_compile_threshold(intx threshold) {
119
return scaled_compile_threshold(threshold, CompileThresholdScaling);
122
// Returns freq_log scaled with CompileThresholdScaling
123
intx CompilerConfig::scaled_freq_log(intx freq_log) {
124
return scaled_freq_log(freq_log, CompileThresholdScaling);
127
// For XXXThreshold flags, which all have a valid range of [0 .. max_jint]
128
intx CompilerConfig::jvmflag_scaled_compile_threshold(intx threshold) {
129
return MAX2((intx)0, MIN2(scaled_compile_threshold(threshold), (intx)max_jint));
132
// For XXXNotifyFreqLog flags, which all have a valid range of [0 .. 30]
133
intx CompilerConfig::jvmflag_scaled_freq_log(intx freq_log) {
134
return MAX2((intx)0, MIN2(scaled_freq_log(freq_log), (intx)30));
137
// Returns threshold scaled with the value of scale.
138
// If scale < 0.0, threshold is returned without scaling.
139
intx CompilerConfig::scaled_compile_threshold(intx threshold, double scale) {
140
assert(threshold >= 0, "must be");
141
if (scale == 1.0 || scale < 0.0) {
144
double v = threshold * scale;
145
assert(v >= 0, "must be");
146
if (g_isnan(v) || !g_isfinite(v)) {
150
(void) frexp(v, &exp);
151
int max_exp = sizeof(intx) * BitsPerByte - 1;
156
assert(r >= 0, "must be");
161
// Returns freq_log scaled with the value of scale.
162
// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
163
// If scale < 0.0, freq_log is returned without scaling.
164
intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
165
// Check if scaling is necessary or if negative value was specified.
166
if (scale == 1.0 || scale < 0.0) {
169
// Check values to avoid calculating log2 of 0.
170
if (scale == 0.0 || freq_log == 0) {
173
// Determine the maximum notification frequency value currently supported.
174
// The largest mask value that the interpreter/C1 can handle is
175
// of length InvocationCounter::number_of_count_bits. Mask values are always
176
// one bit shorter then the value of the notification frequency. Set
177
// max_freq_bits accordingly.
178
int max_freq_bits = InvocationCounter::number_of_count_bits + 1;
179
intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
181
if (scaled_freq == 0) {
182
// Return 0 right away to avoid calculating log2 of 0.
185
return MIN2(log2i(scaled_freq), max_freq_bits);
189
void CompilerConfig::set_client_emulation_mode_flags() {
190
assert(has_c1(), "Must have C1 compiler present");
191
CompilationModeFlag::set_quick_only();
193
FLAG_SET_ERGO(ProfileInterpreter, false);
195
FLAG_SET_ERGO(EnableJVMCI, false);
196
FLAG_SET_ERGO(UseJVMCICompiler, false);
198
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
199
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
201
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
202
FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
204
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
205
FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
207
if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
208
FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
210
if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
211
FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
213
if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
214
FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
216
if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
217
FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
219
if (FLAG_IS_DEFAULT(MaxRAM)) {
220
// Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
221
// heap setting done based on available phys_mem (see Arguments::set_heap_size).
222
FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
224
if (FLAG_IS_DEFAULT(CICompilerCount)) {
225
FLAG_SET_ERGO(CICompilerCount, 1);
229
bool CompilerConfig::is_compilation_mode_selected() {
230
return !FLAG_IS_DEFAULT(TieredCompilation) ||
231
!FLAG_IS_DEFAULT(TieredStopAtLevel) ||
232
!FLAG_IS_DEFAULT(CompilationMode)
233
JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
234
|| !FLAG_IS_DEFAULT(UseJVMCICompiler));
237
static bool check_legacy_flags() {
238
JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold));
239
if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
242
JVMFlag* on_stack_replace_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(OnStackReplacePercentage));
243
if (JVMFlagAccess::check_constraint(on_stack_replace_percentage_flag, JVMFlagLimit::get_constraint(on_stack_replace_percentage_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
246
JVMFlag* interpreter_profile_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(InterpreterProfilePercentage));
247
if (JVMFlagAccess::check_range(interpreter_profile_percentage_flag, false) != JVMFlag::SUCCESS) {
253
void CompilerConfig::set_legacy_emulation_flags() {
254
// Any legacy flags set?
255
if (!FLAG_IS_DEFAULT(CompileThreshold) ||
256
!FLAG_IS_DEFAULT(OnStackReplacePercentage) ||
257
!FLAG_IS_DEFAULT(InterpreterProfilePercentage)) {
258
if (CompilerConfig::is_c1_only() || CompilerConfig::is_c2_or_jvmci_compiler_only()) {
259
// This function is called before these flags are validated. In order to not confuse the user with extraneous
260
// error messages, we check the validity of these flags here and bail out if any of them are invalid.
261
if (!check_legacy_flags()) {
264
// Note, we do not scale CompileThreshold before this because the tiered flags are
265
// all going to be scaled further in set_compilation_policy_flags().
266
const intx threshold = CompileThreshold;
267
const intx profile_threshold = threshold * InterpreterProfilePercentage / 100;
268
const intx osr_threshold = threshold * OnStackReplacePercentage / 100;
269
const intx osr_profile_threshold = osr_threshold * InterpreterProfilePercentage / 100;
271
const intx threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? threshold : profile_threshold);
272
const intx osr_threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);
274
if (Tier0InvokeNotifyFreqLog > threshold_log) {
275
FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, MAX2<intx>(0, threshold_log));
278
// Note: Emulation oddity. The legacy policy limited the amount of callbacks from the
279
// interpreter for backedge events to once every 1024 counter increments.
280
// We simulate this behavior by limiting the backedge notification frequency to be
282
if (Tier0BackedgeNotifyFreqLog > osr_threshold_log) {
283
FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, MAX2<intx>(10, osr_threshold_log));
285
// Adjust the tiered policy flags to approximate the legacy behavior.
286
FLAG_SET_ERGO(Tier3InvocationThreshold, threshold);
287
FLAG_SET_ERGO(Tier3MinInvocationThreshold, threshold);
288
FLAG_SET_ERGO(Tier3CompileThreshold, threshold);
289
FLAG_SET_ERGO(Tier3BackEdgeThreshold, osr_threshold);
290
if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
291
FLAG_SET_ERGO(Tier4InvocationThreshold, threshold);
292
FLAG_SET_ERGO(Tier4MinInvocationThreshold, threshold);
293
FLAG_SET_ERGO(Tier4CompileThreshold, threshold);
294
FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
295
FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
298
// Normal tiered mode, ignore legacy flags
301
// Scale CompileThreshold
302
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
303
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0 && CompileThreshold > 0) {
304
intx scaled_value = scaled_compile_threshold(CompileThreshold);
305
if (CompileThresholdConstraintFunc(scaled_value, true) != JVMFlag::VIOLATES_CONSTRAINT) {
306
FLAG_SET_ERGO(CompileThreshold, scaled_value);
312
void CompilerConfig::set_compilation_policy_flags() {
314
// Increase the code cache size - tiered compiles a lot more.
315
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
316
FLAG_SET_ERGO(ReservedCodeCacheSize,
317
MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
319
// Enable SegmentedCodeCache if tiered compilation is enabled, ReservedCodeCacheSize >= 240M
320
// and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
321
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
322
8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
323
FLAG_SET_ERGO(SegmentedCodeCache, true);
325
if (Arguments::is_compiler_only()) { // -Xcomp
326
// Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
327
// We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
328
// compile a level 4 (C2) and then continue executing it.
329
if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
330
FLAG_SET_CMDLINE(Tier3InvokeNotifyFreqLog, 0);
332
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
333
FLAG_SET_CMDLINE(Tier4InvocationThreshold, 0);
338
if (CompileThresholdScaling < 0) {
339
vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", nullptr);
342
if (CompilationModeFlag::disable_intermediate()) {
343
if (FLAG_IS_DEFAULT(Tier0ProfilingStartPercentage)) {
344
FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
347
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
348
FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
350
if (FLAG_IS_DEFAULT(Tier4MinInvocationThreshold)) {
351
FLAG_SET_DEFAULT(Tier4MinInvocationThreshold, 600);
353
if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
354
FLAG_SET_DEFAULT(Tier4CompileThreshold, 10000);
356
if (FLAG_IS_DEFAULT(Tier4BackEdgeThreshold)) {
357
FLAG_SET_DEFAULT(Tier4BackEdgeThreshold, 15000);
360
if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
361
FLAG_SET_DEFAULT(Tier3InvocationThreshold, Tier4InvocationThreshold);
363
if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
364
FLAG_SET_DEFAULT(Tier3MinInvocationThreshold, Tier4MinInvocationThreshold);
366
if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
367
FLAG_SET_DEFAULT(Tier3CompileThreshold, Tier4CompileThreshold);
369
if (FLAG_IS_DEFAULT(Tier3BackEdgeThreshold)) {
370
FLAG_SET_DEFAULT(Tier3BackEdgeThreshold, Tier4BackEdgeThreshold);
375
// Scale tiered compilation thresholds.
376
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
377
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
378
FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier0InvokeNotifyFreqLog));
379
FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier0BackedgeNotifyFreqLog));
381
FLAG_SET_ERGO(Tier3InvocationThreshold, jvmflag_scaled_compile_threshold(Tier3InvocationThreshold));
382
FLAG_SET_ERGO(Tier3MinInvocationThreshold, jvmflag_scaled_compile_threshold(Tier3MinInvocationThreshold));
383
FLAG_SET_ERGO(Tier3CompileThreshold, jvmflag_scaled_compile_threshold(Tier3CompileThreshold));
384
FLAG_SET_ERGO(Tier3BackEdgeThreshold, jvmflag_scaled_compile_threshold(Tier3BackEdgeThreshold));
386
// Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
387
// once these thresholds become supported.
389
FLAG_SET_ERGO(Tier2InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier2InvokeNotifyFreqLog));
390
FLAG_SET_ERGO(Tier2BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier2BackedgeNotifyFreqLog));
392
FLAG_SET_ERGO(Tier3InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier3InvokeNotifyFreqLog));
393
FLAG_SET_ERGO(Tier3BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier3BackedgeNotifyFreqLog));
395
FLAG_SET_ERGO(Tier23InlineeNotifyFreqLog, jvmflag_scaled_freq_log(Tier23InlineeNotifyFreqLog));
397
FLAG_SET_ERGO(Tier4InvocationThreshold, jvmflag_scaled_compile_threshold(Tier4InvocationThreshold));
398
FLAG_SET_ERGO(Tier4MinInvocationThreshold, jvmflag_scaled_compile_threshold(Tier4MinInvocationThreshold));
399
FLAG_SET_ERGO(Tier4CompileThreshold, jvmflag_scaled_compile_threshold(Tier4CompileThreshold));
400
FLAG_SET_ERGO(Tier4BackEdgeThreshold, jvmflag_scaled_compile_threshold(Tier4BackEdgeThreshold));
404
// Reduce stack usage due to inlining of methods which require much stack.
405
// (High tier compiler can inline better based on profiling information.)
406
if (FLAG_IS_DEFAULT(C1InlineStackLimit) &&
407
TieredStopAtLevel == CompLevel_full_optimization && !CompilerConfig::is_c1_only()) {
408
FLAG_SET_DEFAULT(C1InlineStackLimit, 5);
412
if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
414
// Some inlining tuning
415
#if defined(X86) || defined(AARCH64) || defined(RISCV64)
416
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
417
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
#if INCLUDE_JVMCI
// Adjusts flag defaults for a JVMCI compiler: type-profile settings, larger
// compiler-thread stacks for the native (SVM) library, and larger code
// cache / memory defaults for the Java-based (jargraal) compiler.
// (Garbling had dropped the opening guard, else branch and braces;
// reconstructed.)
void CompilerConfig::set_jvmci_specific_flags() {
  if (UseJVMCICompiler) {
    if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
      FLAG_SET_DEFAULT(TypeProfileWidth, 8);
    }
    if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
    }

    if (UseJVMCINativeLibrary) {
      // SVM compiled code requires more stack space
      if (FLAG_IS_DEFAULT(CompilerThreadStackSize)) {
        // Duplicate logic in the implementations of os::create_thread
        // so that we can then double the computed stack size. Once
        // the stack size requirements of SVM are better understood,
        // this logic can be pushed down into os::create_thread.
        int stack_size = CompilerThreadStackSize;
        if (stack_size == 0) {
          stack_size = VMThreadStackSize;
        }
        if (stack_size != 0) {
          FLAG_SET_DEFAULT(CompilerThreadStackSize, stack_size * 2);
        }
      }
    } else {
      // JVMCI needs values not less than defaults
      if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
        FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
        FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
        FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
      }
      if (FLAG_IS_DEFAULT(Tier3DelayOn)) {
        // This effectively prevents the compile broker scheduling tier 2
        // (i.e., limited C1 profiling) compilations instead of tier 3
        // (i.e., full C1 profiling) compilations when the tier 4 queue
        // backs up (which is quite likely when using a non-AOT compiled JVMCI
        // compiler). The observation based on jargraal is that the downside
        // of skipping full profiling is much worse for performance than the
        // queue backing up. NOTE(review): the final line of this comment was
        // lost in garbling — confirm wording against upstream.
        FLAG_SET_DEFAULT(Tier3DelayOn, 100000);
      }
    } // !UseJVMCINativeLibrary
  } // UseJVMCICompiler
}
#endif // INCLUDE_JVMCI
476
bool CompilerConfig::check_args_consistency(bool status) {
477
// Check lower bounds of the code cache
478
// Template Interpreter code is approximately 3X larger in debug builds.
479
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
480
if (ReservedCodeCacheSize < InitialCodeCacheSize) {
481
jio_fprintf(defaultStream::error_stream(),
482
"Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
483
ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
485
} else if (ReservedCodeCacheSize < min_code_cache_size) {
486
jio_fprintf(defaultStream::error_stream(),
487
"Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
488
min_code_cache_size/K);
490
} else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
491
// Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
492
jio_fprintf(defaultStream::error_stream(),
493
"Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
494
CODE_CACHE_SIZE_LIMIT/M);
496
} else if (NonNMethodCodeHeapSize < min_code_cache_size) {
497
jio_fprintf(defaultStream::error_stream(),
498
"Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
499
min_code_cache_size/K);
501
} else if (InlineCacheBufferSize > NonNMethodCodeHeapSize / 2) {
502
jio_fprintf(defaultStream::error_stream(),
503
"Invalid InlineCacheBufferSize=" SIZE_FORMAT "K. Must be less than or equal to " SIZE_FORMAT "K.\n",
504
InlineCacheBufferSize/K, NonNMethodCodeHeapSize/2/K);
509
if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
510
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
514
if (BackgroundCompilation && ReplayCompiles) {
515
if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
516
warning("BackgroundCompilation disabled due to ReplayCompiles option.");
518
FLAG_SET_CMDLINE(BackgroundCompilation, false);
521
if (CompilerConfig::is_interpreter_only()) {
523
if (!FLAG_IS_DEFAULT(UseCompiler)) {
524
warning("UseCompiler disabled due to -Xint.");
526
FLAG_SET_CMDLINE(UseCompiler, false);
528
if (ProfileInterpreter) {
529
if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
530
warning("ProfileInterpreter disabled due to -Xint.");
532
FLAG_SET_CMDLINE(ProfileInterpreter, false);
534
if (TieredCompilation) {
535
if (!FLAG_IS_DEFAULT(TieredCompilation)) {
536
warning("TieredCompilation disabled due to -Xint.");
538
FLAG_SET_CMDLINE(TieredCompilation, false);
540
if (SegmentedCodeCache) {
541
warning("SegmentedCodeCache has no meaningful effect with -Xint");
542
FLAG_SET_DEFAULT(SegmentedCodeCache, false);
545
if (EnableJVMCI || UseJVMCICompiler) {
546
if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
547
warning("JVMCI Compiler disabled due to -Xint.");
549
FLAG_SET_CMDLINE(EnableJVMCI, false);
550
FLAG_SET_CMDLINE(UseJVMCICompiler, false);
555
status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
562
void CompilerConfig::ergo_initialize() {
563
#if !COMPILER1_OR_COMPILER2
568
if (!is_compilation_mode_selected()) {
569
#if defined(_WINDOWS) && !defined(_LP64)
570
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
571
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
574
if (NeverActAsServerClassMachine) {
575
set_client_emulation_mode_flags();
577
} else if (!has_c2() && !is_jvmci_compiler()) {
578
set_client_emulation_mode_flags();
582
set_legacy_emulation_flags();
583
set_compilation_policy_flags();
586
// Check that JVMCI supports selected GC.
587
// Should be done after GCConfig::initialize() was called.
588
JVMCIGlobals::check_jvmci_supported_gc();
590
// Do JVMCI specific settings
591
set_jvmci_specific_flags();
594
if (UseOnStackReplacement && !UseLoopCounter) {
595
warning("On-stack-replacement requires loop counters; enabling loop counters");
596
FLAG_SET_DEFAULT(UseLoopCounter, true);
599
if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) {
600
if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
601
warning("ProfileInterpreter disabled due to client emulation mode");
603
FLAG_SET_CMDLINE(ProfileInterpreter, false);
607
if (!EliminateLocks) {
608
EliminateNestedLocks = false;
610
if (!Inline || !IncrementalInline) {
611
IncrementalInline = false;
612
IncrementalInlineMH = false;
613
IncrementalInlineVirtual = false;
614
StressIncrementalInlining = false;
617
if (!IncrementalInline) {
618
AlwaysIncrementalInline = false;
620
if (FLAG_IS_CMDLINE(PrintIdealGraph) && !PrintIdealGraph) {
621
FLAG_SET_ERGO(PrintIdealGraphLevel, -1);
624
if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
625
// nothing to use the profiling, turn if off
626
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
628
if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
629
FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
631
if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
633
LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;