[3/7] Apply mechanical replacement (generated patch).

Message ID: 90868706a74cb5641c5d15430200e50bbd3785e7.1573125297.git.mliska@suse.cz
State: New
Series: Param conversion to option machinery

Commit Message

Martin Liška Nov. 6, 2019, 10:50 a.m. UTC
gcc/ChangeLog:

2019-11-07  Martin Liska  <mliska@suse.cz>

	* asan.c (asan_sanitize_stack_p): Replace old parameter syntax
	with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
	macro.
	(asan_sanitize_allocas_p): Likewise.
	(asan_emit_stack_protection): Likewise.
	(asan_protect_global): Likewise.
	(instrument_derefs): Likewise.
	(instrument_builtin_call): Likewise.
	(asan_expand_mark_ifn): Likewise.
	* auto-profile.c (auto_profile): Likewise.
	* bb-reorder.c (copy_bb_p): Likewise.
	(duplicate_computed_gotos): Likewise.
	* builtins.c (inline_expand_builtin_string_cmp): Likewise.
	* cfgcleanup.c (try_crossjump_to_edge): Likewise.
	(try_crossjump_bb): Likewise.
	* cfgexpand.c (defer_stack_allocation): Likewise.
	(stack_protect_classify_type): Likewise.
	(pass_expand::execute): Likewise.
	* cfgloopanal.c (expected_loop_iterations_unbounded): Likewise.
	(estimate_reg_pressure_cost): Likewise.
	* cgraph.c (cgraph_edge::maybe_hot_p): Likewise.
	* combine.c (combine_instructions): Likewise.
	(record_value_for_reg): Likewise.
	* common/config/aarch64/aarch64-common.c (aarch64_option_validate_param): Likewise.
	(aarch64_option_default_params): Likewise.
	* common/config/ia64/ia64-common.c (ia64_option_default_params): Likewise.
	* common/config/powerpcspe/powerpcspe-common.c (rs6000_option_default_params): Likewise.
	* common/config/rs6000/rs6000-common.c (rs6000_option_default_params): Likewise.
	* common/config/sh/sh-common.c (sh_option_default_params): Likewise.
	* config/aarch64/aarch64.c (aarch64_output_probe_stack_range): Likewise.
	(aarch64_allocate_and_probe_stack_space): Likewise.
	(aarch64_expand_epilogue): Likewise.
	(aarch64_override_options_internal): Likewise.
	* config/alpha/alpha.c (alpha_option_override): Likewise.
	* config/arm/arm.c (arm_option_override): Likewise.
	(arm_valid_target_attribute_p): Likewise.
	* config/i386/i386-options.c (ix86_option_override_internal): Likewise.
	* config/i386/i386.c (get_probe_interval): Likewise.
	(ix86_adjust_stack_and_probe_stack_clash): Likewise.
	(ix86_max_noce_ifcvt_seq_cost): Likewise.
	* config/ia64/ia64.c (ia64_adjust_cost): Likewise.
	* config/rs6000/rs6000-logue.c (get_stack_clash_protection_probe_interval): Likewise.
	(get_stack_clash_protection_guard_size): Likewise.
	* config/rs6000/rs6000.c (rs6000_option_override_internal): Likewise.
	* config/s390/s390.c (allocate_stack_space): Likewise.
	(s390_emit_prologue): Likewise.
	(s390_option_override_internal): Likewise.
	* config/sparc/sparc.c (sparc_option_override): Likewise.
	* config/visium/visium.c (visium_option_override): Likewise.
	* coverage.c (get_coverage_counts): Likewise.
	(coverage_compute_profile_id): Likewise.
	(coverage_begin_function): Likewise.
	(coverage_end_function): Likewise.
	* cse.c (cse_find_path): Likewise.
	(cse_extended_basic_block): Likewise.
	(cse_main): Likewise.
	* cselib.c (cselib_invalidate_mem): Likewise.
	* dse.c (dse_step1): Likewise.
	* emit-rtl.c (set_new_first_and_last_insn): Likewise.
	(get_max_insn_count): Likewise.
	(make_debug_insn_raw): Likewise.
	(init_emit): Likewise.
	* explow.c (compute_stack_clash_protection_loop_data): Likewise.
	* final.c (compute_alignments): Likewise.
	* fold-const.c (fold_range_test): Likewise.
	(fold_truth_andor): Likewise.
	(tree_single_nonnegative_warnv_p): Likewise.
	(integer_valued_real_single_p): Likewise.
	* gcse.c (want_to_gcse_p): Likewise.
	(prune_insertions_deletions): Likewise.
	(hoist_code): Likewise.
	(gcse_or_cprop_is_too_expensive): Likewise.
	* ggc-common.c: Likewise.
	* ggc-page.c (ggc_collect): Likewise.
	* gimple-loop-interchange.cc (MAX_NUM_STMT): Likewise.
	(MAX_DATAREFS): Likewise.
	(OUTER_STRIDE_RATIO): Likewise.
	* gimple-loop-jam.c (tree_loop_unroll_and_jam): Likewise.
	* gimple-loop-versioning.cc (loop_versioning::max_insns_for_loop): Likewise.
	* gimple-ssa-split-paths.c (is_feasible_trace): Likewise.
	* gimple-ssa-store-merging.c (imm_store_chain_info::try_coalesce_bswap): Likewise.
	(imm_store_chain_info::coalesce_immediate_stores): Likewise.
	(imm_store_chain_info::output_merged_store): Likewise.
	(pass_store_merging::process_store): Likewise.
	* gimple-ssa-strength-reduction.c (find_basis_for_base_expr): Likewise.
	* graphite-isl-ast-to-gimple.c (class translate_isl_ast_to_gimple): Likewise.
	(scop_to_isl_ast): Likewise.
	* graphite-optimize-isl.c (get_schedule_for_node_st): Likewise.
	(optimize_isl): Likewise.
	* graphite-scop-detection.c (build_scops): Likewise.
	* haifa-sched.c (set_modulo_params): Likewise.
	(rank_for_schedule): Likewise.
	(model_add_to_worklist): Likewise.
	(model_promote_insn): Likewise.
	(model_choose_insn): Likewise.
	(queue_to_ready): Likewise.
	(autopref_multipass_dfa_lookahead_guard): Likewise.
	(schedule_block): Likewise.
	(sched_init): Likewise.
	* hsa-gen.c (init_prologue): Likewise.
	* ifcvt.c (bb_ok_for_noce_convert_multiple_sets): Likewise.
	(cond_move_process_if_block): Likewise.
	* ipa-cp.c (ipcp_lattice::add_value): Likewise.
	(merge_agg_lats_step): Likewise.
	(devirtualization_time_bonus): Likewise.
	(hint_time_bonus): Likewise.
	(incorporate_penalties): Likewise.
	(good_cloning_opportunity_p): Likewise.
	(ipcp_propagate_stage): Likewise.
	* ipa-fnsummary.c (decompose_param_expr): Likewise.
	(set_switch_stmt_execution_predicate): Likewise.
	(analyze_function_body): Likewise.
	(compute_fn_summary): Likewise.
	* ipa-inline-analysis.c (estimate_growth): Likewise.
	* ipa-inline.c (caller_growth_limits): Likewise.
	(inline_insns_single): Likewise.
	(inline_insns_auto): Likewise.
	(can_inline_edge_by_limits_p): Likewise.
	(want_early_inline_function_p): Likewise.
	(big_speedup_p): Likewise.
	(want_inline_small_function_p): Likewise.
	(want_inline_self_recursive_call_p): Likewise.
	(edge_badness): Likewise.
	(recursive_inlining): Likewise.
	(compute_max_insns): Likewise.
	(early_inliner): Likewise.
	* ipa-polymorphic-call.c (csftc_abort_walking_p): Likewise.
	* ipa-profile.c (ipa_profile): Likewise.
	* ipa-prop.c (determine_known_aggregate_parts): Likewise.
	(ipa_analyze_node): Likewise.
	(ipcp_transform_function): Likewise.
	* ipa-split.c (consider_split): Likewise.
	* ipa-sra.c (allocate_access): Likewise.
	(process_scan_results): Likewise.
	(ipa_sra_summarize_function): Likewise.
	(pull_accesses_from_callee): Likewise.
	* ira-build.c (loop_compare_func): Likewise.
	(mark_loops_for_removal): Likewise.
	* ira-conflicts.c (build_conflict_bit_table): Likewise.
	* loop-doloop.c (doloop_optimize): Likewise.
	* loop-invariant.c (gain_for_invariant): Likewise.
	(move_loop_invariants): Likewise.
	* loop-unroll.c (decide_unroll_constant_iterations): Likewise.
	(decide_unroll_runtime_iterations): Likewise.
	(decide_unroll_stupid): Likewise.
	(expand_var_during_unrolling): Likewise.
	* lra-assigns.c (spill_for): Likewise.
	* lra-constraints.c (EBB_PROBABILITY_CUTOFF): Likewise.
	* modulo-sched.c (sms_schedule): Likewise.
	(DFA_HISTORY): Likewise.
	* opts.c (default_options_optimization): Likewise.
	(finish_options): Likewise.
	(common_handle_option): Likewise.
	* postreload-gcse.c (eliminate_partially_redundant_load): Likewise.
	(if): Likewise.
	* predict.c (get_hot_bb_threshold): Likewise.
	(maybe_hot_count_p): Likewise.
	(probably_never_executed): Likewise.
	(predictable_edge_p): Likewise.
	(predict_loops): Likewise.
	(expr_expected_value_1): Likewise.
	(tree_predict_by_opcode): Likewise.
	(handle_missing_profiles): Likewise.
	* reload.c (find_equiv_reg): Likewise.
	* reorg.c (redundant_insn): Likewise.
	* resource.c (mark_target_live_regs): Likewise.
	(incr_ticks_for_insn): Likewise.
	* sanopt.c (pass_sanopt::execute): Likewise.
	* sched-deps.c (sched_analyze_1): Likewise.
	(sched_analyze_2): Likewise.
	(sched_analyze_insn): Likewise.
	(deps_analyze_insn): Likewise.
	* sched-ebb.c (schedule_ebbs): Likewise.
	* sched-rgn.c (find_single_block_region): Likewise.
	(too_large): Likewise.
	(haifa_find_rgns): Likewise.
	(extend_rgns): Likewise.
	(new_ready): Likewise.
	(schedule_region): Likewise.
	(sched_rgn_init): Likewise.
	* sel-sched-ir.c (make_region_from_loop): Likewise.
	* sel-sched-ir.h (MAX_WS): Likewise.
	* sel-sched.c (process_pipelined_exprs): Likewise.
	(sel_setup_region_sched_flags): Likewise.
	* shrink-wrap.c (try_shrink_wrapping): Likewise.
	* targhooks.c (default_max_noce_ifcvt_seq_cost): Likewise.
	* toplev.c (print_version): Likewise.
	(process_options): Likewise.
	* tracer.c (tail_duplicate): Likewise.
	* trans-mem.c (tm_log_add): Likewise.
	* tree-chrec.c (chrec_fold_plus_1): Likewise.
	* tree-data-ref.c (split_constant_offset): Likewise.
	(compute_all_dependences): Likewise.
	* tree-if-conv.c (MAX_PHI_ARG_NUM): Likewise.
	* tree-inline.c (remap_gimple_stmt): Likewise.
	* tree-loop-distribution.c (MAX_DATAREFS_NUM): Likewise.
	* tree-parloops.c (MIN_PER_THREAD): Likewise.
	(create_parallel_loop): Likewise.
	* tree-predcom.c (determine_unroll_factor): Likewise.
	* tree-scalar-evolution.c (instantiate_scev_r): Likewise.
	* tree-sra.c (analyze_all_variable_accesses): Likewise.
	* tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise.
	* tree-ssa-dse.c (setup_live_bytes_from_ref): Likewise.
	(dse_optimize_redundant_stores): Likewise.
	(dse_classify_store): Likewise.
	* tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise.
	* tree-ssa-loop-ch.c (ch_base::copy_headers): Likewise.
	* tree-ssa-loop-im.c (LIM_EXPENSIVE): Likewise.
	* tree-ssa-loop-ivcanon.c (try_unroll_loop_completely): Likewise.
	(try_peel_loop): Likewise.
	(tree_unroll_loops_completely): Likewise.
	* tree-ssa-loop-ivopts.c (avg_loop_niter): Likewise.
	(CONSIDER_ALL_CANDIDATES_BOUND): Likewise.
	(MAX_CONSIDERED_GROUPS): Likewise.
	(ALWAYS_PRUNE_CAND_SET_BOUND): Likewise.
	* tree-ssa-loop-manip.c (can_unroll_loop_p): Likewise.
	* tree-ssa-loop-niter.c (MAX_ITERATIONS_TO_TRACK): Likewise.
	* tree-ssa-loop-prefetch.c (PREFETCH_BLOCK): Likewise.
	(L1_CACHE_SIZE_BYTES): Likewise.
	(L2_CACHE_SIZE_BYTES): Likewise.
	(should_issue_prefetch_p): Likewise.
	(schedule_prefetches): Likewise.
	(determine_unroll_factor): Likewise.
	(volume_of_references): Likewise.
	(add_subscript_strides): Likewise.
	(self_reuse_distance): Likewise.
	(mem_ref_count_reasonable_p): Likewise.
	(insn_to_prefetch_ratio_too_small_p): Likewise.
	(loop_prefetch_arrays): Likewise.
	(tree_ssa_prefetch_arrays): Likewise.
	* tree-ssa-loop-unswitch.c (tree_unswitch_single_loop): Likewise.
	* tree-ssa-math-opts.c (gimple_expand_builtin_pow): Likewise.
	(convert_mult_to_fma): Likewise.
	(math_opts_dom_walker::after_dom_children): Likewise.
	* tree-ssa-phiopt.c (cond_if_else_store_replacement): Likewise.
	(hoist_adjacent_loads): Likewise.
	(gate_hoist_loads): Likewise.
	* tree-ssa-pre.c (translate_vuse_through_block): Likewise.
	(compute_partial_antic_aux): Likewise.
	* tree-ssa-reassoc.c (get_reassociation_width): Likewise.
	* tree-ssa-sccvn.c (vn_reference_lookup_pieces): Likewise.
	(vn_reference_lookup): Likewise.
	(do_rpo_vn): Likewise.
	* tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Likewise.
	* tree-ssa-sink.c (select_best_block): Likewise.
	* tree-ssa-strlen.c (new_stridx): Likewise.
	(new_addr_stridx): Likewise.
	(get_range_strlen_dynamic): Likewise.
	(class ssa_name_limit_t): Likewise.
	* tree-ssa-structalias.c (push_fields_onto_fieldstack): Likewise.
	(create_variable_info_for_1): Likewise.
	(init_alias_vars): Likewise.
	* tree-ssa-tail-merge.c (find_clusters_1): Likewise.
	(tail_merge_optimize): Likewise.
	* tree-ssa-threadbackward.c (thread_jumps::profitable_jump_thread_path): Likewise.
	(thread_jumps::fsm_find_control_statement_thread_paths): Likewise.
	(thread_jumps::find_jump_threads_backwards): Likewise.
	* tree-ssa-threadedge.c (record_temporary_equivalences_from_stmts_at_dest): Likewise.
	* tree-ssa-uninit.c (compute_control_dep_chain): Likewise.
	* tree-switch-conversion.c (switch_conversion::check_range): Likewise.
	(jump_table_cluster::can_be_handled): Likewise.
	* tree-switch-conversion.h (jump_table_cluster::case_values_threshold): Likewise.
	(SWITCH_CONVERSION_BRANCH_RATIO): Likewise.
	(param_switch_conversion_branch_ratio): Likewise.
	* tree-vect-data-refs.c (vect_mark_for_runtime_alias_test): Likewise.
	(vect_enhance_data_refs_alignment): Likewise.
	(vect_prune_runtime_alias_test_list): Likewise.
	* tree-vect-loop.c (vect_analyze_loop_costing): Likewise.
	(vect_get_datarefs_in_loop): Likewise.
	(vect_analyze_loop): Likewise.
	* tree-vect-slp.c (vect_slp_bb): Likewise.
	* tree-vectorizer.h: Likewise.
	* tree-vrp.c (find_switch_asserts): Likewise.
	(vrp_prop::check_mem_ref): Likewise.
	* tree.c (wide_int_to_tree_1): Likewise.
	(cache_integer_cst): Likewise.
	* var-tracking.c (EXPR_USE_DEPTH): Likewise.
	(reverse_op): Likewise.
	(vt_find_locations): Likewise.

gcc/c/ChangeLog:

2019-11-07  Martin Liska  <mliska@suse.cz>

	* gimple-parser.c (c_parser_parse_gimple_body): Replace old parameter syntax
	with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
	macro.

gcc/cp/ChangeLog:

2019-11-07  Martin Liska  <mliska@suse.cz>

	* name-lookup.c (namespace_hints::namespace_hints): Replace old parameter syntax
	with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
	macro.
	* typeck.c (comptypes): Likewise.

gcc/lto/ChangeLog:

2019-11-07  Martin Liska  <mliska@suse.cz>

	* lto-partition.c (lto_balanced_map): Replace old parameter syntax
	with the new one, include opts.h if needed.  Use SET_OPTION_IF_UNSET
	macro.
	* lto.c (do_whole_program_analysis): Likewise.
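The replacement the entries above describe follows a single pattern: a
PARAM_VALUE (PARAM_FOO) lookup becomes a read of the param_foo global
declared via opts.h, and target code that used set_default_param_value
now goes through SET_OPTION_IF_UNSET so that an explicit --param from
the user still wins.  A minimal sketch of the pattern (the insn-count
check and the default value here are illustrative, not taken from the
patch itself):

  /* Old parameter syntax: a table lookup through the params machinery.  */
  if (ninsns > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
    return false;

  /* New syntax: the parameter is an ordinary option variable.  */
  if (ninsns > (unsigned) param_max_unrolled_insns)
    return false;

  /* Target-specific default that must not clobber an explicit --param.  */
  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
		       param_max_unrolled_insns, 400);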
---
 gcc/asan.c                                    | 18 ++--
 gcc/auto-profile.c                            |  2 +-
 gcc/bb-reorder.c                              |  4 +-
 gcc/builtins.c                                |  2 +-
 gcc/c/gimple-parser.c                         |  2 +-
 gcc/cfgcleanup.c                              |  4 +-
 gcc/cfgexpand.c                               |  8 +-
 gcc/cfgloopanal.c                             |  8 +-
 gcc/cgraph.c                                  |  2 +-
 gcc/combine.c                                 |  4 +-
 gcc/common/config/aarch64/aarch64-common.c    | 15 ++--
 gcc/common/config/ia64/ia64-common.c          |  8 +-
 .../config/powerpcspe/powerpcspe-common.c     |  2 +-
 gcc/common/config/rs6000/rs6000-common.c      |  2 +-
 gcc/common/config/sh/sh-common.c              |  2 +-
 gcc/config/aarch64/aarch64.c                  | 79 ++++++++----------
 gcc/config/alpha/alpha.c                      | 16 ++--
 gcc/config/arm/arm.c                          | 43 ++++------
 gcc/config/i386/i386-options.c                | 32 +++----
 gcc/config/i386/i386.c                        | 26 +++---
 gcc/config/ia64/ia64.c                        |  2 +-
 gcc/config/rs6000/rs6000-logue.c              |  4 +-
 gcc/config/rs6000/rs6000.c                    | 55 ++++++------
 gcc/config/s390/s390.c                        | 79 ++++++------------
 gcc/config/sparc/sparc.c                      | 83 +++++++++----------
 gcc/config/visium/visium.c                    |  6 +-
 gcc/coverage.c                                |  8 +-
 gcc/cp/name-lookup.c                          |  2 +-
 gcc/cp/typeck.c                               |  4 +-
 gcc/cse.c                                     |  6 +-
 gcc/cselib.c                                  |  2 +-
 gcc/dse.c                                     |  2 +-
 gcc/emit-rtl.c                                | 18 ++--
 gcc/explow.c                                  |  2 +-
 gcc/final.c                                   |  4 +-
 gcc/fold-const.c                              | 12 +--
 gcc/gcse.c                                    | 16 ++--
 gcc/ggc-common.c                              |  4 +-
 gcc/ggc-page.c                                |  4 +-
 gcc/gimple-loop-interchange.cc                |  6 +-
 gcc/gimple-loop-jam.c                         |  8 +-
 gcc/gimple-loop-versioning.cc                 |  4 +-
 gcc/gimple-ssa-split-paths.c                  |  2 +-
 gcc/gimple-ssa-store-merging.c                |  8 +-
 gcc/gimple-ssa-strength-reduction.c           |  2 +-
 gcc/graphite-isl-ast-to-gimple.c              |  4 +-
 gcc/graphite-optimize-isl.c                   |  4 +-
 gcc/graphite-scop-detection.c                 |  4 +-
 gcc/haifa-sched.c                             | 38 ++++-----
 gcc/hsa-gen.c                                 |  2 +-
 gcc/ifcvt.c                                   |  4 +-
 gcc/ipa-cp.c                                  | 30 +++----
 gcc/ipa-fnsummary.c                           | 20 ++---
 gcc/ipa-inline-analysis.c                     |  2 +-
 gcc/ipa-inline.c                              | 77 +++++++++--------
 gcc/ipa-polymorphic-call.c                    |  2 +-
 gcc/ipa-profile.c                             |  2 +-
 gcc/ipa-prop.c                                |  6 +-
 gcc/ipa-split.c                               | 10 +--
 gcc/ipa-sra.c                                 |  9 +-
 gcc/ira-build.c                               |  4 +-
 gcc/ira-conflicts.c                           |  4 +-
 gcc/loop-doloop.c                             |  2 +-
 gcc/loop-invariant.c                          |  4 +-
 gcc/loop-unroll.c                             | 26 +++---
 gcc/lra-assigns.c                             |  2 +-
 gcc/lra-constraints.c                         |  2 +-
 gcc/lto/lto-partition.c                       | 10 +--
 gcc/lto/lto.c                                 | 10 +--
 gcc/modulo-sched.c                            |  6 +-
 gcc/opts.c                                    | 48 ++++-------
 gcc/postreload-gcse.c                         |  6 +-
 gcc/predict.c                                 | 20 ++---
 gcc/reload.c                                  |  2 +-
 gcc/reorg.c                                   |  4 +-
 gcc/resource.c                                |  4 +-
 gcc/sanopt.c                                  |  4 +-
 gcc/sched-deps.c                              | 10 +--
 gcc/sched-ebb.c                               |  4 +-
 gcc/sched-rgn.c                               | 18 ++--
 gcc/sel-sched-ir.c                            |  4 +-
 gcc/sel-sched-ir.h                            |  2 +-
 gcc/sel-sched.c                               |  4 +-
 gcc/shrink-wrap.c                             |  2 +-
 gcc/targhooks.c                               | 21 ++---
 gcc/toplev.c                                  |  4 +-
 gcc/tracer.c                                  | 12 +--
 gcc/trans-mem.c                               |  2 +-
 gcc/tree-chrec.c                              |  4 +-
 gcc/tree-data-ref.c                           |  4 +-
 gcc/tree-if-conv.c                            |  2 +-
 gcc/tree-inline.c                             |  2 +-
 gcc/tree-loop-distribution.c                  |  2 +-
 gcc/tree-parloops.c                           | 18 ++--
 gcc/tree-predcom.c                            |  2 +-
 gcc/tree-scalar-evolution.c                   |  4 +-
 gcc/tree-sra.c                                | 19 +++--
 gcc/tree-ssa-ccp.c                            |  2 +-
 gcc/tree-ssa-dse.c                            |  8 +-
 gcc/tree-ssa-ifcombine.c                      |  4 +-
 gcc/tree-ssa-loop-ch.c                        |  2 +-
 gcc/tree-ssa-loop-im.c                        |  2 +-
 gcc/tree-ssa-loop-ivcanon.c                   | 18 ++--
 gcc/tree-ssa-loop-ivopts.c                    | 10 +--
 gcc/tree-ssa-loop-manip.c                     |  2 +-
 gcc/tree-ssa-loop-niter.c                     |  2 +-
 gcc/tree-ssa-loop-prefetch.c                  | 46 +++++-----
 gcc/tree-ssa-loop-unswitch.c                  |  6 +-
 gcc/tree-ssa-math-opts.c                      |  6 +-
 gcc/tree-ssa-phiopt.c                         |  8 +-
 gcc/tree-ssa-pre.c                            |  4 +-
 gcc/tree-ssa-reassoc.c                        |  2 +-
 gcc/tree-ssa-sccvn.c                          |  6 +-
 gcc/tree-ssa-scopedtables.c                   |  2 +-
 gcc/tree-ssa-sink.c                           |  2 +-
 gcc/tree-ssa-strlen.c                         |  8 +-
 gcc/tree-ssa-structalias.c                    |  8 +-
 gcc/tree-ssa-tail-merge.c                     |  4 +-
 gcc/tree-ssa-threadbackward.c                 | 16 ++--
 gcc/tree-ssa-threadedge.c                     |  4 +-
 gcc/tree-ssa-uninit.c                         |  2 +-
 gcc/tree-switch-conversion.c                  |  6 +-
 gcc/tree-switch-conversion.h                  |  4 +-
 gcc/tree-vect-data-refs.c                     | 11 ++-
 gcc/tree-vect-loop.c                          |  6 +-
 gcc/tree-vect-slp.c                           |  2 +-
 gcc/tree-vectorizer.h                         |  2 +-
 gcc/tree-vrp.c                                |  4 +-
 gcc/tree.c                                    | 22 ++---
 gcc/var-tracking.c                            |  6 +-
 130 files changed, 640 insertions(+), 731 deletions(-)

Comments

Richard Biener Nov. 11, 2019, 2:36 p.m. UTC | #1
On Thu, Nov 7, 2019 at 1:35 PM Martin Liska <mliska@suse.cz> wrote:
>
>
> gcc/ChangeLog:

I think I've noticed at least one long line (please double-check):

-             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
INTEGER_SHARE_LIMIT)
+             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
param_integer_share_limit)

As a follow-up, I notice that the targets' *_default_params () functions should
be merged into the default-options structs they already have (did you check that
the defaults are still applied and user-overridable?)
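A sketch of what that merge might look like for rs6000, assuming params
gain first-class option entries (the OPT__param_max_grow_copy_bb_insns_
spelling is an assumption here); a default set this way goes through the
normal option machinery, so an explicit --param still overrides it:

  /* Would replace rs6000_option_default_params setting
     param_max_grow_copy_bb_insns = 16 by hand.  */
  static const struct default_options rs6000_option_optimization_table[] =
    {
      /* Double growth factor to counter reduced min jump length.  */
      { OPT_LEVELS_ALL, OPT__param_max_grow_copy_bb_insns_, NULL, 16 },
      { OPT_LEVELS_NONE, 0, NULL, 0 }
    };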

> [full ChangeLog and diffstat quoted above trimmed]
Martin Liška Nov. 12, 2019, 9:19 a.m. UTC | #2
On 11/11/19 3:36 PM, Richard Biener wrote:
> I think I've noticed at least one long line (please double-check):
> 
> -             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
> INTEGER_SHARE_LIMIT)
> +             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
> param_integer_share_limit)

You are right, there were quite a few GNU coding style violations.

I'm sending an updated version of the patch where I fixed them.

Martin

> 
> > As a follow-up, I notice that the targets' *_default_params () functions should
> > be merged into the default-options structs they already have (did you check that
> > the defaults are still applied and user-overridable?)
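For reference, the over-long comparison flagged in #1 wraps per GNU
style by breaking before the operator, roughly:

  if (tree_to_uhwi (t)
      < (unsigned HOST_WIDE_INT) param_integer_share_limit)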
Richard Biener Nov. 12, 2019, 9:44 a.m. UTC | #3
On Tue, Nov 12, 2019 at 10:19 AM Martin Liška <mliska@suse.cz> wrote:
>
> On 11/11/19 3:36 PM, Richard Biener wrote:
> > I think I've noticed at least one long line (please double-check):
> >
> > -             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
> > INTEGER_SHARE_LIMIT)
> > +             if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT)
> > param_integer_share_limit)
>
> You are right, they were quite some GNU coding style violations.
>
> I'm sending updated version of the patch where I fixed that.

OK.

Thanks,
Richard.

> Martin
>
> >
> > As a follow-up, I notice that the targets' *_default_params () functions should
> > be merged into the default-options structs they already have (did you check that
> > the defaults are still applied and user-overridable?)
>

Patch

diff --git a/gcc/asan.c b/gcc/asan.c
index a731bd490b4..406d829125d 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -309,13 +309,13 @@  asan_mark_p (gimple *stmt, enum asan_mark_flags flag)
 bool
 asan_sanitize_stack_p (void)
 {
-  return (sanitize_flags_p (SANITIZE_ADDRESS) && ASAN_STACK);
+  return (sanitize_flags_p (SANITIZE_ADDRESS) && param_asan_stack);
 }
 
 bool
 asan_sanitize_allocas_p (void)
 {
-  return (asan_sanitize_stack_p () && ASAN_PROTECT_ALLOCAS);
+  return (asan_sanitize_stack_p () && param_asan_protect_allocas);
 }
 
 /* Checks whether section SEC should be sanitized.  */
@@ -1429,7 +1429,7 @@  asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
 
   /* Emit the prologue sequence.  */
   if (asan_frame_size > 32 && asan_frame_size <= 65536 && pbase
-      && ASAN_USE_AFTER_RETURN)
+      && param_asan_use_after_return)
     {
       use_after_return_class = floor_log2 (asan_frame_size - 1) - 5;
       /* __asan_stack_malloc_N guarantees alignment
@@ -1750,7 +1750,7 @@  is_odr_indicator (tree decl)
 bool
 asan_protect_global (tree decl, bool ignore_decl_rtl_set_p)
 {
-  if (!ASAN_GLOBALS)
+  if (!param_asan_globals)
     return false;
 
   rtx rtl, symbol;
@@ -2190,9 +2190,9 @@  static void
 instrument_derefs (gimple_stmt_iterator *iter, tree t,
 		   location_t location, bool is_store)
 {
-  if (is_store && !ASAN_INSTRUMENT_WRITES)
+  if (is_store && !param_asan_instrument_writes)
     return;
-  if (!is_store && !ASAN_INSTRUMENT_READS)
+  if (!is_store && !param_asan_instrument_reads)
     return;
 
   tree type, base;
@@ -2253,7 +2253,7 @@  instrument_derefs (gimple_stmt_iterator *iter, tree t,
     {
       if (DECL_THREAD_LOCAL_P (inner))
 	return;
-      if (!ASAN_GLOBALS && is_global_var (inner))
+      if (!param_asan_globals && is_global_var (inner))
         return;
       if (!TREE_STATIC (inner))
 	{
@@ -2346,7 +2346,7 @@  instrument_mem_region_access (tree base, tree len,
 static bool
 instrument_builtin_call (gimple_stmt_iterator *iter)
 {
-  if (!ASAN_MEMINTRIN)
+  if (!param_asan_memintrin)
     return false;
 
   bool iter_advanced_p = false;
@@ -3219,7 +3219,7 @@  asan_expand_mark_ifn (gimple_stmt_iterator *iter)
   tree base_addr = gimple_assign_lhs (g);
 
   /* Generate direct emission if size_in_bytes is small.  */
-  if (size_in_bytes <= ASAN_PARAM_USE_AFTER_SCOPE_DIRECT_EMISSION_THRESHOLD)
+  if (size_in_bytes <= (unsigned)param_use_after_scope_direct_emission_threshold)
     {
       const unsigned HOST_WIDE_INT shadow_size
 	= shadow_mem_size (size_in_bytes);
diff --git a/gcc/auto-profile.c b/gcc/auto-profile.c
index ee1a83abce2..4fd1df6fa7f 100644
--- a/gcc/auto-profile.c
+++ b/gcc/auto-profile.c
@@ -1631,7 +1631,7 @@  auto_profile (void)
        function before annotation, so the profile inside bar@loc_foo2
        will be useful.  */
     autofdo::stmt_set promoted_stmts;
-    for (int i = 0; i < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS); i++)
+    for (int i = 0; i < param_early_inliner_max_iterations; i++)
       {
         if (!flag_value_profile_transformations
             || !autofdo::afdo_vpt_for_early_inline (&promoted_stmts))
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 0ac39140c6c..6a85c2a7fc0 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -1371,7 +1371,7 @@  copy_bb_p (const_basic_block bb, int code_may_grow)
     return false;
 
   if (code_may_grow && optimize_bb_for_speed_p (bb))
-    max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+    max_size *= param_max_grow_copy_bb_insns;
 
   FOR_BB_INSNS (bb, insn)
     {
@@ -2751,7 +2751,7 @@  duplicate_computed_gotos (function *fun)
 
   /* Never copy a block larger than this.  */
   int max_size
-    = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
+    = uncond_jump_length * param_max_goto_duplication_insns;
 
   bool changed = false;
 
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 245fad02d9c..68baeb9bbe9 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -7214,7 +7214,7 @@  inline_expand_builtin_string_cmp (tree exp, rtx target)
   /* If the length of the comparision is larger than the threshold,
      do nothing.  */
   if (length > (unsigned HOST_WIDE_INT)
-	       PARAM_VALUE (BUILTIN_STRING_CMP_INLINE_LENGTH))
+	       param_builtin_string_cmp_inline_length)
     return NULL_RTX;
 
   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
diff --git a/gcc/c/gimple-parser.c b/gcc/c/gimple-parser.c
index ceec758ffbe..e40cfa2ec01 100644
--- a/gcc/c/gimple-parser.c
+++ b/gcc/c/gimple-parser.c
@@ -354,7 +354,7 @@  c_parser_parse_gimple_body (c_parser *cparser, char *gimple_pass,
   if (cfun->curr_properties & PROP_cfg)
     {
       ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb_count;
-      gcov_type t = PARAM_VALUE (PARAM_GIMPLE_FE_COMPUTED_HOT_BB_THRESHOLD);
+      gcov_type t = param_gimple_fe_computed_hot_bb_threshold;
       set_hot_bb_threshold (t);
       update_max_bb_count ();
       cgraph_node::get_create (cfun->decl);
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index 835f7d79ea4..7b1dd245487 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -2022,7 +2022,7 @@  try_crossjump_to_edge (int mode, edge e1, edge e2,
      of matching instructions or the 'from' block was totally matched
      (such that its predecessors will hopefully be redirected and the
      block removed).  */
-  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
+  if ((nmatch < param_min_crossjump_insns)
       && (newpos1 != BB_HEAD (src1)))
     return false;
 
@@ -2215,7 +2215,7 @@  try_crossjump_bb (int mode, basic_block bb)
      a block that falls through into BB, as that adds no branches to the
      program.  We'll try that combination first.  */
   fallthru = NULL;
-  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
+  max = param_max_crossjump_edges;
 
   if (EDGE_COUNT (bb->preds) > max)
     return false;
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index c34a53b526b..5fed0738211 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -1548,7 +1548,7 @@  defer_stack_allocation (tree var, bool toplevel)
   bool smallish
     = (poly_int_tree_p (size_unit, &size)
        && (estimated_poly_value (size)
-	   < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
+	   < param_min_size_for_stack_sharing));
 
   /* If stack protection is enabled, *all* stack variables must be deferred,
      so that we can re-order the strings to the top of the frame.
@@ -1788,7 +1788,7 @@  stack_protect_classify_type (tree type)
 	  || t == signed_char_type_node
 	  || t == unsigned_char_type_node)
 	{
-	  unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+	  unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
 	  unsigned HOST_WIDE_INT len;
 
 	  if (!TYPE_SIZE_UNIT (type)
@@ -6435,7 +6435,7 @@  pass_expand::execute (function *fun)
 	warning (OPT_Wstack_protector,
 		 "stack protector not protecting function: "
 		 "all local arrays are less than %d bytes long",
-		 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+		 (int) param_ssp_buffer_size);
     }
 
   /* Set up parameters and prepare for return, for the function.  */
@@ -6545,7 +6545,7 @@  pass_expand::execute (function *fun)
 
   /* If the function has too many markers, drop them while expanding.  */
   if (cfun->debug_marker_count
-      >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+      >= param_max_debug_marker_count)
     cfun->debug_nonbind_markers = false;
 
   lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 95ec929c7bd..84516efcfb6 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -256,7 +256,7 @@  expected_loop_iterations_unbounded (const class loop *loop,
     {
       if (by_profile_only)
 	return -1;
-      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      expected = param_avg_loop_niter;
     }
   else if (loop->latch && (loop->latch->count.initialized_p ()
 			   || loop->header->count.initialized_p ()))
@@ -274,7 +274,7 @@  expected_loop_iterations_unbounded (const class loop *loop,
 	{
           if (by_profile_only)
 	    return -1;
-	  expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+	  expected = param_avg_loop_niter;
 	}
       else if (!count_in.nonzero_p ())
 	{
@@ -295,7 +295,7 @@  expected_loop_iterations_unbounded (const class loop *loop,
     {
       if (by_profile_only)
 	return -1;
-      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      expected = param_avg_loop_niter;
     }
 
   if (!by_profile_only)
@@ -427,7 +427,7 @@  estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
 
   if (optimize && (flag_ira_region == IRA_REGION_ALL
 		   || flag_ira_region == IRA_REGION_MIXED)
-      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
+      && number_of_loops (cfun) <= (unsigned) param_ira_max_loops_num)
     /* IRA regional allocation deals with high register pressure
        better.  So decrease the cost (to do more accurate the cost
        calculation for IRA, we need to know how many registers lives
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 699209654f8..62a36c9f6c1 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -2738,7 +2738,7 @@  cgraph_edge::maybe_hot_p (void)
   if (caller->frequency == NODE_FREQUENCY_EXECUTED_ONCE
       && sreal_frequency () * 2 < 3)
     return false;
-  if (sreal_frequency () * PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) <= 1)
+  if (sreal_frequency () * param_hot_bb_frequency_fraction <= 1)
     return false;
   return true;
 }
diff --git a/gcc/combine.c b/gcc/combine.c
index 857ea30dafd..ae3bc468910 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1251,7 +1251,7 @@  combine_instructions (rtx_insn *f, unsigned int nregs)
   init_reg_last ();
   setup_incoming_promotions (first);
   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
-  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
+  int max_combine = param_max_combine_insns;
 
   FOR_EACH_BB_FN (this_basic_block, cfun)
     {
@@ -13282,7 +13282,7 @@  record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
 	    {
 	      /* If there are two or more occurrences of REG in VALUE,
 		 prevent the value from growing too much.  */
-	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
+	      if (count_rtxs (tem) > param_max_last_value_rtl)
 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
 	    }
 
diff --git a/gcc/common/config/aarch64/aarch64-common.c b/gcc/common/config/aarch64/aarch64-common.c
index 07c03253951..adb3ff71af8 100644
--- a/gcc/common/config/aarch64/aarch64-common.c
+++ b/gcc/common/config/aarch64/aarch64-common.c
@@ -73,7 +73,7 @@  static bool
 aarch64_option_validate_param (const int value, const int param)
 {
   /* Check that both parameters are the same.  */
-  if (param == (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)
+  if (param == param_stack_clash_protection_guard_size)
     {
       if (value != 12 && value != 16)
 	{
@@ -93,18 +93,15 @@  static void
 aarch64_option_default_params (void)
 {
   /* We assume the guard page is 64k.  */
-  int index = (int) PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE;
-  set_default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
-			   DEFAULT_STK_CLASH_GUARD_SIZE == 0
-			     ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
+  int index = (int) param_stack_clash_protection_guard_size;
+  param_stack_clash_protection_guard_size
+    = (DEFAULT_STK_CLASH_GUARD_SIZE == 0 ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE);
 
-  int guard_size
-    = default_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+  int guard_size = param_stack_clash_protection_guard_size;
 
   /* Set the interval parameter to be the same as the guard size.  This way the
      mid-end code does the right thing for us.  */
-  set_default_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
-			   guard_size);
+  param_stack_clash_protection_probe_interval = guard_size;
 
   /* Validate the options.  */
   aarch64_option_validate_param (guard_size, index);
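
A note for readers outside the stack-clash code: the guard-size value is a base-2 exponent, not a byte count, which is why only 12 and 16 pass validation above. A minimal sketch:

/* 12 -> 1 << 12 == 4 KiB guard page, 16 -> 1 << 16 == 64 KiB.  */
static long
model_guard_bytes (int stack_clash_protection_guard_size)
{
  return 1L << stack_clash_protection_guard_size;
}
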
diff --git a/gcc/common/config/ia64/ia64-common.c b/gcc/common/config/ia64/ia64-common.c
index 02e297ad69b..0a187160fd2 100644
--- a/gcc/common/config/ia64/ia64-common.c
+++ b/gcc/common/config/ia64/ia64-common.c
@@ -88,13 +88,13 @@  static void
 ia64_option_default_params (void)
 {
   /* Let the scheduler form additional regions.  */
-  set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
+  param_max_sched_extend_regions_iters = 2;
 
   /* Set the default values for cache-related parameters.  */
-  set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
-  set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
+  param_simultaneous_prefetches = 6;
+  param_l1_cache_line_size = 32;
 
-  set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
+  param_sched_mem_true_dep_cost = 4;
 }
 
 #undef TARGET_OPTION_OPTIMIZATION_TABLE
diff --git a/gcc/common/config/powerpcspe/powerpcspe-common.c b/gcc/common/config/powerpcspe/powerpcspe-common.c
index c949a601f57..7043a4bda31 100644
--- a/gcc/common/config/powerpcspe/powerpcspe-common.c
+++ b/gcc/common/config/powerpcspe/powerpcspe-common.c
@@ -57,7 +57,7 @@  static void
 rs6000_option_default_params (void)
 {
   /* Double growth factor to counter reduced min jump length.  */
-  set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+  param_max_grow_copy_bb_insns = 16;
 }
 
 /* If not otherwise specified by a target, make 'long double' equivalent to
diff --git a/gcc/common/config/rs6000/rs6000-common.c b/gcc/common/config/rs6000/rs6000-common.c
index b9471964a66..afc1a0cfcbc 100644
--- a/gcc/common/config/rs6000/rs6000-common.c
+++ b/gcc/common/config/rs6000/rs6000-common.c
@@ -69,7 +69,7 @@  static void
 rs6000_option_default_params (void)
 {
   /* Double growth factor to counter reduced min jump length.  */
-  set_default_param_value (PARAM_MAX_GROW_COPY_BB_INSNS, 16);
+  param_max_grow_copy_bb_insns = 16;
 }
 
 /* If not otherwise specified by a target, make 'long double' equivalent to
diff --git a/gcc/common/config/sh/sh-common.c b/gcc/common/config/sh/sh-common.c
index 4a92146f0af..e6ecc3a632a 100644
--- a/gcc/common/config/sh/sh-common.c
+++ b/gcc/common/config/sh/sh-common.c
@@ -149,7 +149,7 @@  sh_handle_option (struct gcc_options *opts,
 static void
 sh_option_default_params (void)
 {
-  set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 2);
+  param_simultaneous_prefetches = 2;
 }
 
 #undef TARGET_OPTION_OPTIMIZATION_TABLE
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 1dfff331a5a..c4783861c5d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -5589,7 +5589,7 @@  aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
   ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
 
   HOST_WIDE_INT stack_clash_probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
 
   /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
   xops[0] = reg1;
@@ -6842,7 +6842,7 @@  aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
 					bool final_adjustment_p)
 {
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
   HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
   HOST_WIDE_INT min_probe_threshold
     = (final_adjustment_p
@@ -7364,7 +7364,7 @@  aarch64_expand_epilogue (bool for_sibcall)
      for each allocation.  For stack clash we are in a usable state if
      the adjustment is less than GUARD_SIZE - GUARD_USED_BY_CALLER.  */
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
   HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
 
   /* We can re-use the registers when:
@@ -13306,73 +13306,62 @@  aarch64_override_options_internal (struct gcc_options *opts)
 
   /* We don't mind passing in global_options_set here as we don't use
      the *options_set structs anyway.  */
-  maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
-			 queue_depth,
-			 opts->x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+		       param_sched_autopref_queue_depth, queue_depth);
 
   /* Set up parameters to be used in prefetching algorithm.  Do not
      override the defaults unless we are tuning for a core we have
      researched values for.  */
   if (aarch64_tune_params.prefetch->num_slots > 0)
-    maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-			   aarch64_tune_params.prefetch->num_slots,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_simultaneous_prefetches,
+			 aarch64_tune_params.prefetch->num_slots);
   if (aarch64_tune_params.prefetch->l1_cache_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-			   aarch64_tune_params.prefetch->l1_cache_size,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_l1_cache_size,
+			 aarch64_tune_params.prefetch->l1_cache_size);
   if (aarch64_tune_params.prefetch->l1_cache_line_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-			   aarch64_tune_params.prefetch->l1_cache_line_size,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_l1_cache_line_size,
+			 aarch64_tune_params.prefetch->l1_cache_line_size);
   if (aarch64_tune_params.prefetch->l2_cache_size >= 0)
-    maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-			   aarch64_tune_params.prefetch->l2_cache_size,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_l2_cache_size,
+			 aarch64_tune_params.prefetch->l2_cache_size);
   if (!aarch64_tune_params.prefetch->prefetch_dynamic_strides)
-    maybe_set_param_value (PARAM_PREFETCH_DYNAMIC_STRIDES,
-			   0,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_prefetch_dynamic_strides, 0);
   if (aarch64_tune_params.prefetch->minimum_stride >= 0)
-    maybe_set_param_value (PARAM_PREFETCH_MINIMUM_STRIDE,
-			   aarch64_tune_params.prefetch->minimum_stride,
-			   opts->x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (opts, &global_options_set,
+			 param_prefetch_minimum_stride,
+			 aarch64_tune_params.prefetch->minimum_stride);
 
   /* Use the alternative scheduling-pressure algorithm by default.  */
-  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
-			 opts->x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+		       param_sched_pressure_algorithm,
+		       SCHED_PRESSURE_MODEL);
 
   /* If the user hasn't changed it via configure then set the default to 64 KB
      for the backend.  */
-  maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
-			 DEFAULT_STK_CLASH_GUARD_SIZE == 0
-			   ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE,
-			 opts->x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+		       param_stack_clash_protection_guard_size,
+		       (DEFAULT_STK_CLASH_GUARD_SIZE == 0
+			? 16 : DEFAULT_STK_CLASH_GUARD_SIZE));
 
   /* Validate the guard size.  */
-  int guard_size = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+  int guard_size = param_stack_clash_protection_guard_size;
 
   /* Enforce that interval is the same size as size so the mid-end does the
      right thing.  */
-  maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
-			 guard_size,
-			 opts->x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (opts, &global_options_set,
+		       param_stack_clash_protection_probe_interval,
+		       guard_size);
 
-  /* The maybe_set calls won't update the value if the user has explicitly set
-     one.  Which means we need to validate that probing interval and guard size
-     are equal.  */
+  /* SET_OPTION_IF_UNSET won't update the value if the user has explicitly
+     set one.  Which means we need to validate that probing interval and
+     guard size are equal.  */
   int probe_interval
-    = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = param_stack_clash_protection_probe_interval;
   if (guard_size != probe_interval)
     error ("stack clash guard size %<%d%> must be equal to probing interval "
 	   "%<%d%>", guard_size, probe_interval);
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index a7d5454b574..8f389ead32d 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -68,6 +68,7 @@  along with GCC; see the file COPYING3.  If not see
 #include "builtins.h"
 #include "rtl-iter.h"
 #include "flags.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -484,17 +485,14 @@  alpha_option_override (void)
     }
 
   if (line_size)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_l1_cache_line_size, line_size);
   if (l1_size)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_l1_cache_size, l1_size);
   if (l2_size)
-    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_l2_cache_size, l2_size);
 
   /* Do some sanity checks on the above options.  */
 
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index eddd3ca93ed..4437a5bc509 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -3521,9 +3521,8 @@  arm_option_override (void)
        but measurable, size reduction for PIC code.  Therefore, we decrease
        the bar for unrestricted expression hoisting to the cost of PIC address
        calculation, which is 2 instructions.  */
-    maybe_set_param_value (PARAM_GCSE_UNRESTRICTED_COST, 2,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_gcse_unrestricted_cost, 2);
 
   /* ARM EABI defaults to strict volatile bitfields.  */
   if (TARGET_AAPCS_BASED && flag_strict_volatile_bitfields < 0
@@ -3543,47 +3542,43 @@  arm_option_override (void)
      override the defaults unless we are tuning for a core we have
      researched values for.  */
   if (current_tune->prefetch.num_slots > 0)
-    maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-			   current_tune->prefetch.num_slots,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_simultaneous_prefetches,
+			 current_tune->prefetch.num_slots);
   if (current_tune->prefetch.l1_cache_line_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-			   current_tune->prefetch.l1_cache_line_size,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_l1_cache_line_size,
+			 current_tune->prefetch.l1_cache_line_size);
   if (current_tune->prefetch.l1_cache_size >= 0)
-    maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-			   current_tune->prefetch.l1_cache_size,
-			   global_options.x_param_values,
-			   global_options_set.x_param_values);
+    SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			 param_l1_cache_size,
+			 current_tune->prefetch.l1_cache_size);
 
   /* Look through ready list and all of queue for instructions
      relevant for L2 auto-prefetcher.  */
-  int param_sched_autopref_queue_depth;
+  int sched_autopref_queue_depth;
 
   switch (current_tune->sched_autopref)
     {
     case tune_params::SCHED_AUTOPREF_OFF:
-      param_sched_autopref_queue_depth = -1;
+      sched_autopref_queue_depth = -1;
       break;
 
     case tune_params::SCHED_AUTOPREF_RANK:
-      param_sched_autopref_queue_depth = 0;
+      sched_autopref_queue_depth = 0;
       break;
 
     case tune_params::SCHED_AUTOPREF_FULL:
-      param_sched_autopref_queue_depth = max_insn_queue_index + 1;
+      sched_autopref_queue_depth = max_insn_queue_index + 1;
       break;
 
     default:
       gcc_unreachable ();
     }
 
-  maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
-			 param_sched_autopref_queue_depth,
-			 global_options.x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+		       param_sched_autopref_queue_depth,
+		       sched_autopref_queue_depth);
 
   /* Currently, for slow flash data, we just disable literal pools.  We also
      disable it for pure-code.  */
@@ -31740,8 +31735,6 @@  arm_valid_target_attribute_p (tree fndecl, tree ARG_UNUSED (name),
 
   DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
 
-  finalize_options_struct (&func_options);
-
   return ret;
 }
 
diff --git a/gcc/config/i386/i386-options.c b/gcc/config/i386/i386-options.c
index dfc8ae23ba0..72cd6dcc98c 100644
--- a/gcc/config/i386/i386-options.c
+++ b/gcc/config/i386/i386-options.c
@@ -2618,22 +2618,14 @@  ix86_option_override_internal (bool main_args_p,
   if (!TARGET_SCHEDULE)
     opts->x_flag_schedule_insns_after_reload = opts->x_flag_schedule_insns = 0;
 
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-			 ix86_tune_cost->simultaneous_prefetches,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-			 ix86_tune_cost->prefetch_block,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-			 ix86_tune_cost->l1_cache_size,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-			 ix86_tune_cost->l2_cache_size,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
+		       ix86_tune_cost->simultaneous_prefetches);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
+		       ix86_tune_cost->prefetch_block);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
+		       ix86_tune_cost->l1_cache_size);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
+		       ix86_tune_cost->l2_cache_size);
 
   /* Enable sw prefetching at -O3 for CPUS that prefetching is helpful.  */
   if (opts->x_flag_prefetch_loop_arrays < 0
@@ -2868,13 +2860,9 @@  ix86_option_override_internal (bool main_args_p,
       = (cf_protection_level) (opts->x_flag_cf_protection | CF_SET);
 
   if (ix86_tune_features [X86_TUNE_AVOID_256FMA_CHAINS])
-    maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 256,
-			   opts->x_param_values,
-			   opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 256);
   else if (ix86_tune_features [X86_TUNE_AVOID_128FMA_CHAINS])
-    maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 128,
-			   opts->x_param_values,
-			   opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 128);
 
   /* PR86952: jump table usage with retpolines is slow.
      The PR provides some numbers about the slowness.  */
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 03a7082d2fc..f775697f982 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -5773,7 +5773,7 @@  get_probe_interval (void)
 {
   if (flag_stack_clash_protection)
     return (HOST_WIDE_INT_1U
-	    << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+	    << param_stack_clash_protection_probe_interval);
   else
     return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
 }
@@ -6942,7 +6942,7 @@  ix86_adjust_stack_and_probe_stack_clash (HOST_WIDE_INT size,
   /* If we allocate less than the size of the guard statically,
      then no probing is necessary, but we do need to allocate
      the stack.  */
-  if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
+  if (size < (1 << param_stack_clash_protection_guard_size))
     {
       pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
 			         GEN_INT (-size), -1,
@@ -21468,18 +21468,18 @@  static unsigned int
 ix86_max_noce_ifcvt_seq_cost (edge e)
 {
   bool predictable_p = predictable_edge_p (e);
-
-  enum compiler_param param
-    = (predictable_p
-       ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
-       : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
-  /* If we have a parameter set, use that, otherwise take a guess using
-     BRANCH_COST.  */
-  if (global_options_set.x_param_values[param])
-    return PARAM_VALUE (param);
+  if (predictable_p)
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+	return param_max_rtl_if_conversion_predictable_cost;
+    }
   else
-    return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+	return param_max_rtl_if_conversion_unpredictable_cost;
+    }
+
+  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
 }
 
 /* Return true if SEQ is a good candidate as a replacement for the
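
The hunk above also shows what replaces indexing into x_param_values: each param now carries its own "explicitly set" flag in global_options_set. In model form, with illustrative names (x_param_foo is not a real field):

/* One value field plus one was-it-set flag per param.  */
struct model_options     { int x_param_foo; };
struct model_options_set { int x_param_foo; };  /* nonzero if user-set */

static int
model_param_or_guess (const struct model_options *opts,
		      const struct model_options_set *opts_set,
		      int guess)
{
  return opts_set->x_param_foo ? opts->x_param_foo : guess;
}
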
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 7697e907aea..44f7f2eea06 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -7307,7 +7307,7 @@  ia64_adjust_cost (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
 
   if (dw == MIN_DEP_WEAK)
     /* Store and load are likely to alias, use higher cost to avoid stall.  */
-    return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
+    return param_sched_mem_true_dep_cost;
   else if (dw > MIN_DEP_WEAK)
     {
       /* Store and load are less likely to alias.  */
diff --git a/gcc/config/rs6000/rs6000-logue.c b/gcc/config/rs6000/rs6000-logue.c
index 04aae8052db..f0fd2065c02 100644
--- a/gcc/config/rs6000/rs6000-logue.c
+++ b/gcc/config/rs6000/rs6000-logue.c
@@ -1515,14 +1515,14 @@  static HOST_WIDE_INT
 get_stack_clash_protection_probe_interval (void)
 {
   return (HOST_WIDE_INT_1U
-	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+	  << param_stack_clash_protection_probe_interval);
 }
 
 static HOST_WIDE_INT
 get_stack_clash_protection_guard_size (void)
 {
   return (HOST_WIDE_INT_1U
-	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
+	  << param_stack_clash_protection_guard_size);
 }
 
 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index d9d275b01c0..0e9e5a79c54 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -80,6 +80,7 @@ 
 #include "tree-vrp.h"
 #include "tree-ssanames.h"
 #include "rs6000-internal.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -4511,46 +4512,38 @@  rs6000_option_override_internal (bool global_init_p)
 
   if (global_init_p)
     {
-      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-			     rs6000_cost->simultaneous_prefetches,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-			     rs6000_cost->cache_line_size,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_simultaneous_prefetches,
+			   rs6000_cost->simultaneous_prefetches);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_l1_cache_size,
+			   rs6000_cost->l1_cache_size);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_l1_cache_line_size,
+			   rs6000_cost->cache_line_size);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_l2_cache_size,
+			   rs6000_cost->l2_cache_size);
 
       /* Increase loop peeling limits based on performance analysis. */
-      maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_max_peeled_insns, 400);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_max_completely_peeled_insns, 400);
 
       /* Use the 'model' -fsched-pressure algorithm by default.  */
-      maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
-			     SCHED_PRESSURE_MODEL,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_sched_pressure_algorithm,
+			   SCHED_PRESSURE_MODEL);
 
-      /* unroll very small loops 2 time if no -funroll-loops.  */
+      /* Unroll very small loops 2 times if no -funroll-loops.  */
       if (!global_options_set.x_flag_unroll_loops
 	  && !global_options_set.x_flag_unroll_all_loops)
 	{
-	  maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 2,
-				 global_options.x_param_values,
-				 global_options_set.x_param_values);
-
-	  maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 20,
-				 global_options.x_param_values,
-				 global_options_set.x_param_values);
+	  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			       param_max_unroll_times, 2);
+	  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			       param_max_unrolled_insns, 20);
 
-	  /* If fweb or frename-registers are not specificed in command-line,
+	  /* If fweb or frename-registers are not specified in command-line,
 	     do not turn them on implicitly.  */
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index ff0b43c2c29..b3a75222ac2 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -10968,9 +10968,9 @@  allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
 {
   bool temp_reg_clobbered_p = false;
   HOST_WIDE_INT probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   HOST_WIDE_INT guard_size
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+    = 1 << param_stack_clash_protection_guard_size;
 
   if (flag_stack_clash_protection)
     {
@@ -11086,7 +11086,7 @@  s390_emit_prologue (void)
      only exception is when TARGET_BACKCHAIN is active, in which case
      we know *sp (offset 0) was written.  */
   HOST_WIDE_INT probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   HOST_WIDE_INT last_probe_offset
     = (TARGET_BACKCHAIN
        ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
@@ -15264,10 +15264,8 @@  s390_option_override_internal (struct gcc_options *opts,
      displacements.  Trim that value down to 4k if that happens.  This
      might result in too many probes being generated only on the
      oldest supported machine level z900.  */
-  if (!DISP_IN_RANGE ((1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL))))
-    set_param_value ("stack-clash-protection-probe-interval", 12,
-		     opts->x_param_values,
-		     opts_set->x_param_values);
+  if (!DISP_IN_RANGE ((1 << param_stack_clash_protection_probe_interval)))
+    param_stack_clash_protection_probe_interval = 12;
 
 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
   if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
@@ -15276,62 +15274,37 @@  s390_option_override_internal (struct gcc_options *opts,
 
   if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
     {
-      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
-      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
-    }
-
-  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_unrolled_insns,
+			   100);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_unroll_times, 32);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peeled_insns,
+			   2000);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peel_times,
+			   64);
+    }
+
+  SET_OPTION_IF_UNSET (opts, opts_set, param_max_pending_list_length,
+		       256);
   /* values for loop prefetching */
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size, 256);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size, 128);
   /* s390 has more than 2 levels and the size is much larger.  Since
      we are always running virtualized assume that we only get a small
      part of the caches above l1.  */
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size, 1500);
+  SET_OPTION_IF_UNSET (opts, opts_set,
+		       param_prefetch_min_insn_to_mem_ratio, 2);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches, 6);
 
   /* Use the alternative scheduling-pressure algorithm by default.  */
-  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
-
-  maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
-			 opts->x_param_values,
-			 opts_set->x_param_values);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_sched_pressure_algorithm, 2);
+  SET_OPTION_IF_UNSET (opts, opts_set, param_min_vect_loop_bound, 2);
 
   /* Use aggressive inlining parameters.  */
   if (opts->x_s390_tune >= PROCESSOR_2964_Z13)
     {
-      maybe_set_param_value (PARAM_INLINE_MIN_SPEEDUP, 2,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
-
-      maybe_set_param_value (PARAM_MAX_INLINE_INSNS_AUTO, 80,
-			     opts->x_param_values,
-			     opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_inline_min_speedup, 2);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 80);
     }
 
   /* Set the default alignment.  */
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index fe5e94118ef..473011aa1e6 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -61,6 +61,7 @@  along with GCC; see the file COPYING3.  If not see
 #include "context.h"
 #include "builtins.h"
 #include "tree-vector-builder.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -2010,7 +2011,7 @@  sparc_option_override (void)
       gcc_unreachable ();
     };
 
-  /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
+  /* param_simultaneous_prefetches is the number of prefetches that
      can run at the same time.  More important, it is the threshold
      defining when additional prefetches will be dropped by the
      hardware.
@@ -2033,21 +2034,20 @@  sparc_option_override (void)
      single-threaded program.  Experimental results show that setting
      this parameter to 32 works well when the number of threads is not
      high.  */
-  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
-			 ((sparc_cpu == PROCESSOR_ULTRASPARC
-			   || sparc_cpu == PROCESSOR_NIAGARA
-			   || sparc_cpu == PROCESSOR_NIAGARA2
-			   || sparc_cpu == PROCESSOR_NIAGARA3
-			   || sparc_cpu == PROCESSOR_NIAGARA4)
-			  ? 2
-			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
-			     ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
-				     || sparc_cpu == PROCESSOR_M8)
-				    ? 32 : 3))),
-			 global_options.x_param_values,
-			 global_options_set.x_param_values);
-
-  /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+		       param_simultaneous_prefetches,
+		       ((sparc_cpu == PROCESSOR_ULTRASPARC
+			 || sparc_cpu == PROCESSOR_NIAGARA
+			 || sparc_cpu == PROCESSOR_NIAGARA2
+			 || sparc_cpu == PROCESSOR_NIAGARA3
+			 || sparc_cpu == PROCESSOR_NIAGARA4)
+			? 2
+			: (sparc_cpu == PROCESSOR_ULTRASPARC3
+			   ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
+				   || sparc_cpu == PROCESSOR_M8)
+				  ? 32 : 3))));
+
+  /* param_l1_cache_line_size is the size of the L1 cache line, in
      bytes.
 
      The Oracle SPARC Architecture (previously the UltraSPARC
@@ -2064,38 +2064,33 @@  sparc_option_override (void)
      L2 and L3, but only 32B are brought into the L1D$. (Assuming it
      is a read_n prefetch, which is the only type which allocates to
      the L1.)  */
-  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
-			 (sparc_cpu == PROCESSOR_M8
-			  ? 64 : 32),
-			 global_options.x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+		       param_l1_cache_line_size,
+		       (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
 
-  /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
-     Hardvard level-1 caches) in kilobytes.  Both UltraSPARC and
+  /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
+     Harvard level-1 caches) in kilobytes.  Both UltraSPARC and
      Niagara processors feature a L1D$ of 16KB.  */
-  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
-			 ((sparc_cpu == PROCESSOR_ULTRASPARC
-			   || sparc_cpu == PROCESSOR_ULTRASPARC3
-			   || sparc_cpu == PROCESSOR_NIAGARA
-			   || sparc_cpu == PROCESSOR_NIAGARA2
-			   || sparc_cpu == PROCESSOR_NIAGARA3
-			   || sparc_cpu == PROCESSOR_NIAGARA4
-			   || sparc_cpu == PROCESSOR_NIAGARA7
-			   || sparc_cpu == PROCESSOR_M8)
-			  ? 16 : 64),
-			 global_options.x_param_values,
-			 global_options_set.x_param_values);
-
-
-  /* PARAM_L2_CACHE_SIZE is the size fo the L2 in kilobytes.  Note
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+		       param_l1_cache_size,
+		       ((sparc_cpu == PROCESSOR_ULTRASPARC
+			 || sparc_cpu == PROCESSOR_ULTRASPARC3
+			 || sparc_cpu == PROCESSOR_NIAGARA
+			 || sparc_cpu == PROCESSOR_NIAGARA2
+			 || sparc_cpu == PROCESSOR_NIAGARA3
+			 || sparc_cpu == PROCESSOR_NIAGARA4
+			 || sparc_cpu == PROCESSOR_NIAGARA7
+			 || sparc_cpu == PROCESSOR_M8)
+			? 16 : 64));
+
+  /* param_l2_cache_size is the size of the L2 in kilobytes.  Note
      that 512 is the default in params.def.  */
-  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
-			 ((sparc_cpu == PROCESSOR_NIAGARA4
-			   || sparc_cpu == PROCESSOR_M8)
-			  ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
-				   ? 256 : 512)),
-			 global_options.x_param_values,
-			 global_options_set.x_param_values);
+  SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+		       param_l2_cache_size,
+		       ((sparc_cpu == PROCESSOR_NIAGARA4
+			 || sparc_cpu == PROCESSOR_M8)
+			? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
+				 ? 256 : 512)));
   
 
   /* Disable save slot sharing for call-clobbered registers by default.
diff --git a/gcc/config/visium/visium.c b/gcc/config/visium/visium.c
index 8477008320c..b1ace70b5f7 100644
--- a/gcc/config/visium/visium.c
+++ b/gcc/config/visium/visium.c
@@ -57,6 +57,7 @@ 
 #include "tree-pass.h"
 #include "context.h"
 #include "builtins.h"
+#include "opts.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -457,9 +458,8 @@  visium_option_override (void)
       /* Allow the size of compilation units to double because of inlining.
 	 In practice the global size of the object code is hardly affected
 	 because the additional instructions will take up the padding.  */
-      maybe_set_param_value (PARAM_INLINE_UNIT_GROWTH, 100,
-			     global_options.x_param_values,
-			     global_options_set.x_param_values);
+      SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+			   param_inline_unit_growth, 100);
     }
 
   /* Likewise for loops.  */
diff --git a/gcc/coverage.c b/gcc/coverage.c
index bcba61c9a9a..ebe27a323b2 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -324,7 +324,7 @@  get_coverage_counts (unsigned counter, unsigned cfg_checksum,
 	}
       return NULL;
     }
-  if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+  if (param_profile_func_internal_id)
     elt.ident = current_function_funcdef_no + 1;
   else
     {
@@ -560,7 +560,7 @@  coverage_compute_profile_id (struct cgraph_node *n)
     {
       expanded_location xloc
 	= expand_location (DECL_SOURCE_LOCATION (n->decl));
-      bool use_name_only = (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID) == 0);
+      bool use_name_only = (param_profile_func_internal_id == 0);
 
       chksum = (use_name_only ? 0 : xloc.line);
       if (xloc.file)
@@ -628,7 +628,7 @@  coverage_begin_function (unsigned lineno_checksum, unsigned cfg_checksum)
 
   /* Announce function */
   offset = gcov_write_tag (GCOV_TAG_FUNCTION);
-  if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+  if (param_profile_func_internal_id)
     gcov_write_unsigned (current_function_funcdef_no + 1);
   else
     {
@@ -682,7 +682,7 @@  coverage_end_function (unsigned lineno_checksum, unsigned cfg_checksum)
 
       item = ggc_alloc<coverage_data> ();
 
-      if (PARAM_VALUE (PARAM_PROFILE_FUNC_INTERNAL_ID))
+      if (param_profile_func_internal_id)
 	item->ident = current_function_funcdef_no + 1;
       else
 	{
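
The four coverage hunks all toggle the same decision; condensed into a model (assuming profile-func-internal-id selects between a compilation-local counter and a source-name based id):

static unsigned
model_profile_ident (int internal_id_p, unsigned funcdef_no,
		     unsigned name_based_id)
{
  /* funcdef_no + 1 is stable only within one compilation; the
     name-based id survives recompilation.  */
  return internal_id_p ? funcdef_no + 1 : name_based_id;
}
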
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index cd0d9551aa3..be846a12141 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -5358,7 +5358,7 @@  namespace_hints::namespace_hints (location_t loc, tree name)
 
   m_candidates = vNULL;
   m_limited = false;
-  m_limit = PARAM_VALUE (CXX_MAX_NAMESPACES_FOR_DIAGNOSTIC_HELP);
+  m_limit = param_cxx_max_namespaces_for_diagnostic_help;
 
   /* Breadth-first search of namespaces.  Up to limit namespaces
      searched (limit zero == unlimited).  */
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 38a15d14620..ce294171261 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -1498,7 +1498,7 @@  comptypes (tree t1, tree t2, int strict)
 	   perform a deep check. */
 	return structural_comptypes (t1, t2, strict);
 
-      if (flag_checking && USE_CANONICAL_TYPES)
+      if (flag_checking && param_use_canonical_types)
 	{
 	  bool result = structural_comptypes (t1, t2, strict);
 	  
@@ -1519,7 +1519,7 @@  comptypes (tree t1, tree t2, int strict)
 	  
 	  return result;
 	}
-      if (!flag_checking && USE_CANONICAL_TYPES)
+      if (!flag_checking && param_use_canonical_types)
 	return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
       else
 	return structural_comptypes (t1, t2, strict);
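
The comptypes change keeps three strategies in play; a condensed model of the control flow above, with plain ints standing in for the tree comparisons:

static int
model_comptypes (int checking, int use_canonical,
		 int canonical_eq, int structural_eq)
{
  if (checking && use_canonical)
    /* Checking builds compute both answers and verify agreement.  */
    return structural_eq;
  if (use_canonical)
    return canonical_eq;	/* O(1) canonical-pointer compare.  */
  return structural_eq;		/* Deep structural walk.  */
}
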
diff --git a/gcc/cse.c b/gcc/cse.c
index 097fb94e773..b1c0276b0f7 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -6414,7 +6414,7 @@  cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
   if (follow_jumps)
     {
       bb = data->path[path_size - 1].bb;
-      while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH))
+      while (bb && path_size < param_max_cse_path_length)
 	{
 	  if (single_succ_p (bb))
 	    e = single_succ_edge (bb);
@@ -6592,7 +6592,7 @@  cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
 	     FIXME: This is a real kludge and needs to be done some other
 		    way.  */
 	  if (NONDEBUG_INSN_P (insn)
-	      && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS))
+	      && num_insns++ > param_max_cse_insns)
 	    {
 	      flush_hash_table ();
 	      num_insns = 0;
@@ -6736,7 +6736,7 @@  cse_main (rtx_insn *f ATTRIBUTE_UNUSED, int nregs)
   init_cse_reg_info (nregs);
 
   ebb_data.path = XNEWVEC (struct branch_path,
-			   PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+			   param_max_cse_path_length);
 
   cse_cfg_altered = false;
   cse_jumps_altered = false;
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 500793ba40e..1745256944a 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -2297,7 +2297,7 @@  cselib_invalidate_mem (rtx mem_rtx)
 	      p = &(*p)->next;
 	      continue;
 	    }
-	  if (num_mems < PARAM_VALUE (PARAM_MAX_CSELIB_MEMORY_LOCATIONS)
+	  if (num_mems < param_max_cselib_memory_locations
 	      && ! canon_anti_dependence (x, false, mem_rtx,
 					  GET_MODE (mem_rtx), mem_addr))
 	    {
diff --git a/gcc/dse.c b/gcc/dse.c
index 5d8c6f990ec..76abd873c78 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -2657,7 +2657,7 @@  dse_step1 (void)
 
   /* For -O1 reduce the maximum number of active local stores for RTL DSE
      since this can consume huge amounts of memory (PR89115).  */
-  int max_active_local_stores = PARAM_VALUE (PARAM_MAX_DSE_ACTIVE_LOCAL_STORES);
+  int max_active_local_stores = param_max_dse_active_local_stores;
   if (optimize < 2)
     max_active_local_stores /= 10;
 
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index feff49aa44f..5f3e549a7fe 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -2762,15 +2762,15 @@  set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last)
   set_last_insn (last);
   cur_insn_uid = 0;
 
-  if (MIN_NONDEBUG_INSN_UID || MAY_HAVE_DEBUG_INSNS)
+  if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS)
     {
       int debug_count = 0;
 
-      cur_insn_uid = MIN_NONDEBUG_INSN_UID - 1;
+      cur_insn_uid = param_min_nondebug_insn_uid - 1;
       cur_debug_insn_uid = 0;
 
       for (insn = first; insn; insn = NEXT_INSN (insn))
-	if (INSN_UID (insn) < MIN_NONDEBUG_INSN_UID)
+	if (INSN_UID (insn) < param_min_nondebug_insn_uid)
 	  cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn));
 	else
 	  {
@@ -2780,7 +2780,7 @@  set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last)
 	  }
 
       if (debug_count)
-	cur_debug_insn_uid = MIN_NONDEBUG_INSN_UID + debug_count;
+	cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count;
       else
 	cur_debug_insn_uid++;
     }
@@ -3445,10 +3445,10 @@  get_max_insn_count (void)
      differences due to debug insns, and not be affected by
      -fmin-insn-uid, to avoid excessive table size and to simplify
      debugging of -fcompare-debug failures.  */
-  if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
     n -= cur_debug_insn_uid;
   else
-    n -= MIN_NONDEBUG_INSN_UID;
+    n -= param_min_nondebug_insn_uid;
 
   return n;
 }
@@ -4085,7 +4085,7 @@  make_debug_insn_raw (rtx pattern)
 
   insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
   INSN_UID (insn) = cur_debug_insn_uid++;
-  if (cur_debug_insn_uid > MIN_NONDEBUG_INSN_UID)
+  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
     INSN_UID (insn) = cur_insn_uid++;
 
   PATTERN (insn) = pattern;
@@ -5860,8 +5860,8 @@  init_emit (void)
 {
   set_first_insn (NULL);
   set_last_insn (NULL);
-  if (MIN_NONDEBUG_INSN_UID)
-    cur_insn_uid = MIN_NONDEBUG_INSN_UID;
+  if (param_min_nondebug_insn_uid)
+    cur_insn_uid = param_min_nondebug_insn_uid;
   else
     cur_insn_uid = 1;
   cur_debug_insn_uid = 1;
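
The emit-rtl hunks all encode the same UID partition; a worked example with an illustrative --param min-nondebug-insn-uid=10000:

/* Debug insns draw UIDs from [1, N), everything else from [N, ...),
   so a single compare classifies an insn.  */
static int
model_debug_uid_p (int uid, int min_nondebug_insn_uid)
{
  return uid < min_nondebug_insn_uid;	/* e.g. 42 < 10000 -> debug */
}
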
diff --git a/gcc/explow.c b/gcc/explow.c
index 83c786366c1..93e31cc3ba1 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1837,7 +1837,7 @@  compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr,
 {
   /* Round SIZE down to STACK_CLASH_PROTECTION_PROBE_INTERVAL */
   *probe_interval
-    = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+    = 1 << param_stack_clash_protection_probe_interval;
   *rounded_size = simplify_gen_binary (AND, Pmode, size,
 				        GEN_INT (-*probe_interval));
 
diff --git a/gcc/final.c b/gcc/final.c
index 7cf9ef1effd..3b9a88dc8a7 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -657,7 +657,7 @@  compute_alignments (void)
     }
   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
   profile_count count_threshold = cfun->cfg->count_max.apply_scale
-		 (1, PARAM_VALUE (PARAM_ALIGN_THRESHOLD));
+		 (1, param_align_threshold);
 
   if (dump_file)
     {
@@ -743,7 +743,7 @@  compute_alignments (void)
 	  && branch_count + fallthru_count > count_threshold
 	  && (branch_count
 	      > fallthru_count.apply_scale
-		    (PARAM_VALUE (PARAM_ALIGN_LOOP_ITERATIONS), 1)))
+		    (param_align_loop_iterations, 1)))
 	{
 	  align_flags alignment = LOOP_ALIGN (label);
 	  if (dump_file)
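
To make the two alignment tests above concrete (illustrative defaults align-threshold=100 and align-loop-iterations=4; the real code uses profile_count, modeled here with long):

static int
model_align_loop_p (long branch, long fallthru, long count_max)
{
  long count_threshold = count_max / 100;	/* align-threshold */
  return branch + fallthru > count_threshold
	 && branch > fallthru * 4;		/* align-loop-iterations */
}
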
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 88a069f4306..931be36df84 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -5925,9 +5925,9 @@  fold_range_test (location_t loc, enum tree_code code, tree type,
      short-circuited branch and the underlying object on both sides
      is the same, make a non-short-circuit operation.  */
   bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-  if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+  if (param_logical_op_non_short_circuit != -1)
     logical_op_non_short_circuit
-      = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+      = param_logical_op_non_short_circuit;
   if (logical_op_non_short_circuit
       && !flag_sanitize_coverage
       && lhs != 0 && rhs != 0
@@ -8596,9 +8596,9 @@  fold_truth_andor (location_t loc, enum tree_code code, tree type,
     return tem;
 
   bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-  if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+  if (param_logical_op_non_short_circuit != -1)
     logical_op_non_short_circuit
-      = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+      = param_logical_op_non_short_circuit;
   if (logical_op_non_short_circuit
       && !flag_sanitize_coverage
       && (code == TRUTH_AND_EXPR
@@ -13361,7 +13361,7 @@  tree_single_nonnegative_warnv_p (tree t, bool *strict_overflow_p, int depth)
 	 would not, passes that need this information could be revised
 	 to provide it through dataflow propagation.  */
       return (!name_registered_for_update_p (t)
-	      && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+	      && depth < param_max_ssa_name_query_depth
 	      && gimple_stmt_nonnegative_warnv_p (SSA_NAME_DEF_STMT (t),
 						  strict_overflow_p, depth));
 
@@ -14009,7 +14009,7 @@  integer_valued_real_single_p (tree t, int depth)
 	 would not, passes that need this information could be revised
 	 to provide it through dataflow propagation.  */
       return (!name_registered_for_update_p (t)
-	      && depth < PARAM_VALUE (PARAM_MAX_SSA_NAME_QUERY_DEPTH)
+	      && depth < param_max_ssa_name_query_depth
 	      && gimple_stmt_integer_valued_real_p (SSA_NAME_DEF_STMT (t),
 						    depth));
 
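Both fold-const hunks repeat one override pattern: the target macro supplies the default, and the param wins whenever it is not -1. As a model:

static int
model_non_short_circuit (int target_default, int param_value)
{
  /* param_value is presumably -1 unless the user passed
     --param logical-op-non-short-circuit=0|1.  */
  return param_value != -1 ? param_value : target_default;
}
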
diff --git a/gcc/gcse.c b/gcc/gcse.c
index aeb59c645e1..4f63bff6672 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -799,10 +799,10 @@  want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
 		      && optimize_function_for_size_p (cfun));
 	  cost = set_src_cost (x, mode, 0);
 
-	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
+	  if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
 	    {
 	      max_distance
-		= ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
+		= ((HOST_WIDE_INT)param_gcse_cost_distance_ratio * cost) / 10;
 	      if (max_distance == 0)
 		return 0;
 
@@ -1844,7 +1844,7 @@  prune_insertions_deletions (int n_elems)
      PRUNE_EXPRS.  */
   for (j = 0; j < (unsigned) n_elems; j++)
     if (deletions[j]
-	&& ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
+	&& (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
       bitmap_set_bit (prune_exprs, j);
 
   /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
@@ -3133,7 +3133,7 @@  hoist_code (void)
      expressions, nothing gets hoisted from the entry block.  */
   FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
     {
-      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
+      domby = get_dominated_to_depth (CDI_DOMINATORS, bb, param_max_hoist_depth);
 
       if (domby.length () == 0)
 	continue;
@@ -3982,9 +3982,9 @@  update_ld_motion_stores (struct gcse_expr * expr)
 bool
 gcse_or_cprop_is_too_expensive (const char *pass)
 {
-  unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
-				 * SBITMAP_SET_SIZE (max_reg_num ())
-				 * sizeof (SBITMAP_ELT_TYPE));
+  int memory_request = (n_basic_blocks_for_fn (cfun)
+			* SBITMAP_SET_SIZE (max_reg_num ())
+			* sizeof (SBITMAP_ELT_TYPE));
   
   /* Trying to perform global optimizations on flow graphs which have
      a high connectivity will take a long time and is unlikely to be
@@ -4007,7 +4007,7 @@  gcse_or_cprop_is_too_expensive (const char *pass)
 
   /* If allocating memory for the dataflow bitmaps would take up too much
      storage it's better just to disable the optimization.  */
-  if (memory_request > MAX_GCSE_MEMORY)
+  if (memory_request > param_max_gcse_memory)
     {
       warning (OPT_Wdisabled_optimization,
 	       "%s: %d basic blocks and %d registers; "
diff --git a/gcc/ggc-common.c b/gcc/ggc-common.c
index f94c39f110d..cf7a4f68405 100644
--- a/gcc/ggc-common.c
+++ b/gcc/ggc-common.c
@@ -814,8 +814,8 @@  void
 init_ggc_heuristics (void)
 {
 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
-  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
-  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
+  param_ggc_min_expand = ggc_min_expand_heuristic ();
+  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
 #endif
 }
 
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index b443d87ea57..b0d26256072 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -2185,9 +2185,9 @@  ggc_collect (void)
      total allocations haven't expanded much since the last
      collection.  */
   float allocated_last_gc =
-    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
+    MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * 1024);
 
-  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
+  float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
     return;
 
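A worked example of the trigger above, using illustrative values ggc-min-heapsize=4096 (KB) and ggc-min-expand=30 (%): the floor is 4 MiB, min_expand is 1.2 MiB, so no collection happens until more than ~5.2 MiB is live.

static float
model_collect_threshold (float allocated_last_gc,
			 long ggc_min_heapsize_kb, int ggc_min_expand_pct)
{
  float floor_bytes = (float) ggc_min_heapsize_kb * 1024;
  float base = allocated_last_gc > floor_bytes
	       ? allocated_last_gc : floor_bytes;
  return base + base * ggc_min_expand_pct / 100;
}
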
diff --git a/gcc/gimple-loop-interchange.cc b/gcc/gimple-loop-interchange.cc
index b56155b1fef..2a0cf6e41d8 100644
--- a/gcc/gimple-loop-interchange.cc
+++ b/gcc/gimple-loop-interchange.cc
@@ -78,14 +78,14 @@  along with GCC; see the file COPYING3.  If not see
    simple reduction of inner loop and double reduction of the loop nest.  */
 
 /* Maximum number of stmts in each loop that should be interchanged.  */
-#define MAX_NUM_STMT    (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_MAX_NUM_STMTS))
+#define MAX_NUM_STMT    (param_loop_interchange_max_num_stmts)
 /* Maximum number of data references in loop nest.  */
-#define MAX_DATAREFS    (PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+#define MAX_DATAREFS    (param_loop_max_datarefs_for_datadeps)
 
 /* Comparison ratio of access stride between inner/outer loops to be
    interchanged.  This is the minimum stride ratio for loop interchange
    to be profitable.  */
-#define OUTER_STRIDE_RATIO  (PARAM_VALUE (PARAM_LOOP_INTERCHANGE_STRIDE_RATIO))
+#define OUTER_STRIDE_RATIO  (param_loop_interchange_stride_ratio)
 /* The same as above, but we require higher ratio for interchanging the
    innermost two loops.  */
 #define INNER_STRIDE_RATIO  ((OUTER_STRIDE_RATIO) + 1)
diff --git a/gcc/gimple-loop-jam.c b/gcc/gimple-loop-jam.c
index 899653b0863..c1fc9ba9916 100644
--- a/gcc/gimple-loop-jam.c
+++ b/gcc/gimple-loop-jam.c
@@ -572,15 +572,15 @@  tree_loop_unroll_and_jam (void)
       /* We regard a user-specified minimum percentage of zero as a request
 	 to ignore all profitability concerns and apply the transformation
 	 always.  */
-      if (!PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+      if (!param_unroll_jam_min_percent)
 	profit_unroll = MAX(2, profit_unroll);
       else if (removed * 100 / datarefs.length ()
-	  < (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+	  < (unsigned)param_unroll_jam_min_percent)
 	profit_unroll = 1;
       if (unroll_factor > profit_unroll)
 	unroll_factor = profit_unroll;
-      if (unroll_factor > (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL))
-	unroll_factor = PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL);
+      if (unroll_factor > (unsigned)param_unroll_jam_max_unroll)
+	unroll_factor = param_unroll_jam_max_unroll;
       unroll = (unroll_factor > 1
 		&& can_unroll_loop_p (outer, unroll_factor, &desc));
 
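The clamping logic above, condensed into a model with a worked run (parameter values illustrative): 10 datarefs with 3 removed gives 30%, so min_percent=15 keeps the transform profitable while min_percent=40 collapses profit to 1; the result is finally capped by unroll-jam-max-unroll.

static unsigned
model_unroll_factor (unsigned factor, unsigned profit,
		     unsigned removed, unsigned n_datarefs,
		     unsigned min_percent, unsigned max_unroll)
{
  if (!min_percent)
    profit = profit > 2 ? profit : 2;	/* zero means: always apply */
  else if (removed * 100 / n_datarefs < min_percent)
    profit = 1;				/* not profitable enough */
  if (factor > profit)
    factor = profit;
  if (factor > max_unroll)
    factor = max_unroll;
  return factor;
}
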
diff --git a/gcc/gimple-loop-versioning.cc b/gcc/gimple-loop-versioning.cc
index 1664d875e80..18456e87958 100644
--- a/gcc/gimple-loop-versioning.cc
+++ b/gcc/gimple-loop-versioning.cc
@@ -605,8 +605,8 @@  unsigned int
 loop_versioning::max_insns_for_loop (class loop *loop)
 {
   return (loop->inner
-	  ? PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_OUTER_INSNS)
-	  : PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_INNER_INSNS));
+	  ? param_loop_versioning_max_outer_insns
+	  : param_loop_versioning_max_inner_insns);
 }
 
 /* Return true if for cost reasons we should avoid versioning any loop
diff --git a/gcc/gimple-ssa-split-paths.c b/gcc/gimple-ssa-split-paths.c
index 5bf45eeac28..49a0834d647 100644
--- a/gcc/gimple-ssa-split-paths.c
+++ b/gcc/gimple-ssa-split-paths.c
@@ -366,7 +366,7 @@  is_feasible_trace (basic_block bb)
 
-  /* Upper Hard limit on the number statements to copy.  */
+  /* Hard upper limit on the number of statements to copy.  */
   if (num_stmts_in_join
-      >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+      >= param_max_jump_thread_duplication_stmts)
     return false;
 
   return true;
diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index 270159b518d..19c0b875a90 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -2502,7 +2502,7 @@  imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
     return false;
 
   bool allow_unaligned
-    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
   /* Punt if the combined store would not be aligned and we need alignment.  */
   if (!allow_unaligned)
     {
@@ -2762,7 +2762,7 @@  imm_store_chain_info::coalesce_immediate_stores ()
 
       if (info->order >= merged_store->first_nonmergeable_order
 	  || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
-	      > (unsigned) PARAM_VALUE (PARAM_STORE_MERGING_MAX_SIZE)))
+	      > (unsigned) param_store_merging_max_size))
 	;
 
       /* |---store 1---|
@@ -3668,7 +3668,7 @@  imm_store_chain_info::output_merged_store (merged_store_group *group)
 
   auto_vec<class split_store *, 32> split_stores;
   bool allow_unaligned_store
-    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
+    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
   bool allow_unaligned_load = allow_unaligned_store;
   bool bzero_first = false;
   if (group->stores[0]->rhs_code == INTEGER_CST
@@ -4752,7 +4752,7 @@  pass_store_merging::process_store (gimple *stmt)
       /* If we reach the limit of stores to merge in a chain terminate and
 	 process the chain now.  */
       if ((*chain_info)->m_store_info.length ()
-	  == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
+	  == (unsigned int) param_max_stores_to_merge)
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    fprintf (dump_file,
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index de7f36015ef..ab5011a7a1e 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -546,7 +546,7 @@  find_basis_for_base_expr (slsr_cand_t c, tree base_expr)
 
   // Limit potential of N^2 behavior for long candidate chains.
   int iters = 0;
-  int max_iters = PARAM_VALUE (PARAM_MAX_SLSR_CANDIDATE_SCAN);
+  int max_iters = param_max_slsr_candidate_scan;
 
   mapping_key.base_expr = base_expr;
   chain = base_cand_map->find (&mapping_key);
diff --git a/gcc/graphite-isl-ast-to-gimple.c b/gcc/graphite-isl-ast-to-gimple.c
index 40d1e8de6ae..11a4da0a623 100644
--- a/gcc/graphite-isl-ast-to-gimple.c
+++ b/gcc/graphite-isl-ast-to-gimple.c
@@ -203,7 +203,7 @@  class translate_isl_ast_to_gimple
   {
     codegen_error = true;
     gcc_assert (! flag_checking
-		|| PARAM_VALUE (PARAM_GRAPHITE_ALLOW_CODEGEN_ERRORS));
+		|| param_graphite_allow_codegen_errors);
   }
 
   bool is_constant (tree op) const
@@ -1383,7 +1383,7 @@  scop_to_isl_ast (scop_p scop)
 {
   int old_err = isl_options_get_on_error (scop->isl_context);
   int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
-  int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+  int max_operations = param_max_isl_operations;
   if (max_operations)
     isl_ctx_set_max_operations (scop->isl_context, max_operations);
   isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
diff --git a/gcc/graphite-optimize-isl.c b/gcc/graphite-optimize-isl.c
index c4b8f3bc876..1dc9c3cb7be 100644
--- a/gcc/graphite-optimize-isl.c
+++ b/gcc/graphite-optimize-isl.c
@@ -64,7 +64,7 @@  get_schedule_for_node_st (__isl_take isl_schedule_node *node, void *user)
   if (type != isl_schedule_node_leaf)
     return node;
 
-  long tile_size = PARAM_VALUE (PARAM_LOOP_BLOCK_TILE_SIZE);
+  long tile_size = param_loop_block_tile_size;
   if (dims <= 1
       || tile_size == 0
       || !isl_schedule_node_band_get_permutable (node))
@@ -115,7 +115,7 @@  optimize_isl (scop_p scop)
 {
   int old_err = isl_options_get_on_error (scop->isl_context);
   int old_max_operations = isl_ctx_get_max_operations (scop->isl_context);
-  int max_operations = PARAM_VALUE (PARAM_MAX_ISL_OPERATIONS);
+  int max_operations = param_max_isl_operations;
   if (max_operations)
     isl_ctx_set_max_operations (scop->isl_context, max_operations);
   isl_options_set_on_error (scop->isl_context, ISL_ON_ERROR_CONTINUE);
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index 489d0b93b42..1505a13b860 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -1639,7 +1639,7 @@  build_scops (vec<scop_p> *scops)
 	  continue;
 	}
 
-      unsigned max_arrays = PARAM_VALUE (PARAM_GRAPHITE_MAX_ARRAYS_PER_SCOP);
+      unsigned max_arrays = param_graphite_max_arrays_per_scop;
       if (max_arrays > 0
 	  && scop->drs.length () >= max_arrays)
 	{
@@ -1652,7 +1652,7 @@  build_scops (vec<scop_p> *scops)
 	}
 
       find_scop_parameters (scop);
-      graphite_dim_t max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
+      graphite_dim_t max_dim = param_graphite_max_nb_scop_params;
       if (max_dim > 0
 	  && scop_nb_params (scop) > max_dim)
 	{
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 41cf1f362e8..c634e83441a 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -584,7 +584,7 @@  set_modulo_params (int ii, int max_stages, int insns, int max_uid)
   modulo_max_stages = max_stages;
   modulo_n_insns = insns;
   modulo_iter0_max_uid = max_uid;
-  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
+  modulo_backtracks_left = param_max_modulo_backtrack_attempts;
 }
 
 /* A structure to record a pair of insns where the first one is a real
@@ -2712,7 +2712,7 @@  rank_for_schedule (const void *x, const void *y)
   if (flag_sched_critical_path_heuristic && priority_val)
     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
 
-  if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
+  if (param_sched_autopref_queue_depth >= 0)
     {
       int autopref = autopref_rank_for_schedule (tmp, tmp2);
       if (autopref != 0)
@@ -3413,7 +3413,7 @@  model_remove_from_worklist (struct model_insn_info *insn)
 }
 
 /* Add INSN to the model worklist.  Start looking for a suitable position
-   between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+   between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
    insns either side.  A null PREV indicates the beginning of the list and
    a null NEXT indicates the end.  */
 
@@ -3424,7 +3424,7 @@  model_add_to_worklist (struct model_insn_info *insn,
 {
   int count;
 
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   if (count > 0 && prev && model_order_p (insn, prev))
     do
       {
@@ -3452,7 +3452,7 @@  model_promote_insn (struct model_insn_info *insn)
   int count;
 
   prev = insn->prev;
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   while (count > 0 && prev && model_order_p (insn, prev))
     {
       count--;
@@ -3738,7 +3738,7 @@  model_choose_insn (void)
     {
       fprintf (sched_dump, ";;\t+--- worklist:\n");
       insn = model_worklist;
-      count = MAX_SCHED_READY_INSNS;
+      count = param_max_sched_ready_insns;
       while (count > 0 && insn)
 	{
 	  fprintf (sched_dump, ";;\t+---   %d [%d, %d, %d, %d]\n",
@@ -3770,7 +3770,7 @@  model_choose_insn (void)
 
      Failing that, just pick the highest-priority instruction in the
      worklist.  */
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   insn = model_worklist;
   fallback = 0;
   for (;;)
@@ -5147,12 +5147,12 @@  queue_to_ready (struct ready_list *ready)
       /* If the ready list is full, delay the insn for 1 cycle.
 	 See the comment in schedule_block for the rationale.  */
       if (!reload_completed
-	  && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+	  && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
 	      || (sched_pressure == SCHED_PRESSURE_MODEL
-		  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
+		  /* Limit pressure recalculations to param_max_sched_ready_insns
 		     instructions too.  */
 		  && model_index (insn) > (model_curr_point
-					   + MAX_SCHED_READY_INSNS)))
+					   + param_max_sched_ready_insns)))
 	  && !(sched_pressure == SCHED_PRESSURE_MODEL
 	       && model_curr_point < model_num_insns
 	       /* Always allow the next model instruction to issue.  */
@@ -5743,7 +5743,7 @@  autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
   /* Exit early if the param forbids this or if we're not entering here through
      normal haifa scheduling.  This can happen if selective scheduling is
      explicitly enabled.  */
-  if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
+  if (!insn_queue || param_sched_autopref_queue_depth <= 0)
     return 0;
 
   if (sched_verbose >= 2 && ready_index == 0)
@@ -5796,14 +5796,14 @@  autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
 	    }
 	}
 
-      if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
+      if (param_sched_autopref_queue_depth == 1)
 	continue;
 
       /* Everything from the current queue slot should have been moved to
 	 the ready list.  */
       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
 
-      int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
+      int n_stalls = param_sched_autopref_queue_depth - 1;
       if (n_stalls > max_insn_queue_index)
 	n_stalls = max_insn_queue_index;
 
@@ -6552,14 +6552,15 @@  schedule_block (basic_block *target_bb, state_t init_state)
      time in the worst case.  Before reload we are more likely to have
      big lists so truncate them to a reasonable size.  */
   if (!reload_completed
-      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
+      && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
     {
       ready_sort_debug (&ready);
       ready_sort_real (&ready);
 
-      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
+      /* Find first free-standing insn past param_max_sched_ready_insns.
          If there are debug insns, we know they're first.  */
-      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
+      for (i = param_max_sched_ready_insns + ready.n_debug;
+	   i < ready.n_ready; i++)
 	if (!SCHED_GROUP_P (ready_element (&ready, i)))
 	  break;
 
@@ -7258,7 +7258,7 @@  sched_init (void)
 	   && !reload_completed
 	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
     sched_pressure = ((enum sched_pressure_algorithm)
-		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
+		      param_sched_pressure_algorithm);
   else
     sched_pressure = SCHED_PRESSURE_NONE;
 
@@ -7274,9 +7274,9 @@  sched_init (void)
       if (spec_info->mask != 0)
         {
           spec_info->data_weakness_cutoff =
-            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
+            (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
           spec_info->control_weakness_cutoff =
-            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
+            (param_sched_spec_prob_cutoff
              * REG_BR_PROB_BASE) / 100;
         }
       else
diff --git a/gcc/hsa-gen.c b/gcc/hsa-gen.c
index 436f4c5f9f5..c4a03f938cf 100644
--- a/gcc/hsa-gen.c
+++ b/gcc/hsa-gen.c
@@ -5940,7 +5940,7 @@  init_prologue (void)
   unsigned index = hsa_get_number_decl_kernel_mappings ();
 
   /* Emit store to debug argument.  */
-  if (PARAM_VALUE (PARAM_HSA_GEN_DEBUG_STORES) > 0)
+  if (param_hsa_gen_debug_stores > 0)
     set_debug_value (prologue, new hsa_op_immed (1000 + index, BRIG_TYPE_U64));
 }
 
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 8bc6f53cb38..5df8a43a5f7 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -3311,7 +3311,7 @@  bb_ok_for_noce_convert_multiple_sets (basic_block test_bb)
 {
   rtx_insn *insn;
   unsigned count = 0;
-  unsigned param = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+  unsigned param = param_max_rtl_if_conversion_insns;
 
   FOR_BB_INSNS (test_bb, insn)
     {
@@ -3838,7 +3838,7 @@  cond_move_process_if_block (struct noce_if_info *if_info)
   vec<rtx> else_regs = vNULL;
   unsigned int i;
   int success_p = FALSE;
-  int limit = PARAM_VALUE (PARAM_MAX_RTL_IF_CONVERSION_INSNS);
+  int limit = param_max_rtl_if_conversion_insns;
 
   /* Build a mapping for each block to the value used for each
      register.  */
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index c6bd265f593..5f52a4b0ad7 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -1609,7 +1609,7 @@  ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
 	return false;
       }
 
-  if (values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
+  if (values_count == param_ipa_cp_value_list_size)
     {
       /* We can only free sources, not the values themselves, because sources
 	 of other values in this SCC might point to them.   */
@@ -2086,7 +2086,7 @@  merge_agg_lats_step (class ipcp_param_lattices *dest_plats,
 	  set_agg_lats_to_bottom (dest_plats);
 	  return false;
 	}
-      if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
+      if (dest_plats->aggs_count == param_ipa_max_agg_items)
 	return false;
       dest_plats->aggs_count++;
       new_al = ipcp_agg_lattice_pool.allocate ();
@@ -2639,11 +2639,11 @@  devirtualization_time_bonus (struct cgraph_node *node,
       int size = ipa_size_summaries->get (callee)->size;
       /* FIXME: The values below need re-considering and perhaps also
 	 integrating into the cost metrics, at lest in some very basic way.  */
-      if (size <= MAX_INLINE_INSNS_AUTO / 4)
+      if (size <= param_max_inline_insns_auto / 4)
 	res += 31 / ((int)speculative + 1);
-      else if (size <= MAX_INLINE_INSNS_AUTO / 2)
+      else if (size <= param_max_inline_insns_auto / 2)
 	res += 15 / ((int)speculative + 1);
-      else if (size <= MAX_INLINE_INSNS_AUTO
+      else if (size <= param_max_inline_insns_auto
 	       || DECL_DECLARED_INLINE_P (callee->decl))
 	res += 7 / ((int)speculative + 1);
     }
@@ -2658,7 +2658,7 @@  hint_time_bonus (ipa_hints hints)
 {
   int result = 0;
   if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
-    result += PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
+    result += param_ipa_cp_loop_hint_bonus;
   return result;
 }
 
@@ -2670,11 +2670,11 @@  incorporate_penalties (ipa_node_params *info, int64_t evaluation)
 {
   if (info->node_within_scc)
     evaluation = (evaluation
-		  * (100 - PARAM_VALUE (PARAM_IPA_CP_RECURSION_PENALTY))) / 100;
+		  * (100 - param_ipa_cp_recursion_penalty)) / 100;
 
   if (info->node_calling_single_call)
     evaluation = (evaluation
-		  * (100 - PARAM_VALUE (PARAM_IPA_CP_SINGLE_CALL_PENALTY)))
+		  * (100 - param_ipa_cp_single_call_penalty))
       / 100;
 
   return evaluation;
@@ -2714,10 +2714,10 @@  good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
 		 ", threshold: %i\n",
 		 info->node_within_scc ? ", scc" : "",
 		 info->node_calling_single_call ? ", single_call" : "",
-		 evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+		 evaluation, param_ipa_cp_eval_threshold);
 	}
 
-      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+      return evaluation >= param_ipa_cp_eval_threshold;
     }
   else
     {
@@ -2732,9 +2732,9 @@  good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
 		 time_benefit, size_cost, freq_sum,
 		 info->node_within_scc ? ", scc" : "",
 		 info->node_calling_single_call ? ", single_call" : "",
-		 evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
+		 evaluation, param_ipa_cp_eval_threshold);
 
-      return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
+      return evaluation >= param_ipa_cp_eval_threshold;
     }
 }
 
@@ -3349,9 +3349,9 @@  ipcp_propagate_stage (class ipa_topo_info *topo)
   }
 
   max_new_size = overall_size;
-  if (max_new_size < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
-    max_new_size = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
-  max_new_size += max_new_size * PARAM_VALUE (PARAM_IPCP_UNIT_GROWTH) / 100 + 1;
+  if (max_new_size < param_large_unit_insns)
+    max_new_size = param_large_unit_insns;
+  max_new_size += max_new_size * param_ipcp_unit_growth / 100 + 1;
 
   if (dump_file)
     fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c
index 64c4d95ccd4..4aac57446f2 100644
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -1201,7 +1201,7 @@  decompose_param_expr (struct ipa_func_body_info *fbi,
 		      struct agg_position_info *aggpos,
 		      expr_eval_ops *param_ops_p = NULL)
 {
-  int op_limit = PARAM_VALUE (PARAM_IPA_MAX_PARAM_EXPR_OPS);
+  int op_limit = param_ipa_max_param_expr_ops;
   int op_count = 0;
 
   if (param_ops_p)
@@ -1432,7 +1432,7 @@  set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
 
   auto_vec<std::pair<tree, tree> > ranges;
   tree type = TREE_TYPE (op);
-  int bound_limit = PARAM_VALUE (PARAM_IPA_MAX_SWITCH_PREDICATE_BOUNDS);
+  int bound_limit = param_ipa_max_switch_predicate_bounds;
   int bound_count = 0;
   wide_int vr_wmin, vr_wmax;
   value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);
@@ -2277,9 +2277,9 @@  fp_expression_p (gimple *stmt)
 static void
 analyze_function_body (struct cgraph_node *node, bool early)
 {
-  sreal time = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME);
+  sreal time = param_uninlined_function_time;
   /* Estimate static overhead for function prologue/epilogue and alignment. */
-  int size = PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS);
+  int size = param_uninlined_function_insns;
   /* Benefits are scaled by probability of elimination that is in range
      <0,2>.  */
   basic_block bb;
@@ -2328,7 +2328,7 @@  analyze_function_body (struct cgraph_node *node, bool early)
 	  fbi.bb_infos = vNULL;
 	  fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
 	  fbi.param_count = count_formal_params (node->decl);
-	  fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+	  fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
 	  nonconstant_names.safe_grow_cleared
 	    (SSANAMES (my_function)->length ());
@@ -2345,9 +2345,9 @@  analyze_function_body (struct cgraph_node *node, bool early)
   info->account_size_time (0, 0, bb_predicate, bb_predicate);
 
   bb_predicate = predicate::not_inlined ();
-  info->account_size_time (PARAM_VALUE (PARAM_UNINLINED_FUNCTION_INSNS)
+  info->account_size_time (param_uninlined_function_insns
 			   * ipa_fn_summary::size_scale,
-			   PARAM_VALUE (PARAM_UNINLINED_FUNCTION_TIME),
+			   param_uninlined_function_time,
 			   bb_predicate,
 		           bb_predicate);
 
@@ -2745,10 +2745,8 @@  compute_fn_summary (struct cgraph_node *node, bool early)
       es->call_stmt_size = eni_size_weights.call_cost;
       es->call_stmt_time = eni_time_weights.call_cost;
       info->account_size_time (ipa_fn_summary::size_scale
-			       * PARAM_VALUE
-				 (PARAM_UNINLINED_FUNCTION_THUNK_INSNS),
-			       PARAM_VALUE
-				 (PARAM_UNINLINED_FUNCTION_THUNK_TIME), t, t);
+			       * param_uninlined_function_thunk_insns,
+			       param_uninlined_function_thunk_time, t, t);
       t = predicate::not_inlined ();
       info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
       ipa_update_overall_fn_summary (node);
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index ea1fae484ff..33822e11308 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -448,7 +448,7 @@  estimate_growth (struct cgraph_node *node)
       else if (DECL_COMDAT (node->decl)
 	       && node->can_remove_if_no_direct_calls_p ())
 	d.growth -= (info->size
-		     * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
+		     * (100 - param_comdat_sharing_probability)
 		     + 50) / 100;
     }
 
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index b2c90cc2dd9..b765a509564 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -179,13 +179,13 @@  caller_growth_limits (struct cgraph_edge *e)
   if (limit < what_size_info->self_size)
     limit = what_size_info->self_size;
 
-  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
+  limit += limit * param_large_function_growth / 100;
 
   /* Check the size after inlining against the function limits.  But allow
      the function to shrink if it went over the limits by forced inlining.  */
   newsize = estimate_size_after_inlining (to, e);
   if (newsize >= ipa_size_summaries->get (what)->size
-      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
+      && newsize > param_large_function_insns
       && newsize > limit)
     {
       e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
@@ -201,7 +201,7 @@  caller_growth_limits (struct cgraph_edge *e)
      on every invocation of the caller (i.e. its call statement dominates
      exit block).  We do not track this information, yet.  */
   stack_size_limit += ((gcov_type)stack_size_limit
-		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
+		       * param_stack_frame_growth / 100);
 
   inlined_stack = (ipa_get_stack_frame_offset (to)
 		   + outer_info->estimated_self_stack_size
@@ -214,7 +214,7 @@  caller_growth_limits (struct cgraph_edge *e)
 	 This bit overoptimistically assume that we are good at stack
 	 packing.  */
       && inlined_stack > ipa_fn_summaries->get (to)->estimated_stack_size
-      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
+      && inlined_stack > param_large_stack_frame)
     {
       e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
       return false;
@@ -399,16 +399,16 @@  inline_insns_single (cgraph_node *n, bool hint)
   if (opt_for_fn (n->decl, optimize) >= 3)
     {
       if (hint)
-	return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE)
-	       * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE);
+	return param_max_inline_insns_single
+	       * param_inline_heuristics_hint_percent / 100;
+      return param_max_inline_insns_single;
     }
   else
     {
       if (hint)
-	return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2)
-	       * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE_O2);
+	return param_max_inline_insns_single_o2
+	       * param_inline_heuristics_hint_percent_o2 / 100;
+      return param_max_inline_insns_single_o2;
     }
 }
 
@@ -421,16 +421,16 @@  inline_insns_auto (cgraph_node *n, bool hint)
   if (opt_for_fn (n->decl, optimize) >= 3)
     {
       if (hint)
-	return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO)
-	       * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO);
+	return param_max_inline_insns_auto
+	       * param_inline_heuristics_hint_percent / 100;
+      return param_max_inline_insns_auto;
     }
   else
     {
       if (hint)
-	return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2)
-	       * PARAM_VALUE (PARAM_INLINE_HEURISTICS_HINT_PERCENT_O2) / 100;
-      return PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO_O2);
+	return param_max_inline_insns_auto_o2
+	       * param_inline_heuristics_hint_percent_o2 / 100;
+      return param_max_inline_insns_auto_o2;
     }
 }
 
@@ -567,14 +567,14 @@  can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
 	  inlinable = false;
 	}
       /* If callee is optimized for size and caller is not, allow inlining if
-	 code shrinks or we are in MAX_INLINE_INSNS_SINGLE limit and callee
+	 code shrinks or we stay within param_max_inline_insns_single and callee
 	 is inline (and thus likely an unified comdat).  This will allow caller
 	 to run faster.  */
       else if (opt_for_fn (callee->decl, optimize_size)
 	       > opt_for_fn (caller->decl, optimize_size))
 	{
 	  int growth = estimate_edge_growth (e);
-	  if (growth > PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE)
+	  if (growth > param_max_inline_insns_size
 	      && (!DECL_DECLARED_INLINE_P (callee->decl)
 		  && growth >= MAX (inline_insns_single (caller, false),
 				    inline_insns_auto (caller, false))))
@@ -686,11 +686,11 @@  want_early_inline_function_p (struct cgraph_edge *e)
       int growth = estimate_edge_growth (e);
       int n;
       int early_inlining_insns = opt_for_fn (e->caller->decl, optimize) >= 3
-				 ? PARAM_VALUE (PARAM_EARLY_INLINING_INSNS)
-				 : PARAM_VALUE (PARAM_EARLY_INLINING_INSNS_O2);
+				 ? param_early_inlining_insns
+				 : param_early_inlining_insns_o2;
 
 
-      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+      if (growth <= param_max_inline_insns_size)
 	;
       else if (!e->maybe_hot_p ())
 	{
@@ -793,8 +793,8 @@  big_speedup_p (struct cgraph_edge *e)
 			 ? e->caller->inlined_to
 			 : e->caller);
   int limit = opt_for_fn (caller->decl, optimize) >= 3
-	      ? PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
-	      : PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP_O2);
+	      ? param_inline_min_speedup
+	      : param_inline_min_speedup_o2;
 
   if ((time - inlined_time) * 100 > time * limit)
     return true;
@@ -861,9 +861,9 @@  want_inline_small_function_p (struct cgraph_edge *e, bool report)
 				   | INLINE_HINT_loop_iterations
 				   | INLINE_HINT_loop_stride));
 
-      if (growth <= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SIZE))
+      if (growth <= param_max_inline_insns_size)
 	;
-      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
+      /* Apply param_max_inline_insns_single limit.  Do not do so when
 	 hints suggests that inlining given function is very profitable.
 	 Avoid computation of big_speedup_p when not necessary to change
 	 outcome of decision.  */
@@ -881,7 +881,7 @@  want_inline_small_function_p (struct cgraph_edge *e, bool report)
 	}
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
 	       && !opt_for_fn (e->caller->decl, flag_inline_functions)
-	       && growth >= PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SMALL))
+	       && growth >= param_max_inline_insns_small)
 	{
 	  /* growth_likely_positive is expensive, always test it last.  */
           if (growth >= inline_insns_single (e->caller, false)
@@ -891,7 +891,7 @@  want_inline_small_function_p (struct cgraph_edge *e, bool report)
 	      want_inline = false;
  	    }
 	}
-      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
+      /* Apply param_max_inline_insns_auto to functions not declared inline.
 	 Bypass the limit when speedup seems big.  */
       else if (!DECL_DECLARED_INLINE_P (callee->decl)
 	       && growth >= inline_insns_auto (e->caller, apply_hints)
@@ -944,10 +944,10 @@  want_inline_self_recursive_call_p (struct cgraph_edge *edge,
   char const *reason = NULL;
   bool want_inline = true;
   sreal caller_freq = 1;
-  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
+  int max_depth = param_max_inline_recursive_depth_auto;
 
   if (DECL_DECLARED_INLINE_P (edge->caller->decl))
-    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
+    max_depth = param_max_inline_recursive_depth;
 
   if (!edge->maybe_hot_p ())
     {
@@ -1009,7 +1009,7 @@  want_inline_self_recursive_call_p (struct cgraph_edge *edge,
     {
       if (edge->sreal_frequency () * 100
           <= caller_freq
-	     * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
+	     * param_min_inline_recursive_probability)
 	{
 	  reason = "frequency of recursive call is too small";
 	  want_inline = false;
@@ -1206,8 +1206,7 @@  edge_badness (struct cgraph_edge *edge, bool dump)
 		 frequency still indicates splitting is a win ... */
 	      || (callee->split_part && !caller->split_part
 		  && edge->sreal_frequency () * 100
-		     < PARAM_VALUE
-			  (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
+		     < param_partial_inlining_entry_probability
 		  /* ... and do not overwrite user specified hints.   */
 		  && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
 		      || DECL_DECLARED_INLINE_P (caller->decl)))))
@@ -1537,7 +1536,7 @@  static bool
 recursive_inlining (struct cgraph_edge *edge,
 		    vec<cgraph_edge *> *new_edges)
 {
-  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
+  int limit = param_max_inline_insns_recursive_auto;
   edge_heap_t heap (sreal::min ());
   struct cgraph_node *node;
   struct cgraph_edge *e;
@@ -1550,7 +1549,7 @@  recursive_inlining (struct cgraph_edge *edge,
     node = node->inlined_to;
 
   if (DECL_DECLARED_INLINE_P (node->decl))
-    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
+    limit = param_max_inline_insns_recursive;
 
   /* Make sure that function is small enough to be considered for inlining.  */
   if (estimate_size_after_inlining (node, edge)  >= limit)
@@ -1675,11 +1674,11 @@  static int
 compute_max_insns (int insns)
 {
   int max_insns = insns;
-  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
-    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
+  if (max_insns < param_large_unit_insns)
+    max_insns = param_large_unit_insns;
 
   return ((int64_t) max_insns
-	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
+	  * (100 + param_inline_unit_growth) / 100);
 }
 
 
@@ -2895,7 +2894,7 @@  early_inliner (function *fun)
 	}
       /* We iterate incremental inlining to get trivial cases of indirect
 	 inlining.  */
-      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
+      while (iterations < param_early_inliner_max_iterations
 	     && early_inline_small_functions (node))
 	{
 	  timevar_push (TV_INTEGRATION);
@@ -2922,7 +2921,7 @@  early_inliner (function *fun)
 		  edge->call_stmt_cannot_inline_p = true;
 		}
 	    }
-	  if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
+	  if (iterations < param_early_inliner_max_iterations - 1)
 	    ipa_update_overall_fn_summary (node);
 	  timevar_pop (TV_INTEGRATION);
 	  iterations++;
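
The new inline_insns_single/inline_insns_auto helpers above select the O2 or
O3 param pair by the caller's optimization level and scale it by the hint
percent.  A stand-alone sketch of that arithmetic; the values below are
illustrative, not GCC's defaults:

  #include <stdio.h>

  static int param_max_inline_insns_single = 200;
  static int param_inline_heuristics_hint_percent = 1600;
  static int param_max_inline_insns_single_o2 = 30;
  static int param_inline_heuristics_hint_percent_o2 = 200;

  /* Mirror of inline_insns_single: hinted calls get a percent-scaled,
     larger budget.  */
  static int
  inline_insns_single (int optimize_level, int hint)
  {
    if (optimize_level >= 3)
      return hint
	     ? param_max_inline_insns_single
	       * param_inline_heuristics_hint_percent / 100
	     : param_max_inline_insns_single;
    return hint
	   ? param_max_inline_insns_single_o2
	     * param_inline_heuristics_hint_percent_o2 / 100
	   : param_max_inline_insns_single_o2;
  }

  int
  main (void)
  {
    printf ("-O3: %d, hinted: %d\n",
	    inline_insns_single (3, 0), inline_insns_single (3, 1));
    printf ("-O2: %d, hinted: %d\n",
	    inline_insns_single (2, 0), inline_insns_single (2, 1));
    return 0;
  }
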
diff --git a/gcc/ipa-polymorphic-call.c b/gcc/ipa-polymorphic-call.c
index 705af03d20c..7c2b4c795fa 100644
--- a/gcc/ipa-polymorphic-call.c
+++ b/gcc/ipa-polymorphic-call.c
@@ -1400,7 +1400,7 @@  record_known_type (struct type_change_info *tci, tree type, HOST_WIDE_INT offset
 static inline bool
 csftc_abort_walking_p (unsigned speculative)
 {
-  unsigned max = PARAM_VALUE (PARAM_MAX_SPECULATIVE_DEVIRT_MAYDEFS);
+  unsigned max = param_max_speculative_devirt_maydefs;
   return speculative > max ? true : false;
 }
 
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index a1acd2e413a..19cf8eae18f 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -506,7 +506,7 @@  ipa_profile (void)
 
       gcc_assert (overall_size);
 
-      cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
+      cutoff = (overall_time * param_hot_bb_count_ws_permille + 500) / 1000;
       threshold = 0;
       for (i = 0; cumulated < cutoff; i++)
 	{
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 094e68178ec..b763aefa478 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -1601,7 +1601,7 @@  determine_known_aggregate_parts (gcall *call, tree arg,
   struct ipa_known_agg_contents_list *list = NULL, *all_list = NULL;
   bitmap visited = NULL;
   int item_count = 0, const_count = 0;
-  int ipa_max_agg_items = PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS);
+  int ipa_max_agg_items = param_ipa_max_agg_items;
   HOST_WIDE_INT arg_offset, arg_size;
   tree arg_base;
   bool check_ref, by_ref;
@@ -2632,7 +2632,7 @@  ipa_analyze_node (struct cgraph_node *node)
   fbi.bb_infos = vNULL;
   fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
   fbi.param_count = ipa_get_param_count (info);
-  fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+  fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
   for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
     {
@@ -5291,7 +5291,7 @@  ipcp_transform_function (struct cgraph_node *node)
   fbi.bb_infos = vNULL;
   fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
   fbi.param_count = param_count;
-  fbi.aa_walk_budget = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+  fbi.aa_walk_budget = param_ipa_max_aa_steps;
 
   vec_safe_grow_cleared (descriptors, param_count);
   ipa_populate_param_decls (node, *descriptors);
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 0444bda704d..c73b257ca7f 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -453,7 +453,7 @@  consider_split (class split_point *current, bitmap non_ssa_vars,
      is unknown.  */
   if (!(current->count
        < (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale
-	   (PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY), 100))))
+	   (param_partial_inlining_entry_probability, 100))))
     {
       /* When profile is guessed, we cannot expect it to give us
 	 realistic estimate on likelyness of function taking the
@@ -563,8 +563,8 @@  consider_split (class split_point *current, bitmap non_ssa_vars,
      that.  Next stage1 we should try to be more meaningful here.  */
   if (current->header_size + call_overhead
       >= (unsigned int)(DECL_DECLARED_INLINE_P (current_function_decl)
-			? MAX_INLINE_INSNS_SINGLE
-			: MAX_INLINE_INSNS_AUTO) + 10)
+			? param_max_inline_insns_single
+			: param_max_inline_insns_auto) + 10)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
@@ -577,7 +577,7 @@  consider_split (class split_point *current, bitmap non_ssa_vars,
      Limit this duplication.  This is consistent with limit in tree-sra.c  
      FIXME: with LTO we ought to be able to do better!  */
   if (DECL_ONE_ONLY (current_function_decl)
-      && current->split_size >= (unsigned int) MAX_INLINE_INSNS_AUTO + 10)
+      && current->split_size >= (unsigned int) param_max_inline_insns_auto + 10)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
@@ -589,7 +589,7 @@  consider_split (class split_point *current, bitmap non_ssa_vars,
      FIXME: with LTO we ought to be able to do better!  */
   if (DECL_ONE_ONLY (current_function_decl)
       && current->split_size
-	 <= (unsigned int) PARAM_VALUE (PARAM_EARLY_INLINING_INSNS) / 2)
+	 <= (unsigned int) param_early_inlining_insns / 2)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
diff --git a/gcc/ipa-sra.c b/gcc/ipa-sra.c
index aceb5c722ea..365c0eeedff 100644
--- a/gcc/ipa-sra.c
+++ b/gcc/ipa-sra.c
@@ -1266,7 +1266,7 @@  allocate_access (gensum_param_desc *desc,
 		 HOST_WIDE_INT offset, HOST_WIDE_INT size)
 {
   if (desc->access_count
-      == (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+      == (unsigned) param_ipa_sra_max_replacements)
     {
       disqualify_split_candidate (desc, "Too many replacement candidates");
       return NULL;
@@ -2280,8 +2280,7 @@  process_scan_results (cgraph_node *node, struct function *fun,
       if (!desc->by_ref || optimize_function_for_size_p (fun))
 	param_size_limit = cur_param_size;
       else
-	param_size_limit = (PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR)
-			   * cur_param_size);
+	param_size_limit = param_ipa_sra_ptr_growth_factor * cur_param_size;
       if (nonarg_acc_size > param_size_limit
 	  || (!desc->by_ref && nonarg_acc_size == param_size_limit))
 	{
@@ -2501,7 +2500,7 @@  ipa_sra_summarize_function (cgraph_node *node)
 	  bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
 				      by_ref_count
 				      * last_basic_block_for_fn (fun));
-	  aa_walking_limit = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+	  aa_walking_limit = param_ipa_max_aa_steps;
 	  scan_function (node, fun);
 
 	  if (dump_file)
@@ -3337,7 +3336,7 @@  pull_accesses_from_callee (isra_param_desc *param_desc,
       return NULL;
 
     if ((prop_count + pclen
-	 > (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+	 > (unsigned) param_ipa_sra_max_replacements)
 	|| size_would_violate_limit_p (param_desc,
 				       param_desc->size_reached + prop_size))
       return "propagating accesses would violate the count or size limit";
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 47ce189af47..e53bb813f37 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -2217,7 +2217,7 @@  loop_compare_func (const void *v1p, const void *v2p)
    hardly helps (for irregular register file architecture it could
    help by choosing a better hard register in the loop but we prefer
    faster allocation even in this case).  We also remove cheap loops
-   if there are more than IRA_MAX_LOOPS_NUM of them.  Loop with EH
+   if there are more than param_ira_max_loops_num of them.  Loop with EH
    exit or enter edges are removed too because the allocation might
    require put pseudo moves on the EH edges (we could still do this
    for pseudos with caller saved hard registers in some cases but it
@@ -2253,7 +2253,7 @@  mark_loops_for_removal (void)
 	     );
       }
   qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
-  for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
+  for (i = 0; i < n - param_ira_max_loops_num; i++)
     {
       sorted_loops[i]->to_remove_p = true;
       if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
diff --git a/gcc/ira-conflicts.c b/gcc/ira-conflicts.c
index a0aefaa0549..8e7c0c41f2f 100644
--- a/gcc/ira-conflicts.c
+++ b/gcc/ira-conflicts.c
@@ -113,13 +113,13 @@  build_conflict_bit_table (void)
 	     / IRA_INT_BITS);
 	allocated_words_num += conflict_bit_vec_words_num;
 	if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
-	    > (uint64_t) IRA_MAX_CONFLICT_TABLE_SIZE * 1024 * 1024)
+	    > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
 	  {
 	    if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
 	      fprintf
 		(ira_dump_file,
 		 "+++Conflict table will be too big(>%dMB) -- don't use it\n",
-		 IRA_MAX_CONFLICT_TABLE_SIZE);
+		 param_ira_max_conflict_table_size);
 	    return false;
 	  }
       }
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index ccd020a2dba..6a956a03b03 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -651,7 +651,7 @@  doloop_optimize (class loop *loop)
     }
 
   max_cost
-    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
+    = COSTS_N_INSNS (param_max_iterations_computation_cost);
   if (set_src_cost (desc->niter_expr, mode, optimize_loop_for_speed_p (loop))
       > max_cost)
     {
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index ef46c8aea22..d40ad37cced 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -1491,7 +1491,7 @@  gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
 	  if ((int) new_regs[pressure_class]
 	      + (int) regs_needed[pressure_class]
 	      + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
-	      + IRA_LOOP_RESERVED_REGS
+	      + param_ira_loop_reserved_regs
 	      > ira_class_hard_regs_num[pressure_class])
 	    break;
 	}
@@ -2279,7 +2279,7 @@  move_loop_invariants (void)
       /* move_single_loop_invariants for very large loops is time consuming
 	 and might need a lot of memory.  For -O1 only do loop invariant
 	 motion for very small loops.  */
-      unsigned max_bbs = LOOP_INVARIANT_MAX_BBS_IN_LOOP;
+      unsigned max_bbs = param_loop_invariant_max_bbs_in_loop;
       if (optimize < 2)
 	max_bbs /= 10;
       if (loop->num_nodes <= max_bbs)
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index 63fccd23fae..551405ad0d1 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -364,13 +364,13 @@  decide_unroll_constant_iterations (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
   nunroll_by_av
-    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+    = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -684,12 +684,12 @@  decide_unroll_runtime_iterations (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once.  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
-  nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
+  nunroll_by_av = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -1167,13 +1167,13 @@  decide_unroll_stupid (class loop *loop, int flags)
 
   /* nunroll = total number of copies of the original loop body in
      unrolled loop (i.e. if it is 2, we have to duplicate loop body once.  */
-  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
+  nunroll = param_max_unrolled_insns / loop->ninsns;
   nunroll_by_av
-    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
+    = param_max_average_unrolled_insns / loop->av_ninsns;
   if (nunroll > nunroll_by_av)
     nunroll = nunroll_by_av;
-  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
-    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  if (nunroll > (unsigned) param_max_unroll_times)
+    nunroll = param_max_unroll_times;
 
   if (targetm.loop_unroll_adjust)
     nunroll = targetm.loop_unroll_adjust (nunroll, loop);
@@ -1824,7 +1824,7 @@  expand_var_during_unrolling (struct var_to_expand *ve, rtx_insn *insn)
 
   /* Generate a new register only if the expansion limit has not been
      reached.  Else reuse an already existing expansion.  */
-  if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
+  if (param_max_variable_expansions > ve->expansion_count)
     {
       really_new_expansion = true;
       new_reg = gen_reg_rtx (GET_MODE (ve->reg));
diff --git a/gcc/lra-assigns.c b/gcc/lra-assigns.c
index e14a246c0d2..56ab9631089 100644
--- a/gcc/lra-assigns.c
+++ b/gcc/lra-assigns.c
@@ -1008,7 +1008,7 @@  spill_for (int regno, bitmap spilled_pseudo_bitmap, bool first_p)
 	}
       n = 0;
       if (sparseset_cardinality (live_range_reload_inheritance_pseudos)
-	  <= (unsigned)LRA_MAX_CONSIDERED_RELOAD_PSEUDOS)
+	  <= (unsigned)param_lra_max_considered_reload_pseudos)
 	EXECUTE_IF_SET_IN_SPARSESET (live_range_reload_inheritance_pseudos,
 				     reload_regno)
 	  if ((int) reload_regno != regno
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index 0db6d3151cd..ced7be79017 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -6682,7 +6682,7 @@  inherit_in_ebb (rtx_insn *head, rtx_insn *tail)
    a BB is not greater than the following value, we don't add the BB
    to EBB.  */
 #define EBB_PROBABILITY_CUTOFF \
-  ((REG_BR_PROB_BASE * LRA_INHERITANCE_EBB_PROBABILITY_CUTOFF) / 100)
+  ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)
 
 /* Current number of inheritance/split iteration.  */
 int lra_inheritance_iter;
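
EBB_PROBABILITY_CUTOFF above is typical of how the percent-valued params are
consumed: they get rescaled to GCC's fixed-point branch-probability base.
A minimal sketch, assuming REG_BR_PROB_BASE of 10000 and an assumed default
cutoff of 40:

  #include <stdio.h>

  #define REG_BR_PROB_BASE 10000
  static int param_lra_inheritance_ebb_probability_cutoff = 40;

  #define EBB_PROBABILITY_CUTOFF \
    ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)

  int
  main (void)
  {
    /* 40% of REG_BR_PROB_BASE, i.e. 4000.  */
    printf ("%d\n", EBB_PROBABILITY_CUTOFF);
    return 0;
  }

The same percent-to-REG_BR_PROB_BASE scaling recurs in the sched-ebb.c and
sched-rgn.c hunks further down.
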
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index 32090359814..5354350378b 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -560,13 +560,13 @@  lto_balanced_map (int n_lto_partitions, int max_partition_size)
   varpool_order.qsort (varpool_node_cmp);
 
   /* Compute partition size and create the first partition.  */
-  if (PARAM_VALUE (MIN_PARTITION_SIZE) > max_partition_size)
+  if (param_min_partition_size > max_partition_size)
     fatal_error (input_location, "min partition size cannot be greater "
 		 "than max partition size");
 
   partition_size = total_size / n_lto_partitions;
-  if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
-    partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+  if (partition_size < param_min_partition_size)
+    partition_size = param_min_partition_size;
   npartitions = 1;
   partition = new_partition ("");
   if (dump_file)
@@ -816,8 +816,8 @@  lto_balanced_map (int n_lto_partitions, int max_partition_size)
 	    fprintf (dump_file,
 		     "Total size: %" PRId64 " partition_size: %" PRId64 "\n",
 		     total_size, partition_size);
-	  if (partition_size < PARAM_VALUE (MIN_PARTITION_SIZE))
-	    partition_size = PARAM_VALUE (MIN_PARTITION_SIZE);
+	  if (partition_size < param_min_partition_size)
+	    partition_size = param_min_partition_size;
 	  npartitions ++;
 	}
     }
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 86d58010e0e..ef0923c7791 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -436,14 +436,14 @@  do_whole_program_analysis (void)
 
   /* TODO: jobserver communication is not supported, yet.  */
   if (!strcmp (flag_wpa, "jobserver"))
-    lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+    lto_parallelism = param_max_lto_streaming_parallelism;
   else
     {
       lto_parallelism = atoi (flag_wpa);
       if (lto_parallelism <= 0)
 	lto_parallelism = 0;
-      if (lto_parallelism >= PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM))
-	lto_parallelism = PARAM_VALUE (PARAM_MAX_LTO_STREAMING_PARALLELISM);
+      if (lto_parallelism >= param_max_lto_streaming_parallelism)
+	lto_parallelism = param_max_lto_streaming_parallelism;
     }
 
   timevar_start (TV_PHASE_OPT_GEN);
@@ -492,8 +492,8 @@  do_whole_program_analysis (void)
   else if (flag_lto_partition == LTO_PARTITION_ONE)
     lto_balanced_map (1, INT_MAX);
   else if (flag_lto_partition == LTO_PARTITION_BALANCED)
-    lto_balanced_map (PARAM_VALUE (PARAM_LTO_PARTITIONS),
-		      PARAM_VALUE (MAX_PARTITION_SIZE));
+    lto_balanced_map (param_lto_partitions,
+		      param_max_partition_size);
   else
     gcc_unreachable ();
 
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index c355594bb6b..3127fb1481e 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -1433,7 +1433,7 @@  sms_schedule (void)
       if ( latch_edge->count () > profile_count::zero ()
           && (latch_edge->count()
 	      < single_exit (loop)->count ().apply_scale
-				 (SMS_LOOP_AVERAGE_COUNT_THRESHOLD, 1)))
+				 (param_sms_loop_average_count_threshold, 1)))
 	{
 	  if (dump_file)
 	    {
@@ -1640,7 +1640,7 @@  sms_schedule (void)
 	  /* The default value of PARAM_SMS_MIN_SC is 2 as stage count of
 	     1 means that there is no interleaving between iterations thus
 	     we let the scheduling passes do the job in this case.  */
-	  if (stage_count < PARAM_VALUE (PARAM_SMS_MIN_SC)
+	  if (stage_count < param_sms_min_sc
 	      || (count_init && (loop_count <= stage_count))
 	      || (max_trip_count >= 0 && max_trip_count <= stage_count)
 	      || (trip_count >= 0 && trip_count <= stage_count))
@@ -1832,7 +1832,7 @@  sms_schedule (void)
 /* A limit on the number of cycles that resource conflicts can span.  ??? Should
    be provided by DFA, and be dependent on the type of insn scheduled.  Currently
    set to 0 to save compile time.  */
-#define DFA_HISTORY SMS_DFA_HISTORY
+#define DFA_HISTORY param_sms_dfa_history
 
 /* A threshold for the number of repeated unsuccessful attempts to insert
    an empty row, before we flush the partial schedule and start over.  */
diff --git a/gcc/opts.c b/gcc/opts.c
index 394cbfd1c56..a47517df80e 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -667,25 +667,18 @@  default_options_optimization (struct gcc_options *opts,
     opts->x_flag_ipa_pta = true;
 
   /* Track fields in field-sensitive alias analysis.  */
-  maybe_set_param_value
-    (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE,
-     opt2 ? 100 : default_param_value (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE),
-     opts->x_param_values, opts_set->x_param_values);
+  if (opt2)
+    SET_OPTION_IF_UNSET (opts, opts_set, param_max_fields_for_field_sensitive,
+			 100);
 
   if (opts->x_optimize_size)
     /* We want to crossjump as much as possible.  */
-    maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS, 1,
-			   opts->x_param_values, opts_set->x_param_values);
-  else
-    maybe_set_param_value (PARAM_MIN_CROSSJUMP_INSNS,
-			   default_param_value (PARAM_MIN_CROSSJUMP_INSNS),
-			   opts->x_param_values, opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_min_crossjump_insns, 1);
 
   /* Restrict the amount of work combine does at -Og while retaining
      most of its useful transforms.  */
   if (opts->x_optimize_debug)
-    maybe_set_param_value (PARAM_MAX_COMBINE_INSNS, 2,
-			   opts->x_param_values, opts_set->x_param_values);
+    SET_OPTION_IF_UNSET (opts, opts_set, param_max_combine_insns, 2);
 
   /* Allow default optimizations to be specified on a per-machine basis.  */
   maybe_default_options (opts, opts_set,
@@ -1036,10 +1029,8 @@  finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
 
   if (opts->x_flag_conserve_stack)
     {
-      maybe_set_param_value (PARAM_LARGE_STACK_FRAME, 100,
-			     opts->x_param_values, opts_set->x_param_values);
-      maybe_set_param_value (PARAM_STACK_FRAME_GROWTH, 40,
-			     opts->x_param_values, opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_large_stack_frame, 100);
+      SET_OPTION_IF_UNSET (opts, opts_set, param_stack_frame_growth, 40);
     }
 
   if (opts->x_flag_lto)
@@ -2272,19 +2263,13 @@  common_handle_option (struct gcc_options *opts,
 	 all features.  */
       if (opts->x_flag_sanitize & SANITIZE_KERNEL_ADDRESS)
 	{
-	  maybe_set_param_value (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
-				 0, opts->x_param_values,
-				 opts_set->x_param_values);
-	  maybe_set_param_value (PARAM_ASAN_GLOBALS, 0, opts->x_param_values,
-				 opts_set->x_param_values);
-	  maybe_set_param_value (PARAM_ASAN_STACK, 0, opts->x_param_values,
-				 opts_set->x_param_values);
-	  maybe_set_param_value (PARAM_ASAN_PROTECT_ALLOCAS, 0,
-				 opts->x_param_values,
-				 opts_set->x_param_values);
-	  maybe_set_param_value (PARAM_ASAN_USE_AFTER_RETURN, 0,
-				 opts->x_param_values,
-				 opts_set->x_param_values);
+	  SET_OPTION_IF_UNSET (opts, opts_set,
+			       param_asan_instrumentation_with_call_threshold,
+			       0);
+	  SET_OPTION_IF_UNSET (opts, opts_set, param_asan_globals, 0);
+	  SET_OPTION_IF_UNSET (opts, opts_set, param_asan_stack, 0);
+	  SET_OPTION_IF_UNSET (opts, opts_set, param_asan_protect_allocas, 0);
+	  SET_OPTION_IF_UNSET (opts, opts_set, param_asan_use_after_return, 0);
 	}
       break;
 
@@ -2586,9 +2571,8 @@  common_handle_option (struct gcc_options *opts,
       enable_fdo_optimizations (opts, opts_set, value);
       if (!opts_set->x_flag_profile_correction)
 	opts->x_flag_profile_correction = value;
-      maybe_set_param_value (
-	PARAM_EARLY_INLINER_MAX_ITERATIONS, 10,
-	opts->x_param_values, opts_set->x_param_values);
+      SET_OPTION_IF_UNSET (opts, opts_set,
+			   param_early_inliner_max_iterations, 10);
       break;
 
     case OPT_fprofile_generate_:
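
The opts.c conversion above replaces maybe_set_param_value, which took the
explicit x_param_values arrays, with the generic SET_OPTION_IF_UNSET macro
keyed off the opts_set shadow structure.  A simplified model of its
only-if-not-user-set semantics (the real macro addresses the x_-prefixed
gcc_options fields):

  #include <stdbool.h>
  #include <stdio.h>

  struct gcc_options { int param_min_crossjump_insns; };
  struct gcc_options_set { bool param_min_crossjump_insns; };

  /* Install VALUE only when the user gave no explicit setting.  */
  #define SET_OPTION_IF_UNSET(OPTS, OPTS_SET, OPTION, VALUE) \
    do {						     \
      if (!(OPTS_SET)->OPTION)				     \
	(OPTS)->OPTION = (VALUE);			     \
    } while (0)

  int
  main (void)
  {
    struct gcc_options opts = { 5 };		  /* built-in default */
    struct gcc_options_set opts_set = { false };  /* no explicit --param */

    /* -Os wants aggressive crossjumping unless the user chose otherwise.  */
    SET_OPTION_IF_UNSET (&opts, &opts_set, param_min_crossjump_insns, 1);
    printf ("%d\n", opts.param_min_crossjump_insns);  /* prints 1 */
    return 0;
  }
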
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 0c12b3808a6..83df1825f16 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -1146,17 +1146,17 @@  eliminate_partially_redundant_load (basic_block bb, rtx_insn *insn,
 
   /* Check if it's worth applying the partial redundancy elimination.  */
   if (ok_count.to_gcov_type ()
-      < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count.to_gcov_type ())
+      < param_gcse_after_reload_partial_fraction * not_ok_count.to_gcov_type ())
     goto cleanup;
 
   gcov_type threshold;
 #if (GCC_VERSION >= 5000)
-  if (__builtin_mul_overflow (GCSE_AFTER_RELOAD_CRITICAL_FRACTION,
+  if (__builtin_mul_overflow (param_gcse_after_reload_critical_fraction,
 			      critical_count.to_gcov_type (), &threshold))
     threshold = profile_count::max_count;
 #else
   threshold
-    = GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count.to_gcov_type ();
+    = param_gcse_after_reload_critical_fraction * critical_count.to_gcov_type ();
 #endif
 
   if (ok_count.to_gcov_type () < threshold)
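
The threshold computation above keeps the existing overflow guard: on hosts
where __builtin_mul_overflow is available (GCC >= 5), the product saturates
to the maximum count instead of wrapping.  A stand-alone sketch with
illustrative types standing in for gcov_type:

  #include <stdint.h>
  #include <stdio.h>

  typedef int64_t gcov_type;
  static const gcov_type max_count = INT64_MAX;

  static gcov_type
  scaled_threshold (gcov_type fraction, gcov_type count)
  {
    gcov_type threshold;
  #if defined(__GNUC__) && (__GNUC__ >= 5)
    if (__builtin_mul_overflow (fraction, count, &threshold))
      threshold = max_count;	/* saturate instead of wrapping */
  #else
    threshold = fraction * count;
  #endif
    return threshold;
  }

  int
  main (void)
  {
    printf ("%lld\n", (long long) scaled_threshold (10, 100));	/* 1000 */
    return 0;
  }
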
diff --git a/gcc/predict.c b/gcc/predict.c
index 915f0806b11..5ee56a33fd7 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -132,7 +132,7 @@  get_hot_bb_threshold ()
 {
   if (min_count == -1)
     {
-      const int hot_frac = PARAM_VALUE (HOT_BB_COUNT_FRACTION);
+      const int hot_frac = param_hot_bb_count_fraction;
       const gcov_type min_hot_count
 	= hot_frac
 	  ? profile_info->sum_max / hot_frac
@@ -177,7 +177,7 @@  maybe_hot_count_p (struct function *fun, profile_count count)
       if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
 	  && count < (ENTRY_BLOCK_PTR_FOR_FN (fun)->count.apply_scale (2, 3)))
 	return false;
-      if (count.apply_scale (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION), 1)
+      if (count.apply_scale (param_hot_bb_frequency_fraction, 1)
 	  < ENTRY_BLOCK_PTR_FOR_FN (fun)->count)
 	return false;
       return true;
@@ -223,7 +223,7 @@  probably_never_executed (struct function *fun, profile_count count)
      desirable.  */
   if (count.precise_p () && profile_status_for_fn (fun) == PROFILE_READ)
     {
-      const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+      const int unlikely_frac = param_unlikely_bb_count_fraction;
       if (count.apply_scale (unlikely_frac, 1) >= profile_info->runs)
 	return false;
       return true;
@@ -412,9 +412,9 @@  predictable_edge_p (edge e)
   if (!e->probability.initialized_p ())
     return false;
   if ((e->probability.to_reg_br_prob_base ()
-       <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
+       <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100)
       || (REG_BR_PROB_BASE - e->probability.to_reg_br_prob_base ()
-          <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
+	  <= param_predictable_branch_outcome * REG_BR_PROB_BASE / 100))
     return true;
   return false;
 }
@@ -1963,7 +1963,7 @@  predict_loops (void)
 	{
 	  tree niter = NULL;
 	  HOST_WIDE_INT nitercst;
-	  int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
+	  int max = param_max_predicted_iterations;
 	  int probability;
 	  enum br_predictor predictor;
 	  widest_int nit;
@@ -2443,7 +2443,7 @@  expr_expected_value_1 (tree type, tree op0, enum tree_code code,
 		  *predictor = (enum br_predictor) tree_to_uhwi (val2);
 		  if (*predictor == PRED_BUILTIN_EXPECT)
 		    *probability
-		      = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+		      = HITRATE (param_builtin_expect_probability);
 		  return gimple_call_arg (def, 1);
 		}
 	      return NULL;
@@ -2469,7 +2469,7 @@  expr_expected_value_1 (tree type, tree op0, enum tree_code code,
 		    return val;
 		  *predictor = PRED_BUILTIN_EXPECT;
 		  *probability
-		    = HITRATE (PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY));
+		    = HITRATE (param_builtin_expect_probability);
 		  return gimple_call_arg (def, 1);
 		}
 	      case BUILT_IN_EXPECT_WITH_PROBABILITY:
@@ -2660,7 +2660,7 @@  tree_predict_by_opcode (basic_block bb)
 	  edge e = find_taken_edge_switch_expr (sw, val);
 	  if (predictor == PRED_BUILTIN_EXPECT)
 	    {
-	      int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
+	      int percent = param_builtin_expect_probability;
 	      gcc_assert (percent >= 0 && percent <= 100);
 	      predict_edge (e, PRED_BUILTIN_EXPECT,
 			    HITRATE (percent));
@@ -3531,7 +3531,7 @@  drop_profile (struct cgraph_node *node, profile_count call_count)
 void
 handle_missing_profiles (void)
 {
-  const int unlikely_frac = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
+  const int unlikely_frac = param_unlikely_bb_count_fraction;
   struct cgraph_node *node;
   auto_vec<struct cgraph_node *, 64> worklist;
 
diff --git a/gcc/reload.c b/gcc/reload.c
index 8582b4840ae..b8178238ff9 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -6717,7 +6717,7 @@  find_equiv_reg (rtx goal, rtx_insn *insn, enum reg_class rclass, int other,
 	continue;
       num++;
       if (p == 0 || LABEL_P (p)
-	  || num > PARAM_VALUE (PARAM_MAX_RELOAD_SEARCH_INSNS))
+	  || num > param_max_reload_search_insns)
 	return 0;
 
       /* Don't reuse register contents from before a setjmp-type
diff --git a/gcc/reorg.c b/gcc/reorg.c
index cba183e9c72..460741213f2 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -1489,7 +1489,7 @@  redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
 
   /* Scan backwards looking for a match.  */
   for (trial = PREV_INSN (target),
-	 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+	 insns_to_search = param_max_delay_slot_insn_search;
        trial && insns_to_search > 0;
        trial = PREV_INSN (trial))
     {
@@ -1593,7 +1593,7 @@  redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list)
      INSN sets or sets something insn uses or sets.  */
 
   for (trial = PREV_INSN (target),
-	 insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH;
+	 insns_to_search = param_max_delay_slot_insn_search;
        trial && !LABEL_P (trial) && insns_to_search > 0;
        trial = PREV_INSN (trial))
     {
diff --git a/gcc/resource.c b/gcc/resource.c
index bf2d6beaf39..5d16100ef48 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -928,7 +928,7 @@  mark_target_live_regs (rtx_insn *insns, rtx target_maybe_return, struct resource
     }
 
   if (b == -1)
-    b = find_basic_block (target, MAX_DELAY_SLOT_LIVE_SEARCH);
+    b = find_basic_block (target, param_max_delay_slot_live_search);
 
   if (target_hash_table != NULL)
     {
@@ -1289,7 +1289,7 @@  clear_hashed_info_for_insn (rtx_insn *insn)
 void
 incr_ticks_for_insn (rtx_insn *insn)
 {
-  int b = find_basic_block (insn, MAX_DELAY_SLOT_LIVE_SEARCH);
+  int b = find_basic_block (insn, param_max_delay_slot_live_search);
 
   if (b != -1)
     bb_ticks[b]++;
diff --git a/gcc/sanopt.c b/gcc/sanopt.c
index 00ade872832..7a4daeaf458 100644
--- a/gcc/sanopt.c
+++ b/gcc/sanopt.c
@@ -1289,8 +1289,8 @@  pass_sanopt::execute (function *fun)
   if (asan_sanitize_stack_p ())
     sanitize_rewrite_addressable_params (fun);
 
-  bool use_calls = ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD < INT_MAX
-    && asan_num_accesses >= ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD;
+  bool use_calls = param_asan_instrumentation_with_call_threshold < INT_MAX
+    && asan_num_accesses >= param_asan_instrumentation_with_call_threshold;
 
   hash_map<tree, tree> shadow_vars_mapping;
   bool need_commit_edge_insert = false;
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 308db4e3ca0..8b544b88592 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -2480,7 +2480,7 @@  sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
       /* Pending lists can't get larger with a readonly context.  */
       if (!deps->readonly
           && ((deps->pending_read_list_length + deps->pending_write_list_length)
-              >= MAX_PENDING_LIST_LENGTH))
+              >= param_max_pending_list_length))
 	{
 	  /* Flush all pending reads and writes to prevent the pending lists
 	     from getting any larger.  Insn scheduling runs too slowly when
@@ -2697,7 +2697,7 @@  sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
 	  {
 	    if ((deps->pending_read_list_length
 		 + deps->pending_write_list_length)
-		>= MAX_PENDING_LIST_LENGTH
+		>= param_max_pending_list_length
 		&& !DEBUG_INSN_P (insn))
 	      flush_pending_lists (deps, insn, true, true);
 	    add_insn_mem_dependence (deps, true, insn, x);
@@ -3222,8 +3222,8 @@  sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
 	    {
 	      struct deps_reg *reg_last = &deps->reg_last[i];
-	      if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
-		  || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
+	      if (reg_last->uses_length >= param_max_pending_list_length
+		  || reg_last->clobbers_length >= param_max_pending_list_length)
 		{
 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
 						REG_DEP_OUTPUT, false);
@@ -3679,7 +3679,7 @@  deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
                && sel_insn_is_speculation_check (insn)))
         {
           /* Keep the list a reasonable size.  */
-          if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
+          if (deps->pending_flush_length++ >= param_max_pending_list_length)
             flush_pending_lists (deps, insn, true, true);
           else
 	    deps->pending_jump_insns
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index a594b49ec66..c60afa340e7 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -620,9 +620,9 @@  schedule_ebbs (void)
     return;
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+    probability_cutoff = param_tracer_min_branch_probability_feedback;
   else
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+    probability_cutoff = param_tracer_min_branch_probability;
   probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
   schedule_ebbs_init ();
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 3e0825075a3..99b6619607b 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -485,9 +485,9 @@  find_single_block_region (bool ebbs_p)
   if (ebbs_p) {
     int probability_cutoff;
     if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+      probability_cutoff = param_tracer_min_branch_probability_feedback;
     else
-      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+      probability_cutoff = param_tracer_min_branch_probability;
     probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
     FOR_EACH_BB_FN (ebb_start, cfun)
@@ -569,8 +569,8 @@  too_large (int block, int *num_bbs, int *num_insns)
   (*num_insns) += (common_sched_info->estimate_number_of_insns
                    (BASIC_BLOCK_FOR_FN (cfun, block)));
 
-  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
-	  || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
+  return ((*num_bbs > param_max_sched_region_blocks)
+	  || (*num_insns > param_max_sched_region_insns));
 }
 
 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
@@ -800,7 +800,7 @@  haifa_find_rgns (void)
 
       queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
 
-      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
+      extend_regions_p = param_max_sched_extend_regions_iters > 0;
       if (extend_regions_p)
         {
           degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
@@ -1161,7 +1161,7 @@  extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
   int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
   int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
-  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
+  max_iter = param_max_sched_extend_regions_iters;
 
   max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
 
@@ -2224,7 +2224,7 @@  new_ready (rtx_insn *next, ds_t ts)
 	  || (IS_SPECULATIVE_INSN (next)
 	      && ((recog_memoized (next) >= 0
 		   && min_insn_conflict_delay (curr_state, next, next)
-                   > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+                   > param_max_sched_insn_conflict_delay)
                   || IS_SPECULATION_CHECK_P (next)
 		  || !check_live (next, INSN_BB (next))
 		  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
@@ -3189,7 +3189,7 @@  schedule_region (int rgn)
 	  if (f
 	      && (!f->probability.initialized_p ()
 		  || f->probability.to_reg_br_prob_base () * 100 / REG_BR_PROB_BASE >=
-	             PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF)))
+	             param_sched_state_edge_prob_cutoff))
 	    {
 	      memcpy (bb_state[f->dest->index], curr_state,
 		      dfa_state_size);
@@ -3229,7 +3229,7 @@  schedule_region (int rgn)
 void
 sched_rgn_init (bool single_blocks_p)
 {
-  min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
+  min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
 		    / 100);
 
   nr_inter = 0;
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 8a1d41473b9..d6513b136a0 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -6012,7 +6012,7 @@  make_region_from_loop (class loop *loop)
   basic_block preheader_block;
 
   if (loop->num_nodes
-      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+      > (unsigned) param_max_pipeline_region_blocks)
     return -1;
 
   /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
@@ -6021,7 +6021,7 @@  make_region_from_loop (class loop *loop)
       return -1;
 
   loop->ninsns = num_loop_insns (loop);
-  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+  if ((int) loop->ninsns > param_max_pipeline_region_insns)
     return -1;
 
   loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index ddc76a73ede..531b0129cd9 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -969,7 +969,7 @@  extern bool preheader_removed;
 /* Software lookahead window size.
    According to the results in Nakatani and Ebcioglu [1993], window size of 16
    is enough to extract most ILP in integer code.  */
-#define MAX_WS (PARAM_VALUE (PARAM_SELSCHED_MAX_LOOKAHEAD))
+#define MAX_WS (param_selsched_max_lookahead)
 
 extern regset sel_all_regs;
 
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 652784e79ed..2ecc06d0f17 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -3454,7 +3454,7 @@  process_pipelined_exprs (av_set_t *av_ptr)
   FOR_EACH_EXPR_1 (expr, si, av_ptr)
     {
       if (EXPR_SCHED_TIMES (expr)
-	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
+	  >= param_selsched_max_sched_times)
 	av_set_iter_remove (&si);
     }
 }
@@ -6806,7 +6806,7 @@  sel_setup_region_sched_flags (void)
                   && (flag_sel_sched_pipelining != 0)
 		  && current_loop_nest != NULL
 		  && loop_has_exit_edges (current_loop_nest));
-  max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
+  max_insns_to_rename = param_selsched_insns_to_rename;
   max_ws = MAX_WS;
 }
 
diff --git a/gcc/shrink-wrap.c b/gcc/shrink-wrap.c
index 2dc92c34b17..e612f85c15a 100644
--- a/gcc/shrink-wrap.c
+++ b/gcc/shrink-wrap.c
@@ -775,7 +775,7 @@  try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq)
   vec.quick_push (pro);
 
   unsigned max_grow_size = get_uncond_jump_length ();
-  max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
+  max_grow_size *= param_max_grow_copy_bb_insns;
 
   while (!vec.is_empty () && pro != entry)
     {
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index fee4cc271cd..2f26e468b8a 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -2274,17 +2274,18 @@  default_max_noce_ifcvt_seq_cost (edge e)
 {
   bool predictable_p = predictable_edge_p (e);
 
-  enum compiler_param param
-    = (predictable_p
-       ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
-       : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
-  /* If we have a parameter set, use that, otherwise take a guess using
-     BRANCH_COST.  */
-  if (global_options_set.x_param_values[param])
-    return PARAM_VALUE (param);
+  if (predictable_p)
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+	return param_max_rtl_if_conversion_predictable_cost;
+    }
   else
-    return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
+    {
+      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+	return param_max_rtl_if_conversion_unpredictable_cost;
+    }
+
+  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
 }
 
 /* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */
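
The hunk above also shows the new way to test whether a --param was
given explicitly: instead of indexing global_options_set.x_param_values[]
by the params enum, each converted param carries its own set-flag field.
A sketch of the general shape, where "foo" is a placeholder and not a
real --param:

  /* Sketch only: foo stands for any converted --param.  */
  static int
  foo_or_default (int fallback)
  {
    /* global_options_set.x_param_foo is nonzero iff the user passed
       an explicit --param foo=... on the command line.  */
    if (global_options_set.x_param_foo)
      return param_foo;
    return fallback;
  }
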
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 18fea1c3dd1..a836646f8a1 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -679,7 +679,7 @@  print_version (FILE *file, const char *indent, bool show_global_state)
       fprintf (file,
 	       file == stderr ? _(fmt4) : fmt4,
 	       indent, *indent != 0 ? " " : "",
-	       PARAM_VALUE (GGC_MIN_EXPAND), PARAM_VALUE (GGC_MIN_HEAPSIZE));
+	       param_ggc_min_expand, param_ggc_min_heapsize);
 
       print_plugins_versions (file, indent);
     }
@@ -1863,7 +1863,7 @@  process_options (void)
 
   if (flag_checking >= 2)
     hash_table_sanitize_eq_limit
-      = PARAM_VALUE (PARAM_HASH_TABLE_VERIFICATION_LIMIT);
+      = param_hash_table_verification_limit;
 
   /* Please don't change global_options after this point, those changes won't
      be reflected in optimization_{default,current}_node.  */
diff --git a/gcc/tracer.c b/gcc/tracer.c
index 52f07c56f03..02203923369 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -276,13 +276,13 @@  tail_duplicate (void)
   initialize_original_copy_tables ();
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
+    probability_cutoff = param_tracer_min_branch_probability_feedback;
   else
-    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
+    probability_cutoff = param_tracer_min_branch_probability;
   probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
   branch_ratio_cutoff =
-    (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
+    (REG_BR_PROB_BASE / 100 * param_tracer_min_branch_ratio);
 
   FOR_EACH_BB_FN (bb, cfun)
     {
@@ -296,11 +296,11 @@  tail_duplicate (void)
     }
 
   if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
-    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
+    cover_insns = param_tracer_dynamic_coverage_feedback;
   else
-    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
+    cover_insns = param_tracer_dynamic_coverage;
   cover_insns = (weighted_insns * cover_insns + 50) / 100;
-  max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100;
+  max_dup_insns = (ninsns * param_tracer_max_code_growth + 50) / 100;
 
   while (traced_insns < cover_insns && nduplicated < max_dup_insns
          && !heap.empty ())
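
The cutoff computations above keep their meaning after the conversion:
the params are percentages scaled into REG_BR_PROB_BASE units.  A worked
instance, assuming REG_BR_PROB_BASE == 10000 and the default of 50
documented for --param tracer-min-branch-probability:

  probability_cutoff = 10000 / 100 * 50;  /* == 5000, i.e. 50%  */
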
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 2e775286540..4a0f6e7dee9 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -1108,7 +1108,7 @@  tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
 	  && TYPE_SIZE_UNIT (type) != NULL
 	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
 	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
-	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
+	      < param_tm_max_aggregate_size)
 	  /* We must be able to copy this type normally.  I.e., no
 	     special constructors and the like.  */
 	  && !TREE_ADDRESSABLE (type))
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index 5ed62260993..8d5fce30289 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -333,9 +333,9 @@  chrec_fold_plus_1 (enum tree_code code, tree type,
 	    int size = 0;
 	    if ((tree_contains_chrecs (op0, &size)
 		 || tree_contains_chrecs (op1, &size))
-		&& size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+		&& size < param_scev_max_expr_size)
 	      return build2 (code, type, op0, op1);
-	    else if (size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+	    else if (size < param_scev_max_expr_size)
 	      {
 		if (code == POINTER_PLUS_EXPR)
 		  return fold_build_pointer_plus (fold_convert (type, op0),
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 7f75b7e3afe..e9fa4ae69c3 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -836,7 +836,7 @@  split_constant_offset (tree exp, tree *var, tree *off,
 void
 split_constant_offset (tree exp, tree *var, tree *off)
 {
-  unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  unsigned limit = param_ssa_name_def_chain_limit;
   static hash_map<tree, std::pair<tree, tree> > *cache;
   if (!cache)
     cache = new hash_map<tree, std::pair<tree, tree> > (37);
@@ -4917,7 +4917,7 @@  compute_all_dependences (vec<data_reference_p> datarefs,
   unsigned int i, j;
 
   if ((int) datarefs.length ()
-      > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+      > param_loop_max_datarefs_for_datadeps)
     {
       struct data_dependence_relation *ddr;
 
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index df9046a3014..09560198539 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -125,7 +125,7 @@  along with GCC; see the file COPYING3.  If not see
-/* Only handle PHIs with no more arguments unless we are asked to by
-   simd pragma.  */
+/* Only handle PHIs with no more arguments than this unless we are
+   asked to by a simd pragma.  */
 #define MAX_PHI_ARG_NUM \
-  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
+  ((unsigned) param_max_tree_if_conversion_phi_args)
 
 /* True if we've converted a statement that was only executed when some
    condition C was true, and if for correctness we need to predicate the
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 2b8b9ee58c1..b1b6dca9070 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1812,7 +1812,7 @@  remap_gimple_stmt (gimple *stmt, copy_body_data *id)
 	  /* If the inlined function has too many debug markers,
 	     don't copy them.  */
 	  if (id->src_cfun->debug_marker_count
-	      > PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+	      > param_max_debug_marker_count)
 	    return stmts;
 
 	  gdebug *copy = as_a <gdebug *> (gimple_copy (stmt));
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 81784866ad1..9930daaafa9 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -119,7 +119,7 @@  along with GCC; see the file COPYING3.  If not see
 
 
 #define MAX_DATAREFS_NUM \
-	((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+	((unsigned) param_loop_max_datarefs_for_datadeps)
 
 /* Threshold controlling number of distributed partitions.  Given it may
    be unnecessary if a memory stream cost model is invented in the future,
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index ae880e151db..1a35c7dbdc3 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -890,7 +890,7 @@  parloops_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info
 
 /* Minimal number of iterations of a loop that should be executed in each
    thread.  */
-#define MIN_PER_THREAD PARAM_VALUE (PARAM_PARLOOPS_MIN_PER_THREAD)
+#define MIN_PER_THREAD param_parloops_min_per_thread
 
 /* Element of the hashtable, representing a
    reduction in the current loop.  */
@@ -2875,25 +2875,23 @@  create_parallel_loop (class loop *loop, tree loop_fn, tree data,
   else
     {
       t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
-      int chunk_size = PARAM_VALUE (PARAM_PARLOOPS_CHUNK_SIZE);
-      enum PARAM_PARLOOPS_SCHEDULE_KIND schedule_type \
-	= (enum PARAM_PARLOOPS_SCHEDULE_KIND) PARAM_VALUE (PARAM_PARLOOPS_SCHEDULE);
-      switch (schedule_type)
+      int chunk_size = param_parloops_chunk_size;
+      switch (param_parloops_schedule)
 	{
-	case PARAM_PARLOOPS_SCHEDULE_KIND_static:
+	case PARLOOPS_SCHEDULE_STATIC:
 	  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
 	  break;
-	case PARAM_PARLOOPS_SCHEDULE_KIND_dynamic:
+	case PARLOOPS_SCHEDULE_DYNAMIC:
 	  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
 	  break;
-	case PARAM_PARLOOPS_SCHEDULE_KIND_guided:
+	case PARLOOPS_SCHEDULE_GUIDED:
 	  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_GUIDED;
 	  break;
-	case PARAM_PARLOOPS_SCHEDULE_KIND_auto:
+	case PARLOOPS_SCHEDULE_AUTO:
 	  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_AUTO;
 	  chunk_size = 0;
 	  break;
-	case PARAM_PARLOOPS_SCHEDULE_KIND_runtime:
+	case PARLOOPS_SCHEDULE_RUNTIME:
 	  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_RUNTIME;
 	  chunk_size = 0;
 	  break;
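
The PARLOOPS_SCHEDULE_* values used above replace the old
PARAM_PARLOOPS_SCHEDULE_KIND_* enumerators; under the option machinery
they are assumed to come from a C enum generated from the Enum()
description of --param parloops-schedule, along these lines (the enum
tag below is illustrative, not taken from the generated header):

  enum parloops_schedule_type
  {
    PARLOOPS_SCHEDULE_STATIC,
    PARLOOPS_SCHEDULE_DYNAMIC,
    PARLOOPS_SCHEDULE_GUIDED,
    PARLOOPS_SCHEDULE_AUTO,
    PARLOOPS_SCHEDULE_RUNTIME
  };
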
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 299c45e287b..3fe2a6dcb0a 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -2194,7 +2194,7 @@  determine_unroll_factor (vec<chain_p> chains)
 {
   chain_p chain;
   unsigned factor = 1, af, nfactor, i;
-  unsigned max = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
+  unsigned max = param_max_unroll_times;
 
   FOR_EACH_VEC_ELT (chains, i, chain)
     {
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 50b2700834e..ed5d5663a3a 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1149,7 +1149,7 @@  tail_recurse:
 	return t_false;
 
       /* Give up if the path is longer than the MAX that we allow.  */
-      if (limit > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_COMPLEXITY))
+      if (limit > param_scev_max_expr_complexity)
 	{
 	  *evolution_of_loop = chrec_dont_know;
 	  return t_dont_know;
@@ -2623,7 +2623,7 @@  instantiate_scev_r (edge instantiate_below,
 		    bool *fold_conversions, int size_expr)
 {
   /* Give up if the expression is larger than the MAX that we allow.  */
-  if (size_expr++ > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
+  if (size_expr++ > param_scev_max_expr_size)
     return chrec_dont_know;
 
   if (chrec == NULL_TREE
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 44862690559..209f9718d33 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -2786,16 +2786,21 @@  analyze_all_variable_accesses (void)
   unsigned i;
   bool optimize_speed_p = !optimize_function_for_size_p (cfun);
 
-  enum compiler_param param = optimize_speed_p
-			? PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED
-			: PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE;
-
-  /* If the user didn't set PARAM_SRA_MAX_SCALARIZATION_SIZE_<...>,
-     fall back to a target default.  */
+  /* If the user didn't set --param sra-max-scalarization-size-<...>,
+     fall back to a target default.  */
   unsigned HOST_WIDE_INT max_scalarization_size
-    = global_options_set.x_param_values[param]
-      ? PARAM_VALUE (param)
-      : get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+    = get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+
+  if (optimize_speed_p)
+    {
+      if (global_options_set.x_param_sra_max_scalarization_size_speed)
+	max_scalarization_size = param_sra_max_scalarization_size_speed;
+    }
+  else
+    {
+      if (global_options_set.x_param_sra_max_scalarization_size_size)
+	max_scalarization_size = param_sra_max_scalarization_size_size;
+    }
 
   max_scalarization_size *= BITS_PER_UNIT;
 
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 567aef8bc26..335787e0517 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -2195,7 +2195,7 @@  fold_builtin_alloca_with_align (gimple *stmt)
   size = tree_to_uhwi (arg);
 
   /* Heuristic: don't fold large allocas.  */
-  threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
+  threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
   /* In case the alloca is located at function entry, it has the same lifetime
      as a declared array, so we allow a larger size.  */
   block = gimple_block (stmt);
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index 21a15eef690..1b060d9e408 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -238,7 +238,7 @@  setup_live_bytes_from_ref (ao_ref *ref, sbitmap live_bytes)
   if (valid_ao_ref_for_dse (ref)
       && ref->size.is_constant (&const_size)
       && (const_size / BITS_PER_UNIT
-	  <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
+	  <= param_dse_max_object_size))
     {
       bitmap_clear (live_bytes);
       bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
@@ -611,7 +611,7 @@  dse_optimize_redundant_stores (gimple *stmt)
   FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
     {
       /* Limit stmt walking.  */
-      if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+      if (++cnt > param_dse_max_alias_queries_per_store)
 	BREAK_FROM_IMM_USE_STMT (ui);
 
       /* If USE_STMT stores 0 into one or more of the same locations
@@ -704,7 +704,7 @@  dse_classify_store (ao_ref *ref, gimple *stmt,
       FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
 	{
 	  /* Limit stmt walking.  */
-	  if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
+	  if (++cnt > param_dse_max_alias_queries_per_store)
 	    {
 	      fail = true;
 	      BREAK_FROM_IMM_USE_STMT (ui);
@@ -853,7 +853,7 @@  class dse_dom_walker : public dom_walker
 public:
   dse_dom_walker (cdi_direction direction)
     : dom_walker (direction),
-    m_live_bytes (PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)),
+    m_live_bytes (param_dse_max_object_size),
     m_byte_tracking_enabled (false) {}
 
   virtual edge before_dom_children (basic_block);
diff --git a/gcc/tree-ssa-ifcombine.c b/gcc/tree-ssa-ifcombine.c
index 21c1b0e8918..fa3bc0a4377 100644
--- a/gcc/tree-ssa-ifcombine.c
+++ b/gcc/tree-ssa-ifcombine.c
@@ -565,9 +565,9 @@  ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
 	  tree t1, t2;
 	  gimple_stmt_iterator gsi;
 	  bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
-	  if (PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT) != -1)
+	  if (param_logical_op_non_short_circuit != -1)
 	    logical_op_non_short_circuit
-	      = PARAM_VALUE (PARAM_LOGICAL_OP_NON_SHORT_CIRCUIT);
+	      = param_logical_op_non_short_circuit;
 	  if (!logical_op_non_short_circuit || flag_sanitize_coverage)
 	    return false;
 	  /* Only do this optimization if the inner bb contains only the conditional. */
diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c
index 5a30a296d5e..fd6d74d5891 100644
--- a/gcc/tree-ssa-loop-ch.c
+++ b/gcc/tree-ssa-loop-ch.c
@@ -368,7 +368,7 @@  ch_base::copy_headers (function *fun)
 
   FOR_EACH_LOOP (loop, 0)
     {
-      int initial_limit = PARAM_VALUE (PARAM_MAX_LOOP_HEADER_INSNS);
+      int initial_limit = param_max_loop_header_insns;
       int remaining_limit = initial_limit;
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 78664188c45..cd1aa563e21 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -230,7 +230,7 @@  static bool ref_indep_loop_p (class loop *, im_mem_ref *);
 static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
 
 /* Minimum cost of an expensive expression.  */
-#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
+#define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
 
 /* The outermost loop for which execution of the header guarantees that the
    block will be executed.  */
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index c505f85f91a..7b352431225 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -739,7 +739,7 @@  try_unroll_loop_completely (class loop *loop,
     return false;
 
   if (!loop->unroll
-      && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
+      && n_unroll > (unsigned) param_max_completely_peel_times)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, "Not unrolling loop %d "
@@ -780,7 +780,7 @@  try_unroll_loop_completely (class loop *loop,
 	  bool large
 	    = tree_estimate_loop_size
 		(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
-		 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
+		 param_max_completely_peeled_insns);
 	  if (large)
 	    {
 	      if (dump_file && (dump_flags & TDF_DETAILS))
@@ -864,7 +864,7 @@  try_unroll_loop_completely (class loop *loop,
 	     blow the branch predictor tables.  Limit number of
 	     branches on the hot path through the peeled sequence.  */
 	  else if (size.num_branches_on_hot_path * (int)n_unroll
-		   > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
+		   > param_max_peel_branches)
 	    {
 	      if (dump_file && (dump_flags & TDF_DETAILS))
 		fprintf (dump_file, "Not unrolling loop %d: "
@@ -874,7 +874,7 @@  try_unroll_loop_completely (class loop *loop,
 	      return false;
 	    }
 	  else if (unr_insns
-		   > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
+		   > (unsigned) param_max_completely_peeled_insns)
 	    {
 	      if (dump_file && (dump_flags & TDF_DETAILS))
 		fprintf (dump_file, "Not unrolling loop %d: "
@@ -998,7 +998,7 @@  try_peel_loop (class loop *loop,
   int peeled_size;
 
   if (!flag_peel_loops
-      || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
+      || param_max_peel_times <= 0
       || !peeled_loops)
     return false;
 
@@ -1057,7 +1057,7 @@  try_peel_loop (class loop *loop,
   /* We want to peel estimated number of iterations + 1 (so we never
-     enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
+     enter the loop on quick path).  Check against --param max-peel-times
      and be sure to avoid overflows.  */
-  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
+  if (npeel > param_max_peel_times - 1)
     {
       if (dump_file)
 	fprintf (dump_file, "Not peeling: rolls too much "
@@ -1068,9 +1068,9 @@  try_peel_loop (class loop *loop,
 
   /* Check peeled loops size.  */
   tree_estimate_loop_size (loop, exit, NULL, &size,
-			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
+			   param_max_peeled_insns);
   if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
-      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
+      > param_max_peeled_insns)
     {
       if (dump_file)
 	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
@@ -1502,7 +1502,7 @@  tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
         BITMAP_FREE (loop_closed_ssa_invalidated);
     }
   while (changed
-	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
+	 && ++iteration <= param_max_unroll_iterations);
 
   BITMAP_FREE (father_bbs);
 
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 5938cfec08b..6d0b8fdefd7 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -151,8 +151,8 @@  avg_loop_niter (class loop *loop)
     {
       niter = likely_max_stmt_executions_int (loop);
 
-      if (niter == -1 || niter > PARAM_VALUE (PARAM_AVG_LOOP_NITER))
-	return PARAM_VALUE (PARAM_AVG_LOOP_NITER);
+      if (niter == -1 || niter > param_avg_loop_niter)
+	return param_avg_loop_niter;
     }
 
   return niter;
@@ -715,19 +715,19 @@  struct iv_ca_delta
 /* Bound on number of candidates below that all candidates are considered.  */
 
 #define CONSIDER_ALL_CANDIDATES_BOUND \
-  ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
+  ((unsigned) param_iv_consider_all_candidates_bound)
 
 /* If there are more iv occurrences, we just give up (it is quite unlikely that
    optimizing such a loop would help, and it would take ages).  */
 
 #define MAX_CONSIDERED_GROUPS \
-  ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))
+  ((unsigned) param_iv_max_considered_uses)
 
 /* If there are at most this number of ivs in the set, try removing unnecessary
    ivs from the set always.  */
 
 #define ALWAYS_PRUNE_CAND_SET_BOUND \
-  ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
+  ((unsigned) param_iv_always_prune_cand_set_bound)
 
 /* The list of trees for that the decl_rtl field must be reset is stored
    here.  */
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index 6a1bbaae573..06f90160047 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -984,7 +984,7 @@  can_unroll_loop_p (class loop *loop, unsigned factor,
 
   /* The final loop should be small enough.  */
   if (tree_num_loop_insns (loop, &eni_size_weights) * factor
-      > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
+      > (unsigned) param_max_unrolled_insns)
     return false;
 
   return true;
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index db666f01980..fe24a70451d 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -2863,7 +2863,7 @@  finite_loop_p (class loop *loop)
 /* Bound on the number of iterations we try to evaluate.  */
 
 #define MAX_ITERATIONS_TO_TRACK \
-  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
+  ((unsigned) param_max_iterations_to_track)
 
 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
    result by a chain of operations such that all but exactly one of their
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 04ff5244b69..fbb65191ca0 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -167,7 +167,7 @@  along with GCC; see the file COPYING3.  If not see
    of cache hierarchy).  */
 
 #ifndef PREFETCH_BLOCK
-#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
+#define PREFETCH_BLOCK param_l1_cache_line_size
 #endif
 
 /* Do we have a forward hardware sequential prefetching?  */
@@ -191,8 +191,8 @@  along with GCC; see the file COPYING3.  If not see
 #define ACCEPTABLE_MISS_RATE 50
 #endif
 
-#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
-#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
+#define L1_CACHE_SIZE_BYTES ((unsigned) (param_l1_cache_size * 1024))
+#define L2_CACHE_SIZE_BYTES ((unsigned) (param_l2_cache_size * 1024))
 
 /* We consider a memory access nontemporal if it is not reused sooner than
    after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
@@ -993,7 +993,8 @@  static bool
 should_issue_prefetch_p (struct mem_ref *ref)
 {
   /* Do we want to issue prefetches for non-constant strides?  */
-  if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
+  if (!cst_and_fits_in_hwi (ref->group->step)
+      && param_prefetch_dynamic_strides == 0)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
@@ -1008,14 +1008,14 @@  should_issue_prefetch_p (struct mem_ref *ref)
      range.  */
   if (cst_and_fits_in_hwi (ref->group->step)
       && abs_hwi (int_cst_value (ref->group->step))
-	  < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
+	  < (HOST_WIDE_INT) param_prefetch_minimum_stride)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
 		 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
 		 ") is less than the mininum required stride of %d\n",
 		 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
-		 PREFETCH_MINIMUM_STRIDE);
+		 param_prefetch_minimum_stride);
       return false;
     }
 
@@ -1055,8 +1055,9 @@  schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
   struct mem_ref *ref;
   bool any = false;
 
-  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
-  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
+  /* At most param_simultaneous_prefetches should be running at the
+     same time.  */
+  remaining_prefetch_slots = param_simultaneous_prefetches;
 
   /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
      AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
@@ -1406,7 +1406,7 @@  determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
      us from unrolling the loops too many times in cases where we only expect
      gains from better scheduling and decreasing loop overhead, which is not
      the case here.  */
-  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
+  upper_bound = param_max_unrolled_insns / ninsns;
 
   /* If we unrolled the loop more times than it iterates, the unrolled version
-     of the loop would be never entered.  */
+     of the loop would never be entered.  */
@@ -1459,7 +1459,7 @@  volume_of_references (struct mem_ref_group *refs)
 	   accessed in each iteration.  TODO -- in the latter case, we should
 	   take the size of the reference into account, rounding it up on cache
 	   line size multiple.  */
-	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
+	volume += param_l1_cache_line_size / ref->prefetch_mod;
       }
   return volume;
 }
@@ -1512,7 +1512,7 @@  add_subscript_strides (tree access_fn, unsigned stride,
       if (tree_fits_shwi_p (step))
 	astep = tree_to_shwi (step);
       else
-	astep = L1_CACHE_LINE_SIZE;
+	astep = param_l1_cache_line_size;
 
       strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
 
@@ -1562,7 +1562,7 @@  self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
 	  if (tree_fits_uhwi_p (stride))
 	    astride = tree_to_uhwi (stride);
 	  else
-	    astride = L1_CACHE_LINE_SIZE;
+	    astride = param_l1_cache_line_size;
 
 	  ref = TREE_OPERAND (ref, 0);
 	}
@@ -1578,7 +1578,7 @@  self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
 
       s = strides[i] < 0 ?  -strides[i] : strides[i];
 
-      if (s < (unsigned) L1_CACHE_LINE_SIZE
+      if (s < (unsigned) param_l1_cache_line_size
 	  && (loop_sizes[i]
 	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
 	{
@@ -1825,7 +1825,7 @@  mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
      should account for cache misses.  */
   insn_to_mem_ratio = ninsns / mem_ref_count;
 
-  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
+  if (insn_to_mem_ratio < param_prefetch_min_insn_to_mem_ratio)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
         fprintf (dump_file,
@@ -1862,7 +1862,7 @@  insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
      and the exit branches will get eliminated), so it might be better to use
      tree_estimate_loop_size + estimated_unrolled_size.  */
   insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
-  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
+  if (insn_to_prefetch_ratio < param_min_insn_to_prefetch_ratio)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
         fprintf (dump_file,
@@ -1902,7 +1902,7 @@  loop_prefetch_arrays (class loop *loop)
   if (time == 0)
     return false;
 
-  ahead = (PREFETCH_LATENCY + time - 1) / time;
+  ahead = (param_prefetch_latency + time - 1) / time;
   est_niter = estimated_stmt_executions_int (loop);
   if (est_niter == -1)
     est_niter = likely_max_stmt_executions_int (loop);
@@ -1998,17 +1998,18 @@  tree_ssa_prefetch_arrays (void)
     {
       fprintf (dump_file, "Prefetching parameters:\n");
       fprintf (dump_file, "    simultaneous prefetches: %d\n",
-	       SIMULTANEOUS_PREFETCHES);
-      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
+	       param_simultaneous_prefetches);
+      fprintf (dump_file, "    prefetch latency: %d\n", param_prefetch_latency);
       fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
       fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
-	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
-      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
-      fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
+	       L1_CACHE_SIZE_BYTES / param_l1_cache_line_size,
+	       param_l1_cache_size);
+      fprintf (dump_file, "    L1 cache line size: %d\n", param_l1_cache_line_size);
+      fprintf (dump_file, "    L2 cache size: %d kB\n", param_l2_cache_size);
       fprintf (dump_file, "    min insn-to-prefetch ratio: %d \n",
-	       MIN_INSN_TO_PREFETCH_RATIO);
+	       param_min_insn_to_prefetch_ratio);
       fprintf (dump_file, "    min insn-to-mem ratio: %d \n",
-	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
+	       param_prefetch_min_insn_to_mem_ratio);
       fprintf (dump_file, "\n");
     }
 
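
The prefetch distance in loop_prefetch_arrays above is a ceiling
division of the prefetch latency by the cost of one loop iteration.
For example, assuming the default --param prefetch-latency=200 and an
iteration costing time == 3 units:

  ahead = (200 + 3 - 1) / 3;  /* ceil (200 / 3) == 67 iterations ahead  */
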
diff --git a/gcc/tree-ssa-loop-unswitch.c b/gcc/tree-ssa-loop-unswitch.c
index e60019db946..4e3aa7c41b7 100644
--- a/gcc/tree-ssa-loop-unswitch.c
+++ b/gcc/tree-ssa-loop-unswitch.c
@@ -288,7 +288,7 @@  tree_unswitch_single_loop (class loop *loop, int num)
 
       /* The loop should not be too large, to limit code growth. */
       if (tree_num_loop_insns (loop, &eni_size_weights)
-	  > (unsigned) PARAM_VALUE (PARAM_MAX_UNSWITCH_INSNS))
+	  > (unsigned) param_max_unswitch_insns)
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    fprintf (dump_file, ";; Not unswitching, loop too big\n");
@@ -323,7 +323,7 @@  tree_unswitch_single_loop (class loop *loop, int num)
       if (i == loop->num_nodes)
 	{
 	  if (dump_file
-	      && num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL)
+	      && num > param_max_unswitch_level
 	      && (dump_flags & TDF_DETAILS))
 	    fprintf (dump_file, ";; Not unswitching anymore, hit max level\n");
 
@@ -352,7 +352,7 @@  tree_unswitch_single_loop (class loop *loop, int num)
 	  changed = true;
 	}
       /* Do not unswitch too much.  */
-      else if (num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL))
+      else if (num > param_max_unswitch_level)
 	{
 	  i++;
 	  continue;
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 013ef93e7ad..8b5f3149cbc 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -1975,7 +1975,7 @@  gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
       && !HONOR_SIGNED_ZEROS (mode))
     {
       unsigned int max_depth = speed_p
-				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
+				? param_max_pow_sqrt_depth
 				: 2;
 
       tree expand_with_sqrts
@@ -3089,7 +3089,7 @@  convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
   bool check_defer
     = (state->m_deferring_p
        && (tree_to_shwi (TYPE_SIZE (type))
-	   <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
+	   <= param_avoid_fma_max_bits));
   bool defer = check_defer;
   bool seen_negate_p = false;
   /* Make sure that the multiplication statement becomes dead after
@@ -3744,7 +3744,7 @@  math_opts_dom_walker::after_dom_children (basic_block bb)
 {
   gimple_stmt_iterator gsi;
 
-  fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);
+  fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);
 
   for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
     {
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 38bb8b24155..43990b79644 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -2469,7 +2469,7 @@  cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
 
   /* If either vectorization or if-conversion is disabled then do
      not sink any stores.  */
-  if (MAX_STORES_TO_SINK == 0
+  if (param_max_stores_to_sink == 0
       || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
       || !flag_tree_loop_if_convert)
     return false;
@@ -2528,7 +2528,7 @@  cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
 
   /* No pairs of stores found.  */
   if (!then_stores.length ()
-      || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
+      || then_stores.length () > (unsigned) param_max_stores_to_sink)
     {
       free_data_refs (then_datarefs);
       free_data_refs (else_datarefs);
@@ -2658,7 +2658,7 @@  static void
 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
 		      basic_block bb2, basic_block bb3)
 {
-  int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
+  int param_align = param_l1_cache_line_size;
   unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
   gphi_iterator gsi;
 
@@ -2808,7 +2808,7 @@  static bool
 gate_hoist_loads (void)
 {
   return (flag_hoist_adjacent_loads == 1
-	  && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
+	  && param_l1_cache_line_size
 	  && HAVE_conditional_move);
 }
 
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 363dec6f4dd..58a147036af 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1156,7 +1156,7 @@  translate_vuse_through_block (vec<vn_reference_op_s> operands,
   if (gimple_bb (phi) != phiblock)
     return vuse;
 
-  unsigned int cnt = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+  unsigned int cnt = param_sccvn_max_alias_queries_per_access;
   use_oracle = ao_ref_init_from_vn_reference (&ref, set, type, operands);
 
   /* Use the alias-oracle to find either the PHI node in this block,
@@ -2235,7 +2235,7 @@  compute_partial_antic_aux (basic_block block,
   bitmap_set_t PA_OUT;
   edge e;
   edge_iterator ei;
-  unsigned long max_pa = PARAM_VALUE (PARAM_MAX_PARTIAL_ANTIC_LENGTH);
+  unsigned long max_pa = param_max_partial_antic_length;
 
   old_PA_IN = PA_OUT = NULL;
 
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 510dfd1e188..9e5b5290b55 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -4945,7 +4945,7 @@  static int
 get_reassociation_width (int ops_num, enum tree_code opc,
 			 machine_mode mode)
 {
-  int param_width = PARAM_VALUE (PARAM_TREE_REASSOC_WIDTH);
+  int param_width = param_tree_reassoc_width;
   int width;
   int width_min;
   int cycles_best;
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 7465bedb349..98fbe81fa7b 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -3073,7 +3073,7 @@  vn_reference_lookup_pieces (tree vuse, alias_set_type set, tree type,
       && vr1.vuse)
     {
       ao_ref r;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       vn_walk_cb_data data (&vr1, NULL_TREE, NULL, kind, true);
       if (ao_ref_init_from_vn_reference (&r, set, type, vr1.operands))
 	*vnresult =
@@ -3124,7 +3124,7 @@  vn_reference_lookup (tree op, tree vuse, vn_lookup_kind kind,
     {
       vn_reference_t wvnresult;
       ao_ref r;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       /* Make sure to use a valueized reference if we valueized anything.
          Otherwise preserve the full reference for advanced TBAA.  */
       if (!valuezied_anything
@@ -6984,7 +6984,7 @@  do_rpo_vn (function *fn, edge entry, bitmap exit_bbs,
   if (iterate)
     {
       loop_p loop;
-      unsigned max_depth = PARAM_VALUE (PARAM_RPO_VN_MAX_LOOP_DEPTH);
+      unsigned max_depth = param_rpo_vn_max_loop_depth;
       FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
 	if (loop_depth (loop) > max_depth)
 	  for (unsigned i = 2;
diff --git a/gcc/tree-ssa-scopedtables.c b/gcc/tree-ssa-scopedtables.c
index 574bc30eee1..9fb2f500f46 100644
--- a/gcc/tree-ssa-scopedtables.c
+++ b/gcc/tree-ssa-scopedtables.c
@@ -292,7 +292,7 @@  avail_exprs_stack::lookup_avail_expr (gimple *stmt, bool insert, bool tbaa_p)
 	 up the virtual use-def chain using walk_non_aliased_vuses.
 	 But don't do this when removing expressions from the hash.  */
       ao_ref ref;
-      unsigned limit = PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS);
+      unsigned limit = param_sccvn_max_alias_queries_per_access;
       if (!(vuse1 && vuse2
 	    && gimple_assign_single_p (stmt)
 	    && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
diff --git a/gcc/tree-ssa-sink.c b/gcc/tree-ssa-sink.c
index 3bfad0f90ed..cbad34b26de 100644
--- a/gcc/tree-ssa-sink.c
+++ b/gcc/tree-ssa-sink.c
@@ -215,7 +215,7 @@  select_best_block (basic_block early_bb,
   /* Get the sinking threshold.  If the statement to be moved has memory
      operands, then increase the threshold by 7% as those are even more
      profitable to avoid, clamping at 100%.  */
-  threshold = PARAM_VALUE (PARAM_SINK_FREQUENCY_THRESHOLD);
+  threshold = param_sink_frequency_threshold;
   if (gimple_vuse (stmt) || gimple_vdef (stmt))
     {
       threshold += 7;
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index 163be2d67f6..626d7f9dc8a 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -528,7 +528,7 @@  static int
 new_stridx (tree exp)
 {
   int idx;
-  if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+  if (max_stridx >= param_max_tracked_strlens)
     return 0;
   if (TREE_CODE (exp) == SSA_NAME)
     {
@@ -557,7 +557,7 @@  static int
 new_addr_stridx (tree exp)
 {
   int *pidx;
-  if (max_stridx >= PARAM_VALUE (PARAM_MAX_TRACKED_STRLENS))
+  if (max_stridx >= param_max_tracked_strlens)
     return 0;
   pidx = addr_stridxptr (exp);
   if (pidx != NULL)
@@ -1082,7 +1082,7 @@  get_range_strlen_dynamic (tree src, c_strlen_data *pdata,
   bitmap visited = NULL;
   tree maxbound = pdata->maxbound;
 
-  unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  unsigned limit = param_ssa_name_def_chain_limit;
   if (!get_range_strlen_dynamic (src, pdata, &visited, rvals, &limit))
     {
       /* On failure extend the length range to an impossible maximum
@@ -3972,7 +3972,7 @@  class ssa_name_limit_t
 
   ssa_name_limit_t ()
     : visited (NULL),
-    ssa_def_max (PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT)) { }
+    ssa_def_max (param_ssa_name_def_chain_limit) { }
 
   int next_ssa_name (tree);
 
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 6e7d4dbc5b3..74edcd4458f 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -5691,9 +5691,9 @@  push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack,
     return false;
 
   /* If the vector of fields is growing too big, bail out early.
-     Callers check for vec::length <= MAX_FIELDS_FOR_FIELD_SENSITIVE, make
+     Callers check for vec::length <= param_max_fields_for_field_sensitive, make
      sure this fails.  */
-  if (fieldstack->length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+  if (fieldstack->length () > (unsigned)param_max_fields_for_field_sensitive)
     return false;
 
   for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -6114,7 +6114,7 @@  create_variable_info_for_1 (tree decl, const char *name, bool add_id,
   /* If we didn't end up collecting sub-variables create a full
      variable for the decl.  */
   if (fieldstack.length () == 0
-      || fieldstack.length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+      || fieldstack.length () > (unsigned)param_max_fields_for_field_sensitive)
     {
       vi = new_var_info (decl, name, add_id);
       vi->offset = 0;
@@ -7179,7 +7179,7 @@  init_base_vars (void)
 static void
 init_alias_vars (void)
 {
-  use_field_sensitive = (MAX_FIELDS_FOR_FIELD_SENSITIVE > 1);
+  use_field_sensitive = (param_max_fields_for_field_sensitive > 1);
 
   bitmap_obstack_initialize (&pta_obstack);
   bitmap_obstack_initialize (&oldpta_obstack);
diff --git a/gcc/tree-ssa-tail-merge.c b/gcc/tree-ssa-tail-merge.c
index cbd5a277b39..ddf7449d945 100644
--- a/gcc/tree-ssa-tail-merge.c
+++ b/gcc/tree-ssa-tail-merge.c
@@ -1469,7 +1469,7 @@  find_clusters_1 (same_succ *same_succ)
   unsigned int i, j;
   bitmap_iterator bi, bj;
   int nr_comparisons;
-  int max_comparisons = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_COMPARISONS);
+  int max_comparisons = param_max_tail_merge_comparisons;
 
   EXECUTE_IF_SET_IN_BITMAP (same_succ->bbs, 0, i, bi)
     {
@@ -1731,7 +1731,7 @@  tail_merge_optimize (unsigned int todo)
   int nr_bbs_removed;
   bool loop_entered = false;
   int iteration_nr = 0;
-  int max_iterations = PARAM_VALUE (PARAM_MAX_TAIL_MERGE_ITERATIONS);
+  int max_iterations = param_max_tail_merge_iterations;
 
   if (!flag_tree_tail_merge
       || max_iterations == 0)
diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 1ff870ad00b..6d534647c60 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -157,7 +157,7 @@  thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
       return NULL;
 
   if (m_path.length () + 1
-      > (unsigned) PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
+      > (unsigned) param_max_fsm_thread_length)
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file, "FSM jump-thread path not considered: "
@@ -367,7 +367,7 @@  thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      as in PR 78407 this leads to noticeable improvements.  */
   if (m_speed_p && (optimize_edge_for_speed_p (taken_edge) || contains_hot_bb))
     {
-      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
+      if (n_insns >= param_max_fsm_thread_path_insns)
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    fprintf (dump_file, "FSM jump-thread path not considered: "
@@ -397,9 +397,9 @@  thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      optimizer would have done anyway, so an irreducible loop is not
      so bad.  */
   if (!threaded_multiway_branch && *creates_irreducible_loop
-      && (n_insns * (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
+      && (n_insns * (unsigned) param_fsm_scale_path_stmts
 	  > (m_path.length () *
-	     (unsigned) PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS))))
+	     (unsigned) param_fsm_scale_path_blocks)))
 
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
@@ -419,8 +419,8 @@  thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
      So for that case, drastically reduce the number of statements
      we are allowed to copy.  */
   if (!(threaded_through_latch && threaded_multiway_branch)
-      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
-	  >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
+      && (n_insns * param_fsm_scale_path_stmts
+	  >= param_max_jump_thread_duplication_stmts))
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	fprintf (dump_file,
@@ -683,7 +683,7 @@  thread_jumps::fsm_find_control_statement_thread_paths (tree name)
 
   if (gimple_code (def_stmt) == GIMPLE_PHI
       && (gimple_phi_num_args (def_stmt)
-	  >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
+	  >= (unsigned) param_fsm_maximum_phi_arguments))
     return;
 
   if (is_gimple_assign (def_stmt)
@@ -771,7 +771,7 @@  thread_jumps::find_jump_threads_backwards (basic_block bb, bool speed_p)
   m_visited_bbs.empty ();
   m_seen_loop_phi = false;
   m_speed_p = speed_p;
-  m_max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
+  m_max_threaded_paths = param_max_fsm_thread_paths;
 
   fsm_find_control_statement_thread_paths (name);
 }
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index a5d87662159..c43d7c5c39e 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -234,7 +234,7 @@  record_temporary_equivalences_from_stmts_at_dest (edge e,
   gimple_stmt_iterator gsi;
   int max_stmt_count;
 
-  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
+  max_stmt_count = param_max_jump_thread_duplication_stmts;
 
   /* Walk through each statement in the block recording equivalences
      we discover.  Note any equivalences we discover are context
@@ -275,7 +275,7 @@  record_temporary_equivalences_from_stmts_at_dest (edge e,
 	     killed due to threading, grow the max count
 	     accordingly.  */
 	  if (max_stmt_count
-	      == PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
+	      == param_max_jump_thread_duplication_stmts)
 	    {
 	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
 	      if (dump_file)
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index fe8f8f0bc28..ae441067789 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -545,7 +545,7 @@  compute_control_dep_chain (basic_block bb, basic_block dep_bb,
   bool found_cd_chain = false;
   size_t cur_chain_len = 0;
 
-  if (*num_calls > PARAM_VALUE (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS))
+  if (*num_calls > param_uninit_control_dep_attempts)
     return false;
   ++*num_calls;
 
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index b7149039ae4..166e40c3931 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -194,7 +194,7 @@  switch_conversion::check_range ()
     }
 
   if (tree_to_uhwi (m_range_size)
-      > ((unsigned) m_count * SWITCH_CONVERSION_BRANCH_RATIO))
+      > ((unsigned) m_count * param_switch_conversion_branch_ratio))
     {
       m_reason = "the maximum range-branch ratio exceeded";
       return false;
@@ -1268,8 +1268,8 @@  jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
 
   unsigned HOST_WIDE_INT max_ratio
     = (optimize_insn_for_size_p ()
-       ? PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SIZE)
-       : PARAM_VALUE (PARAM_JUMP_TABLE_MAX_GROWTH_RATIO_FOR_SPEED));
+       ? param_jump_table_max_growth_ratio_for_size
+       : param_jump_table_max_growth_ratio_for_speed);
   unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
 					    clusters[end]->get_high ());
   /* Check overflow.  */
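
A worked instance of the ratio test in switch_conversion::check_range
above, using the default branch ratio of eight mentioned in the
tree-switch-conversion.h comment below:

  /* m_count == 10 executable branches allow a case-label range of at
     most 10 * 8 == 80 values; a range of 100 trips
     "the maximum range-branch ratio exceeded".  */
  bool reject = 100 > 10U * 8;  /* true */
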
diff --git a/gcc/tree-switch-conversion.h b/gcc/tree-switch-conversion.h
index 653007f0e17..d37841afb34 100644
--- a/gcc/tree-switch-conversion.h
+++ b/gcc/tree-switch-conversion.h
@@ -476,7 +476,7 @@  case_tree_node::case_tree_node ():
 unsigned int
 jump_table_cluster::case_values_threshold (void)
 {
-  unsigned int threshold = PARAM_VALUE (PARAM_CASE_VALUES_THRESHOLD);
+  unsigned int threshold = param_case_values_threshold;
 
   if (threshold == 0)
     threshold = targetm.case_values_threshold ();
@@ -683,7 +683,7 @@  is changed into:
 	b_b = PHI <b_6, b_7>
 
 There are further constraints.  Specifically, the range of values across all
-case labels must not be bigger than SWITCH_CONVERSION_BRANCH_RATIO (default
-eight) times the number of the actual switch branches.
+case labels must not be bigger than param_switch_conversion_branch_ratio
+(default eight) times the number of the actual switch branches.
 
 This transformation was contributed by Martin Jambor, see this e-mail:
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 9dd18d26536..12d8a96b879 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -185,7 +185,7 @@  vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
 {
   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
 
-  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
+  if ((unsigned) param_vect_max_version_for_alias_checks == 0)
     return opt_result::failure_at (vect_location,
 				   "will not create alias checks, as"
 				   " --param vect-max-version-for-alias-checks"
@@ -2074,7 +2074,7 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
       if (do_peeling)
         {
           unsigned max_allowed_peel
-            = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
+            = param_vect_max_peeling_for_alignment;
 	  if (flag_vect_cost_model == VECT_COST_MODEL_CHEAP)
 	    max_allowed_peel = 0;
           if (max_allowed_peel != (unsigned)-1)
@@ -2215,7 +2215,7 @@  vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
 
               if (known_alignment_for_access_p (dr_info)
                   || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
-                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
+                     >= (unsigned) param_vect_max_version_for_alignment_checks)
                 {
                   do_versioning = false;
                   break;
@@ -3644,10 +3644,9 @@  vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "improved number of alias checks from %d to %d\n",
 		     may_alias_ddrs.length (), count);
-  unsigned limit = PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS);
+  unsigned limit = param_vect_max_version_for_alias_checks;
   if (flag_simd_cost_model == VECT_COST_MODEL_CHEAP)
-    limit = default_param_value
-	      (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) * 6 / 10;
+    limit = param_vect_max_version_for_alias_checks * 6 / 10;
   if (count > limit)
     return opt_result::failure_at
       (vect_location,
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 788cc8786ae..d55d0e3158f 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -1665,7 +1665,7 @@  vect_analyze_loop_costing (loop_vec_info loop_vinfo)
       return -1;
     }
 
-  int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
+  int min_scalar_loop_bound = (param_min_vect_loop_bound
 			       * assumed_vf);
 
   /* Use the cost model only if it is more conservative than user specified
@@ -1775,7 +1775,7 @@  vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
 	/* If dependence analysis will give up due to the limit on the
 	   number of datarefs stop here and fail fatally.  */
 	if (datarefs->length ()
-	    > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
+	    > (unsigned)param_loop_max_datarefs_for_datadeps)
 	  return opt_result::failure_at (stmt, "exceeded param "
 					 "loop-max-datarefs-for-datadeps\n");
       }
@@ -2458,7 +2458,7 @@  vect_analyze_loop (class loop *loop, vec_info_shared *shared)
 	     enabled, this is not a simd loop and it is the innermost loop.  */
 	  vect_epilogues = (!loop->simdlen
 			    && loop->inner == NULL
-			    && PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)
+			    && param_vect_epilogues_nomask
 			    && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
 			    /* For now only allow one epilogue loop.  */
 			    && first_loop_vinfo->epilogue_vinfos.is_empty ());
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index f4b445ac1ef..9d3d991e516 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -3289,7 +3289,7 @@  vect_slp_bb (basic_block bb)
 
       gimple_stmt_iterator region_end = gsi;
 
-      if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
+      if (insns > param_slp_max_insns_in_bb)
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 6b4e92e78af..a8a9c9d497b 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -489,7 +489,7 @@  public:
 
   /* Threshold of number of iterations below which vectorization will not be
      performed. It is calculated from MIN_PROFITABLE_ITERS and
-     PARAM_MIN_VECT_LOOP_BOUND.  */
+     param_min_vect_loop_bound.  */
   unsigned int th;
 
   /* When applying loop versioning, the vector form should only be used
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 9889095fa13..21877c11c65 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3395,7 +3395,7 @@  find_switch_asserts (basic_block bb, gswitch *last)
 
   /* Now register along the default label assertions that correspond to the
      anti-range of each label.  */
-  int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
+  int insertion_limit = param_max_vrp_switch_assertions;
   if (insertion_limit == 0)
     return;
 
@@ -4306,7 +4306,7 @@  vrp_prop::check_mem_ref (location_t location, tree ref,
      The loop computes the range of the final offset for expressions such
      as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
      some range.  */
-  const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
+  const unsigned limit = param_ssa_name_def_chain_limit;
   for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
     {
       gimple *def = SSA_NAME_DEF_STMT (arg);
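
[The limit above bounds how many SSA_NAME definitions the range computation
will chase.  Shorn of tree details, the shape of such a bounded walk is
(toy types, assumed names):

  /* Toy model of a def-chain walk capped by a param.  */
  struct def_link { struct def_link *def; };

  static int
  walk_defs (struct def_link *arg, unsigned limit)
  {
    unsigned n;
    for (n = 0; arg != 0 && n < limit; ++n)
      arg = arg->def;	/* step to the defining statement */
    return arg == 0;	/* 1 if the chain ended within the budget */
  }
]
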
diff --git a/gcc/tree.c b/gcc/tree.c
index d08141bcfc2..ecc18b393a4 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1549,15 +1549,15 @@  wide_int_to_tree_1 (tree type, const wide_int_ref &pcst)
 	  if (TYPE_SIGN (type) == UNSIGNED)
 	    {
 	      /* Cache [0, N).  */
-	      limit = INTEGER_SHARE_LIMIT;
-	      if (IN_RANGE (hwi, 0, INTEGER_SHARE_LIMIT - 1))
+	      limit = param_integer_share_limit;
+	      if (IN_RANGE (hwi, 0, param_integer_share_limit - 1))
 		ix = hwi;
 	    }
 	  else
 	    {
 	      /* Cache [-1, N).  */
-	      limit = INTEGER_SHARE_LIMIT + 1;
-	      if (IN_RANGE (hwi, -1, INTEGER_SHARE_LIMIT - 1))
+	      limit = param_integer_share_limit + 1;
+	      if (IN_RANGE (hwi, -1, param_integer_share_limit - 1))
 		ix = hwi + 1;
 	    }
 	  break;
@@ -1733,23 +1733,23 @@  cache_integer_cst (tree t)
       if (TYPE_UNSIGNED (type))
 	{
 	  /* Cache 0..N */
-	  limit = INTEGER_SHARE_LIMIT;
+	  limit = param_integer_share_limit;
 
 	  /* This is a little hokie, but if the prec is smaller than
-	     what is necessary to hold INTEGER_SHARE_LIMIT, then the
+	     what is necessary to hold param_integer_share_limit, then the
 	     obvious test will not get the correct answer.  */
 	  if (prec < HOST_BITS_PER_WIDE_INT)
 	    {
-	      if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+	      if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) param_integer_share_limit)
 		ix = tree_to_uhwi (t);
 	    }
-	  else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+	  else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
 	    ix = tree_to_uhwi (t);
 	}
       else
 	{
 	  /* Cache -1..N */
-	  limit = INTEGER_SHARE_LIMIT + 1;
+	  limit = param_integer_share_limit + 1;
 
 	  if (integer_minus_onep (t))
 	    ix = 0;
@@ -1757,10 +1757,10 @@  cache_integer_cst (tree t)
 	    {
 	      if (prec < HOST_BITS_PER_WIDE_INT)
 		{
-		  if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+		  if (tree_to_shwi (t) < param_integer_share_limit)
 		    ix = tree_to_shwi (t) + 1;
 		}
-	      else if (wi::ltu_p (wi::to_wide (t), INTEGER_SHARE_LIMIT))
+	      else if (wi::ltu_p (wi::to_wide (t), param_integer_share_limit))
 		ix = tree_to_shwi (t) + 1;
 	    }
 	}
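
[INTEGER_SHARE_LIMIT, seemingly a shorthand for the integer-share-limit
param rather than a direct PARAM_VALUE use, is likewise replaced by the
variable.  The cache it controls interns the small constants of each type:
[0, N) for unsigned types and [-1, N) for signed ones, with -1 stored at
index 0.  A toy model of the signed case, assuming a default of 251 for
--param integer-share-limit:

  /* Toy model, not GCC's cache.  Signed values share [-1, N), with -1
     kept at index 0, so ix = value + 1.  */
  #include <stdlib.h>

  #define N 251	/* assumed default of --param integer-share-limit */

  static void *cache[N + 1];

  static void *
  intern_signed (long v)
  {
    if (v >= -1 && v < N)
      {
	int ix = (int) v + 1;
	if (cache[ix] == 0)
	  cache[ix] = malloc (1);	/* stand-in for building a node */
	return cache[ix];
      }
    return malloc (1);			/* out-of-range values: no sharing */
  }
]
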
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 5de36ae2f47..c80a06c92b7 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -5844,7 +5844,7 @@  add_uses_1 (rtx *x, void *cui)
    compile time for ridiculously complex expressions, although they're
    seldom useful, and they may often have to be discarded as not
    representable anyway.  */
-#define EXPR_USE_DEPTH (PARAM_VALUE (PARAM_MAX_VARTRACK_EXPR_DEPTH))
+#define EXPR_USE_DEPTH (param_max_vartrack_expr_depth)
 
 /* Attempt to reverse the EXPR operation in the debug info and record
    it in the cselib table.  Say for reg1 = reg2 + 6 even when reg2 is
@@ -5904,7 +5904,7 @@  reverse_op (rtx val, const_rtx expr, rtx_insn *insn)
 	&& (GET_CODE (l->loc) != CONST || !references_value_p (l->loc, 0)))
       return;
     /* Avoid creating too large locs lists.  */
-    else if (count == PARAM_VALUE (PARAM_MAX_VARTRACK_REVERSE_OP_SIZE))
+    else if (count == param_max_vartrack_reverse_op_size)
       return;
 
   switch (GET_CODE (src))
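
[The two params above put ceilings on debug-expression depth and on the
locs lists built while reversing operations.  The reverse-op trick itself,
per the comment, recovers a dead register's value from the surviving one;
in plain C terms:

  /* Illustrative only: for reg1 = reg2 + 6, record reg2 = reg1 - 6 so
     reg2's value can still be described after reg2 is clobbered.  */
  #include <stdio.h>

  int
  main (void)
  {
    int reg2 = 36;
    int reg1 = reg2 + 6;	/* the forward operation */
    reg2 = 0;			/* reg2 is dead/clobbered here */
    printf ("recovered reg2 = reg1 - 6 = %d\n", reg1 - 6);
    return 0;
  }
]
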
@@ -7054,7 +7054,7 @@  vt_find_locations (void)
   int *rc_order;
   int i;
   int htabsz = 0;
-  int htabmax = PARAM_VALUE (PARAM_MAX_VARTRACK_SIZE);
+  int htabmax = param_max_vartrack_size;
   bool success = true;
 
   timevar_push (TV_VAR_TRACKING_DATAFLOW);