Revert "[flang] Add basic -mtune support" (#96678)
Reverts #95043
tarunprabhu committed Jun 25, 2024
1 parent ec2fb59 · commit 8dd9494
Showing 26 changed files with 14 additions and 160 deletions.
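
For context, the change being reverted here (#95043) taught the flang driver to accept -mtune=, forward it to the frontend as -tune-cpu, record it as a fir.tune_cpu attribute on the MLIR module, and propagate it to each function as a tune_cpu attribute; the diffs below remove each of those pieces. A minimal lit-style sketch of the removed behavior, assuming the attribute name shown in the diffs — the CPU name pentium4 and the test itself are illustrative, not one of the deleted files:

! RUN: %flang_fc1 -emit-fir -tune-cpu pentium4 %s -o - | FileCheck %s
! With the reverted patch applied, the module carried the tune CPU:
! CHECK: fir.tune_cpu = "pentium4"
subroutine empty
end subroutine empty
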
7 changes: 3 additions & 4 deletions clang/include/clang/Driver/Options.td
@@ -5421,7 +5421,6 @@ def module_file_info : Flag<["-"], "module-file-info">, Flags<[]>,
HelpText<"Provide information about a particular module file">;
def mthumb : Flag<["-"], "mthumb">, Group<m_Group>;
def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>,
Visibility<[ClangOption, FlangOption]>,
HelpText<"Only supported on AArch64, PowerPC, RISC-V, SPARC, SystemZ, and X86">;
def multi__module : Flag<["-"], "multi_module">;
def multiply__defined__unused : Separate<["-"], "multiply_defined_unused">;
@@ -6738,6 +6737,9 @@ def emit_hlfir : Flag<["-"], "emit-hlfir">, Group<Action_Group>,

let Visibility = [CC1Option, CC1AsOption] in {

def tune_cpu : Separate<["-"], "tune-cpu">,
HelpText<"Tune for a specific cpu type">,
MarshallingInfoString<TargetOpts<"TuneCPU">>;
def target_abi : Separate<["-"], "target-abi">,
HelpText<"Target a particular ABI type">,
MarshallingInfoString<TargetOpts<"ABI">>;
@@ -6764,9 +6766,6 @@ def darwin_target_variant_triple : Separate<["-"], "darwin-target-variant-triple

let Visibility = [CC1Option, CC1AsOption, FC1Option] in {

def tune_cpu : Separate<["-"], "tune-cpu">,
HelpText<"Tune for a specific cpu type">,
MarshallingInfoString<TargetOpts<"TuneCPU">>;
def target_cpu : Separate<["-"], "target-cpu">,
HelpText<"Target a specific cpu type">,
MarshallingInfoString<TargetOpts<"CPU">>;
10 changes: 1 addition & 9 deletions clang/lib/Driver/ToolChains/Flang.cpp
@@ -15,7 +15,6 @@
#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"

@@ -412,13 +411,6 @@ void Flang::addTargetOptions(const ArgList &Args,
}

// TODO: Add target specific flags, ABI, mtune option etc.
if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
CmdArgs.push_back("-tune-cpu");
if (A->getValue() == StringRef{"native"})
CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
else
CmdArgs.push_back(A->getValue());
}
}

void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
@@ -810,7 +802,7 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
case CodeGenOptions::FramePointerKind::None:
FPKeepKindStr = "-mframe-pointer=none";
break;
case CodeGenOptions::FramePointerKind::Reserved:
case CodeGenOptions::FramePointerKind::Reserved:
FPKeepKindStr = "-mframe-pointer=reserved";
break;
case CodeGenOptions::FramePointerKind::NonLeaf:
3 changes: 0 additions & 3 deletions flang/include/flang/Frontend/TargetOptions.h
@@ -32,9 +32,6 @@ class TargetOptions {
/// If given, the name of the target CPU to generate code for.
std::string cpu;

/// If given, the name of the target CPU to tune code for.
std::string cpuToTuneFor;

/// The list of target specific features to enable or disable, as written on
/// the command line.
std::vector<std::string> featuresAsWritten;
6 changes: 3 additions & 3 deletions flang/include/flang/Lower/Bridge.h
@@ -65,11 +65,11 @@ class LoweringBridge {
const Fortran::lower::LoweringOptions &loweringOptions,
const std::vector<Fortran::lower::EnvironmentDefault> &envDefaults,
const Fortran::common::LanguageFeatureControl &languageFeatures,
const llvm::TargetMachine &targetMachine, llvm::StringRef tuneCPU) {
const llvm::TargetMachine &targetMachine) {
return LoweringBridge(ctx, semanticsContext, defaultKinds, intrinsics,
targetCharacteristics, allCooked, triple, kindMap,
loweringOptions, envDefaults, languageFeatures,
targetMachine, tuneCPU);
targetMachine);
}

//===--------------------------------------------------------------------===//
@@ -148,7 +148,7 @@ class LoweringBridge {
const Fortran::lower::LoweringOptions &loweringOptions,
const std::vector<Fortran::lower::EnvironmentDefault> &envDefaults,
const Fortran::common::LanguageFeatureControl &languageFeatures,
const llvm::TargetMachine &targetMachine, const llvm::StringRef tuneCPU);
const llvm::TargetMachine &targetMachine);
LoweringBridge() = delete;
LoweringBridge(const LoweringBridge &) = delete;

4 changes: 0 additions & 4 deletions flang/include/flang/Optimizer/CodeGen/CGPasses.td
@@ -31,8 +31,6 @@ def FIRToLLVMLowering : Pass<"fir-to-llvm-ir", "mlir::ModuleOp"> {
"Override module's data layout.">,
Option<"forcedTargetCPU", "target-cpu", "std::string", /*default=*/"",
"Override module's target CPU.">,
Option<"forcedTuneCPU", "tune-cpu", "std::string", /*default=*/"",
"Override module's tune CPU.">,
Option<"forcedTargetFeatures", "target-features", "std::string",
/*default=*/"", "Override module's target features.">,
Option<"applyTBAA", "apply-tbaa", "bool", /*default=*/"false",
@@ -70,8 +68,6 @@ def TargetRewritePass : Pass<"target-rewrite", "mlir::ModuleOp"> {
"Override module's target triple.">,
Option<"forcedTargetCPU", "target-cpu", "std::string", /*default=*/"",
"Override module's target CPU.">,
Option<"forcedTuneCPU", "tune-cpu", "std::string", /*default=*/"",
"Override module's tune CPU.">,
Option<"forcedTargetFeatures", "target-features", "std::string",
/*default=*/"", "Override module's target features.">,
Option<"noCharacterConversion", "no-character-conversion",
19 changes: 1 addition & 18 deletions flang/include/flang/Optimizer/CodeGen/Target.h
@@ -76,29 +76,14 @@ class CodeGenSpecifics {
llvm::StringRef targetCPU, mlir::LLVM::TargetFeaturesAttr targetFeatures,
const mlir::DataLayout &dl);

static std::unique_ptr<CodeGenSpecifics>
get(mlir::MLIRContext *ctx, llvm::Triple &&trp, KindMapping &&kindMap,
llvm::StringRef targetCPU, mlir::LLVM::TargetFeaturesAttr targetFeatures,
const mlir::DataLayout &dl, llvm::StringRef tuneCPU);

static TypeAndAttr getTypeAndAttr(mlir::Type t) { return TypeAndAttr{t, {}}; }

CodeGenSpecifics(mlir::MLIRContext *ctx, llvm::Triple &&trp,
KindMapping &&kindMap, llvm::StringRef targetCPU,
mlir::LLVM::TargetFeaturesAttr targetFeatures,
const mlir::DataLayout &dl)
: context{*ctx}, triple{std::move(trp)}, kindMap{std::move(kindMap)},
targetCPU{targetCPU}, targetFeatures{targetFeatures}, dataLayout{&dl},
tuneCPU{""} {}

CodeGenSpecifics(mlir::MLIRContext *ctx, llvm::Triple &&trp,
KindMapping &&kindMap, llvm::StringRef targetCPU,
mlir::LLVM::TargetFeaturesAttr targetFeatures,
const mlir::DataLayout &dl, llvm::StringRef tuneCPU)
: context{*ctx}, triple{std::move(trp)}, kindMap{std::move(kindMap)},
targetCPU{targetCPU}, targetFeatures{targetFeatures}, dataLayout{&dl},
tuneCPU{tuneCPU} {}

targetCPU{targetCPU}, targetFeatures{targetFeatures}, dataLayout{&dl} {}
CodeGenSpecifics() = delete;
virtual ~CodeGenSpecifics() {}

@@ -180,7 +165,6 @@ class CodeGenSpecifics {
virtual unsigned char getCIntTypeWidth() const = 0;

llvm::StringRef getTargetCPU() const { return targetCPU; }
llvm::StringRef getTuneCPU() const { return tuneCPU; }

mlir::LLVM::TargetFeaturesAttr getTargetFeatures() const {
return targetFeatures;
@@ -198,7 +182,6 @@
llvm::StringRef targetCPU;
mlir::LLVM::TargetFeaturesAttr targetFeatures;
const mlir::DataLayout *dataLayout = nullptr;
llvm::StringRef tuneCPU;
};

} // namespace fir
7 changes: 0 additions & 7 deletions flang/include/flang/Optimizer/Dialect/Support/FIRContext.h
@@ -58,13 +58,6 @@ void setTargetCPU(mlir::ModuleOp mod, llvm::StringRef cpu);
/// Get the target CPU string from the Module or return a null reference.
llvm::StringRef getTargetCPU(mlir::ModuleOp mod);

/// Set the tune CPU for the module. `cpu` must not be deallocated while
/// module `mod` is still live.
void setTuneCPU(mlir::ModuleOp mod, llvm::StringRef cpu);

/// Get the tune CPU string from the Module or return a null reference.
llvm::StringRef getTuneCPU(mlir::ModuleOp mod);

/// Set the target features for the module.
void setTargetFeatures(mlir::ModuleOp mod, llvm::StringRef features);

3 changes: 0 additions & 3 deletions flang/include/flang/Optimizer/Transforms/Passes.td
@@ -411,9 +411,6 @@ def FunctionAttr : Pass<"function-attr", "mlir::func::FuncOp"> {
Option<"unsafeFPMath", "unsafe-fp-math",
"bool", /*default=*/"false",
"Set the unsafe-fp-math attribute on functions in the module.">,
Option<"tuneCPU", "tune-cpu",
"llvm::StringRef", /*default=*/"llvm::StringRef{}",
"Set the tune-cpu attribute on functions in the module.">,
];
}

4 changes: 0 additions & 4 deletions flang/lib/Frontend/CompilerInvocation.cpp
@@ -407,10 +407,6 @@ static void parseTargetArgs(TargetOptions &opts, llvm::opt::ArgList &args) {
args.getLastArg(clang::driver::options::OPT_target_cpu))
opts.cpu = a->getValue();

if (const llvm::opt::Arg *a =
args.getLastArg(clang::driver::options::OPT_tune_cpu))
opts.cpuToTuneFor = a->getValue();

for (const llvm::opt::Arg *currentArg :
args.filtered(clang::driver::options::OPT_target_feature))
opts.featuresAsWritten.emplace_back(currentArg->getValue());
3 changes: 1 addition & 2 deletions flang/lib/Frontend/FrontendActions.cpp
@@ -297,8 +297,7 @@ bool CodeGenAction::beginSourceFileAction() {
ci.getParsing().allCooked(), ci.getInvocation().getTargetOpts().triple,
kindMap, ci.getInvocation().getLoweringOpts(),
ci.getInvocation().getFrontendOpts().envDefaults,
ci.getInvocation().getFrontendOpts().features, targetMachine,
ci.getInvocation().getTargetOpts().cpuToTuneFor);
ci.getInvocation().getFrontendOpts().features, targetMachine);

// Fetch module from lb, so we can set
mlirModule = std::make_unique<mlir::ModuleOp>(lb.getModule());
3 changes: 1 addition & 2 deletions flang/lib/Lower/Bridge.cpp
@@ -5929,7 +5929,7 @@ Fortran::lower::LoweringBridge::LoweringBridge(
const Fortran::lower::LoweringOptions &loweringOptions,
const std::vector<Fortran::lower::EnvironmentDefault> &envDefaults,
const Fortran::common::LanguageFeatureControl &languageFeatures,
const llvm::TargetMachine &targetMachine, const llvm::StringRef tuneCPU)
const llvm::TargetMachine &targetMachine)
: semanticsContext{semanticsContext}, defaultKinds{defaultKinds},
intrinsics{intrinsics}, targetCharacteristics{targetCharacteristics},
cooked{&cooked}, context{context}, kindMap{kindMap},
@@ -5986,7 +5986,6 @@ Fortran::lower::LoweringBridge::LoweringBridge(
fir::setTargetTriple(*module.get(), triple);
fir::setKindMapping(*module.get(), kindMap);
fir::setTargetCPU(*module.get(), targetMachine.getTargetCPU());
fir::setTuneCPU(*module.get(), tuneCPU);
fir::setTargetFeatures(*module.get(), targetMachine.getTargetFeatureString());
fir::support::setMLIRDataLayout(*module.get(),
targetMachine.createDataLayout());
6 changes: 1 addition & 5 deletions flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -3618,9 +3618,6 @@ class FIRToLLVMLowering
if (!forcedTargetCPU.empty())
fir::setTargetCPU(mod, forcedTargetCPU);

if (!forcedTuneCPU.empty())
fir::setTuneCPU(mod, forcedTuneCPU);

if (!forcedTargetFeatures.empty())
fir::setTargetFeatures(mod, forcedTargetFeatures);

@@ -3717,8 +3714,7 @@ class FIRToLLVMLowering
signalPassFailure();
}

// Run pass to add comdats to functions that have weak linkage on relevant
// platforms
// Run pass to add comdats to functions that have weak linkage on relevant platforms
if (fir::getTargetTriple(mod).supportsCOMDAT()) {
mlir::OpPassManager comdatPM("builtin.module");
comdatPM.addPass(mlir::LLVM::createLLVMAddComdats());
11 changes: 0 additions & 11 deletions flang/lib/Optimizer/CodeGen/Target.cpp
@@ -1113,14 +1113,3 @@ fir::CodeGenSpecifics::get(mlir::MLIRContext *ctx, llvm::Triple &&trp,
}
TODO(mlir::UnknownLoc::get(ctx), "target not implemented");
}

std::unique_ptr<fir::CodeGenSpecifics> fir::CodeGenSpecifics::get(
mlir::MLIRContext *ctx, llvm::Triple &&trp, KindMapping &&kindMap,
llvm::StringRef targetCPU, mlir::LLVM::TargetFeaturesAttr targetFeatures,
const mlir::DataLayout &dl, llvm::StringRef tuneCPU) {
std::unique_ptr<fir::CodeGenSpecifics> CGS = fir::CodeGenSpecifics::get(
ctx, std::move(trp), std::move(kindMap), targetCPU, targetFeatures, dl);

CGS->tuneCPU = tuneCPU;
return CGS;
}
12 changes: 1 addition & 11 deletions flang/lib/Optimizer/CodeGen/TargetRewrite.cpp
@@ -89,9 +89,6 @@ class TargetRewrite : public fir::impl::TargetRewritePassBase<TargetRewrite> {
if (!forcedTargetCPU.empty())
fir::setTargetCPU(mod, forcedTargetCPU);

if (!forcedTuneCPU.empty())
fir::setTuneCPU(mod, forcedTuneCPU);

if (!forcedTargetFeatures.empty())
fir::setTargetFeatures(mod, forcedTargetFeatures);


auto specifics = fir::CodeGenSpecifics::get(
mod.getContext(), fir::getTargetTriple(mod), fir::getKindMapping(mod),
fir::getTargetCPU(mod), fir::getTargetFeatures(mod), *dl,
fir::getTuneCPU(mod));
fir::getTargetCPU(mod), fir::getTargetFeatures(mod), *dl);

setMembers(specifics.get(), &rewriter, &*dl);

@@ -676,18 +672,12 @@ class TargetRewrite : public fir::impl::TargetRewritePassBase<TargetRewrite> {
auto targetCPU = specifics->getTargetCPU();
mlir::StringAttr targetCPUAttr =
targetCPU.empty() ? nullptr : mlir::StringAttr::get(ctx, targetCPU);
auto tuneCPU = specifics->getTuneCPU();
mlir::StringAttr tuneCPUAttr =
tuneCPU.empty() ? nullptr : mlir::StringAttr::get(ctx, tuneCPU);
auto targetFeaturesAttr = specifics->getTargetFeatures();

for (auto fn : mod.getOps<mlir::func::FuncOp>()) {
if (targetCPUAttr)
fn->setAttr("target_cpu", targetCPUAttr);

if (tuneCPUAttr)
fn->setAttr("tune_cpu", tuneCPUAttr);

if (targetFeaturesAttr)
fn->setAttr("target_features", targetFeaturesAttr);

3 changes: 1 addition & 2 deletions flang/lib/Optimizer/CodeGen/TypeConverter.cpp
@@ -35,8 +35,7 @@ LLVMTypeConverter::LLVMTypeConverter(mlir::ModuleOp module, bool applyTBAA,
kindMapping(getKindMapping(module)),
specifics(CodeGenSpecifics::get(
module.getContext(), getTargetTriple(module), getKindMapping(module),
getTargetCPU(module), getTargetFeatures(module), dl,
getTuneCPU(module))),
getTargetCPU(module), getTargetFeatures(module), dl)),
tbaaBuilder(std::make_unique<TBAABuilder>(module->getContext(), applyTBAA,
forceUnifiedTBAATree)),
dataLayout{&dl} {
18 changes: 0 additions & 18 deletions flang/lib/Optimizer/Dialect/Support/FIRContext.cpp
@@ -77,24 +77,6 @@ llvm::StringRef fir::getTargetCPU(mlir::ModuleOp mod) {
return {};
}

static constexpr const char *tuneCpuName = "fir.tune_cpu";

void fir::setTuneCPU(mlir::ModuleOp mod, llvm::StringRef cpu) {
if (cpu.empty())
return;

auto *ctx = mod.getContext();

mod->setAttr(tuneCpuName, mlir::StringAttr::get(ctx, cpu));
}

llvm::StringRef fir::getTuneCPU(mlir::ModuleOp mod) {
if (auto attr = mod->getAttrOfType<mlir::StringAttr>(tuneCpuName))
return attr.getValue();

return {};
}

static constexpr const char *targetFeaturesName = "fir.target_features";

void fir::setTargetFeatures(mlir::ModuleOp mod, llvm::StringRef features) {
14 changes: 0 additions & 14 deletions flang/test/Driver/tune-cpu-fir.f90

This file was deleted.

6 changes: 0 additions & 6 deletions flang/test/Lower/tune-cpu-llvm.f90

This file was deleted.
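
Judging by their names and directories, the two deleted tests presumably checked the driver-to-FIR and FIR-to-LLVM halves of the flow. A rough, hypothetical sketch of an LLVM-level check — the -mtune flag and the tune-cpu function attribute come from the diffs above, while the CPU name pentium4 and the file contents are assumptions, not a reconstruction of the deleted test:

! RUN: %flang -S -emit-llvm -mtune=pentium4 -o - %s | FileCheck %s
! With the reverted patch applied, the tune CPU reached LLVM IR as a
! function attribute:
! CHECK: "tune-cpu"="pentium4"
subroutine a
end subroutine a
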

3 changes: 1 addition & 2 deletions flang/tools/bbc/bbc.cpp
@@ -367,12 +367,11 @@ static mlir::LogicalResult convertFortranSourceToMLIR(
loweringOptions.setLowerToHighLevelFIR(useHLFIR || emitHLFIR);
loweringOptions.setNSWOnLoopVarInc(setNSW);
std::vector<Fortran::lower::EnvironmentDefault> envDefaults = {};
constexpr const char *tuneCPU = "";
auto burnside = Fortran::lower::LoweringBridge::create(
ctx, semanticsContext, defKinds, semanticsContext.intrinsics(),
semanticsContext.targetCharacteristics(), parsing.allCooked(),
targetTriple, kindMap, loweringOptions, envDefaults,
semanticsContext.languageFeatures(), targetMachine, tuneCPU);
semanticsContext.languageFeatures(), targetMachine);
mlir::ModuleOp mlirModule = burnside.getModule();
if (enableOpenMP) {
if (enableOpenMPGPU && !enableOpenMPDevice) {
4 changes: 0 additions & 4 deletions flang/tools/tco/tco.cpp
@@ -58,9 +58,6 @@ static cl::opt<std::string> targetTriple("target",
static cl::opt<std::string>
targetCPU("target-cpu", cl::desc("specify a target CPU"), cl::init(""));

static cl::opt<std::string> tuneCPU("tune-cpu", cl::desc("specify a tune CPU"),
cl::init(""));

static cl::opt<std::string>
targetFeatures("target-features", cl::desc("specify the target features"),
cl::init(""));
@@ -116,7 +113,6 @@ compileFIR(const mlir::PassPipelineCLParser &passPipeline) {
fir::setTargetTriple(*owningRef, targetTriple);
fir::setKindMapping(*owningRef, kindMap);
fir::setTargetCPU(*owningRef, targetCPU);
fir::setTuneCPU(*owningRef, tuneCPU);
fir::setTargetFeatures(*owningRef, targetFeatures);
// tco is a testing tool, so it will happily use the target independent
// data layout if none is on the module.