Commit c8ba691

lialan and qedawkins authored

This PR contains the following changes:

* Update LLVM to llvm/llvm-project@3e61c1ab7f
* Changes necessary for llvm/llvm-project#123902 , originally from: #19993
* Renaming changes to accommodate upstream changes.

---------

Signed-off-by: Alan Li <[email protected]>
Co-authored-by: Quinn Dawkins <[email protected]>
1 parent 3d1faef commit c8ba691
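
Most of the 116-file diff is a mechanical rename driven by llvm/llvm-project#123902, which moved the pack/unpack ops from the tensor dialect into the linalg dialect: tensor::PackOp/tensor::UnPackOp become linalg::PackOp/linalg::UnPackOp in C++, and tensor.pack/tensor.unpack become linalg.pack/linalg.unpack in IR and FileCheck lines. As a hedged sketch of the C++ side (variable names here are illustrative, not from this commit; the builder signature mirrors the CPUPrepareUkernels.cpp hunks below):

// Before the LLVM bump, pack lived in the tensor dialect:
//   rewriter.create<tensor::PackOp>(loc, source, dest, innerDimsPos,
//                                   innerTiles, paddingValue, outerDimsPerm);
// After it, the same builder call targets the linalg dialect:
auto packOp = rewriter.create<linalg::PackOp>(
    loc, source, dest, innerDimsPos, innerTiles, paddingValue, outerDimsPerm);

The op's operands and attributes are unchanged, so the rewritten logic only needed new names.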

File tree

116 files changed: +484 -519 lines changed


compiler/plugins/input/TOSA/InputConversion/Passes.cpp

Lines changed: 5 additions & 5 deletions
@@ -36,16 +36,16 @@ void buildTOSAInputConversionPassPipeline(OpPassManager &passManager) {
   // Currently we don't handle SCF ops well and have to convert them all to CFG.
   // In the future it would be nice if we could have all of flow be both scf
   // and cfg compatible.
-  passManager.addNestedPass<func::FuncOp>(tosa::createTosaToSCF());
+  passManager.addNestedPass<func::FuncOp>(createTosaToSCFPass());

   // We also don't handle calls well on the old codepath; until we remove the
   // use of the CFG we can continue inlining.
   passManager.addPass(mlir::createInlinerPass());

   passManager.addNestedPass<func::FuncOp>(
       tosa::createTosaMakeBroadcastablePass());
-  passManager.addNestedPass<func::FuncOp>(tosa::createTosaToArith());
-  passManager.addNestedPass<func::FuncOp>(tosa::createTosaToTensor());
+  passManager.addNestedPass<func::FuncOp>(createTosaToArithPass());
+  passManager.addNestedPass<func::FuncOp>(createTosaToTensorPass());
   passManager.addNestedPass<func::FuncOp>(
       iree_compiler::createTosaToLinalgExtPass());
   passManager.addNestedPass<func::FuncOp>(mlir::createCanonicalizerPass());

@@ -60,8 +60,8 @@ void buildTOSAInputConversionPassPipeline(OpPassManager &passManager) {
       iree_compiler::createConverti48Toi64Pass());

   // Sometimes we generate more TOSA operations during the lowering to linalg.
-  passManager.addNestedPass<func::FuncOp>(tosa::createTosaToArith());
-  passManager.addNestedPass<func::FuncOp>(tosa::createTosaToTensor());
+  passManager.addNestedPass<func::FuncOp>(createTosaToArithPass());
+  passManager.addNestedPass<func::FuncOp>(createTosaToTensorPass());

   passManager.addNestedPass<func::FuncOp>(
       iree_compiler::createStripSignednessPass());

compiler/plugins/input/TOSA/PluginRegistration.cpp

Lines changed: 2 additions & 2 deletions
@@ -28,9 +28,9 @@ struct TOSASession
                       PluginActivationPolicy::DefaultActivated> {
   static void registerPasses() {
     registerTOSAConversionPasses();
-    registerTosaToArith();
+    registerTosaToArithPass();
     registerTosaToLinalg();
-    registerTosaToTensor();
+    registerTosaToTensorPass();
   }

   void onRegisterDialects(DialectRegistry &registry) override {
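
For readers unfamiliar with the convention: MLIR's tablegen-generated passes come in pairs, a create*Pass() factory and a register*Pass() hook, which is why the upstream rename touches both the pipeline call sites in Passes.cpp above and the registration calls here. A minimal hedged sketch of the pair in use (names from these diffs; the surrounding plugin machinery is elided):

// Registration makes the pass visible to pass-pipeline parsing and tools:
registerTosaToArithPass();
// The factory constructs a fresh pass instance for a concrete pipeline:
passManager.addNestedPass<func::FuncOp>(createTosaToArithPass());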

compiler/plugins/target/LLVMCPU/test/materialize_homogeneous_encodings.mlir

Lines changed: 2 additions & 2 deletions
@@ -19,5 +19,5 @@ module attributes {hal.device.targets = [#device_target_llvm_cpu]} {
   }
 }
 // CHECK-LABEL: util.func public @lhs_encoding
-// CHECK: tensor.pack
-// CHECK: tensor.unpack
+// CHECK: linalg.pack
+// CHECK: linalg.unpack

compiler/src/iree/compiler/Codegen/Common/CPU/CPULowerToUKernels.cpp

Lines changed: 4 additions & 4 deletions
@@ -263,7 +263,7 @@ matchDAGForUKernel(RewriterBase &rewriter, linalg::Mmt4DOp op,
 }

 static FailureOr<IREE::Codegen::UKernelOpInterface>
-matchDAGForUKernel(RewriterBase &rewriter, tensor::PackOp op,
+matchDAGForUKernel(RewriterBase &rewriter, linalg::PackOp op,
                    bool /*skipIntermediateRoundings*/) {
   auto targetAttr = IREE::HAL::ExecutableTargetAttr::lookup(op);
   const char ukernelName[] = "pack";

@@ -386,7 +386,7 @@ matchDAGForUKernel(RewriterBase &rewriter, tensor::PackOp op,
 }

 static FailureOr<IREE::Codegen::UKernelOpInterface>
-matchDAGForUKernel(RewriterBase &rewriter, tensor::UnPackOp op,
+matchDAGForUKernel(RewriterBase &rewriter, linalg::UnPackOp op,
                    bool /*skipIntermediateRoundings*/) {
   auto targetAttr = IREE::HAL::ExecutableTargetAttr::lookup(op);
   const char ukernelName[] = "unpack";

@@ -616,8 +616,8 @@ void CPULowerToUKernelsPass::runOnOperation() {
   // these ops.
   auto allTargets = [](auto target) { return true; };
   patterns.insert<LowerToUKernelPattern<linalg::Mmt4DOp>,
-                  LowerToUKernelPattern<tensor::PackOp>,
-                  LowerToUKernelPattern<tensor::UnPackOp>>(
+                  LowerToUKernelPattern<linalg::PackOp>,
+                  LowerToUKernelPattern<linalg::UnPackOp>>(
       context, allTargets, skipIntermediateRoundings);
   // These patterns are inherently specific to the VMVX backend.
   patterns.insert<LowerToUKernelPattern<IREE::Codegen::QueryTileSizesOp>>(

compiler/src/iree/compiler/Codegen/Common/CPU/CPUPrepareUkernels.cpp

Lines changed: 12 additions & 12 deletions
@@ -46,7 +46,7 @@ static void tileBatchDimsForBatchMmt4dOp(RewriterBase &rewriter,

 static void tileNonPackedDimsFor3DPackOps(RewriterBase &rewriter,
                                           FunctionOpInterface funcOp) {
-  funcOp.walk([&](tensor::PackOp packOp) {
+  funcOp.walk([&](linalg::PackOp packOp) {
     if (packOp.getSourceRank() != 3 || packOp.getDestRank() != 5) {
       return;
     }

@@ -81,7 +81,7 @@ static void tileNonPackedDimsFor3DPackOps(RewriterBase &rewriter,

 static void tileNonPackedDimsFor5DPUnpackOps(RewriterBase &rewriter,
                                              FunctionOpInterface funcOp) {
-  funcOp.walk([&](tensor::UnPackOp unpackOp) {
+  funcOp.walk([&](linalg::UnPackOp unpackOp) {
     if (unpackOp.getSourceRank() != 5 || unpackOp.getDestRank() != 3) {
       return;
     }

@@ -251,10 +251,10 @@ struct ConvertBatchMmt4DtoMmt4DPattern
   }
 };

-struct Convert3DPackto2DPackPattern : public OpRewritePattern<tensor::PackOp> {
-  using OpRewritePattern<tensor::PackOp>::OpRewritePattern;
+struct Convert3DPackto2DPackPattern : public OpRewritePattern<linalg::PackOp> {
+  using OpRewritePattern<linalg::PackOp>::OpRewritePattern;

-  LogicalResult matchAndRewrite(tensor::PackOp packOp,
+  LogicalResult matchAndRewrite(linalg::PackOp packOp,
                                 PatternRewriter &rewriter) const override {
     if (packOp.getSourceRank() != 3 || packOp.getDestRank() != 5) {
       return failure();

@@ -309,7 +309,7 @@ struct Convert3DPackto2DPackPattern : public OpRewritePattern<tensor::PackOp> {
     auto reducedDest = tensor::createCanonicalRankReducingExtractSliceOp(
         rewriter, loc, packOp.getDest(), reducedDestType);

-    auto newPackOp = rewriter.create<tensor::PackOp>(
+    auto newPackOp = rewriter.create<linalg::PackOp>(
         loc, reducedSrc, reducedDest, newInnerDimsPos, packOp.getMixedTiles(),
         packOp.getPaddingValue(), newOuterDimsPerm);

@@ -321,10 +321,10 @@ struct Convert3DPackto2DPackPattern : public OpRewritePattern<tensor::PackOp> {
 };

 struct Convert5DUnPackto4DUnPackPattern
-    : public OpRewritePattern<tensor::UnPackOp> {
-  using OpRewritePattern<tensor::UnPackOp>::OpRewritePattern;
+    : public OpRewritePattern<linalg::UnPackOp> {
+  using OpRewritePattern<linalg::UnPackOp>::OpRewritePattern;

-  LogicalResult matchAndRewrite(tensor::UnPackOp unpackOp,
+  LogicalResult matchAndRewrite(linalg::UnPackOp unpackOp,
                                 PatternRewriter &rewriter) const override {
     if (unpackOp.getSourceRank() != 5 || unpackOp.getDestRank() != 3) {
       return failure();

@@ -387,7 +387,7 @@ struct Convert5DUnPackto4DUnPackPattern
     auto reducedDest = tensor::createCanonicalRankReducingExtractSliceOp(
         rewriter, loc, unpackOp.getDest(), reducedDestType);

-    auto newUnpackOp = rewriter.create<tensor::UnPackOp>(
+    auto newUnpackOp = rewriter.create<linalg::UnPackOp>(
         loc, reducedSrc, reducedDest, newInnerDimsPos, unpackOp.getMixedTiles(),
         newOuterDimsPerm);

@@ -436,8 +436,8 @@ void CPUPrepareUkernelsPass::runOnOperation() {
   tensor::InsertSliceOp::getCanonicalizationPatterns(patterns, ctx);
   tensor::ExtractSliceOp::getCanonicalizationPatterns(patterns, ctx);
   tensor::EmptyOp::getCanonicalizationPatterns(patterns, ctx);
-  tensor::PackOp::getCanonicalizationPatterns(patterns, ctx);
-  tensor::UnPackOp::getCanonicalizationPatterns(patterns, ctx);
+  linalg::PackOp::getCanonicalizationPatterns(patterns, ctx);
+  linalg::UnPackOp::getCanonicalizationPatterns(patterns, ctx);
   tensor::CastOp::getCanonicalizationPatterns(patterns, ctx);
   tensor::populateFoldTensorEmptyPatterns(patterns);
   if (failed(applyPatternsGreedily(funcOp, std::move(patterns)))) {

compiler/src/iree/compiler/Codegen/Common/CPU/test/lower_to_ukernel_ops.mlir

Lines changed: 12 additions & 12 deletions
@@ -287,7 +287,7 @@ func.func @mmt4d_bf16bf16f32(%arg0 : tensor<?x?x16x2xbf16>, %arg1 : tensor<?x?x1
 func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"llvm-cpu", "xyz", {ukernels = "all", target_triple="x86_64-xyz-xyz", cpu_features="+avx512f"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }

@@ -315,7 +315,7 @@ func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %ar
 func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }

@@ -344,7 +344,7 @@ func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 :
 func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %arg2 : f16) -> tensor<?x?x7x8xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf16> -> tensor<?x?x7x8xf16>
   func.return %result : tensor<?x?x7x8xf16>
 }

@@ -373,7 +373,7 @@ func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %ar
 func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>, %arg2 : bf16) -> tensor<?x?x7x8xbf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xbf16> -> tensor<?x?x7x8xbf16>
   func.return %result : tensor<?x?x7x8xbf16>
 }

@@ -401,7 +401,7 @@ func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>,
 func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?x?x7x8xi32>, %arg2 : i32) -> tensor<?x?x7x8xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi32> -> tensor<?x?x7x8xi32>
   func.return %result : tensor<?x?x7x8xi32>
 }

@@ -430,19 +430,19 @@ func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?
 func.func @pack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?x7x8xf32>, %arg2 : f32) -> tensor<?x?x7x8xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf32> -> tensor<?x?x7x8xf32>
   func.return %result : tensor<?x?x7x8xf32>
 }

 // -----

-// Check that tensor.pack is not lowered to a microkernel by default - it should
+// Check that linalg.pack is not lowered to a microkernel by default - it should
 // only be on VMVX.
 // CHECK: func @unpack_f16f16_default
-// CHECK: tensor.unpack
+// CHECK: linalg.unpack
 func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }

@@ -468,7 +468,7 @@ func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?
 func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }

@@ -494,7 +494,7 @@ func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -
 func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : tensor<?x?xi32>) -> tensor<?x?xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xi32> -> tensor<?x?xi32>
   func.return %result : tensor<?x?xi32>
 }

@@ -520,7 +520,7 @@ func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : te
 func.func @unpack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?x7x8xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf32> -> tensor<?x?xf32>
   func.return %result : tensor<?x?xf32>
 }
