@@ -287,7 +287,7 @@ func.func @mmt4d_bf16bf16f32(%arg0 : tensor<?x?x16x2xbf16>, %arg1 : tensor<?x?x1
 func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"llvm-cpu", "xyz", {ukernels = "all", target_triple="x86_64-xyz-xyz", cpu_features="+avx512f"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }
@@ -315,7 +315,7 @@ func.func @pack_i8i8_x86(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %ar
 func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 : i8) -> tensor<?x?x7x8xi8> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i8) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi8> -> tensor<?x?x7x8xi8>
   func.return %result : tensor<?x?x7x8xi8>
 }
@@ -344,7 +344,7 @@ func.func @pack_i8i8(%arg0 : tensor<?x?xi8>, %arg1 : tensor<?x?x7x8xi8>, %arg2 :
 func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %arg2 : f16) -> tensor<?x?x7x8xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf16> -> tensor<?x?x7x8xf16>
   func.return %result : tensor<?x?x7x8xf16>
 }
@@ -373,7 +373,7 @@ func.func @pack_f16f16(%arg0 : tensor<?x?xf16>, %arg1 : tensor<?x?x7x8xf16>, %ar
 func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>, %arg2 : bf16) -> tensor<?x?x7x8xbf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : bf16) inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xbf16> -> tensor<?x?x7x8xbf16>
   func.return %result : tensor<?x?x7x8xbf16>
 }
@@ -401,7 +401,7 @@ func.func @pack_bf16bf16(%arg0 : tensor<?x?xbf16>, %arg1 : tensor<?x?x7x8xbf16>,
 func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?x?x7x8xi32>, %arg2 : i32) -> tensor<?x?x7x8xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : i32) inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xi32> -> tensor<?x?x7x8xi32>
   func.return %result : tensor<?x?x7x8xi32>
 }
@@ -430,19 +430,19 @@ func.func @pack_i32i32_transpose_inner(%arg0 : tensor<?x?xi32>, %arg1 : tensor<?
 func.func @pack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?x7x8xf32>, %arg2 : f32) -> tensor<?x?x7x8xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.pack %arg0 padding_value(%arg2 : f32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?xf32> -> tensor<?x?x7x8xf32>
   func.return %result : tensor<?x?x7x8xf32>
 }

 // -----

-// Check that tensor.pack is not lowered to a microkernel by default - it should
+// Check that linalg.pack is not lowered to a microkernel by default - it should
 // only be on VMVX.
 // CHECK: func @unpack_f16f16_default
-// CHECK: tensor.unpack
+// CHECK: linalg.unpack
 func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }
@@ -468,7 +468,7 @@ func.func @unpack_f16f16_default(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?
 func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -> tensor<?x?xf16> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf16> -> tensor<?x?xf16>
   func.return %result : tensor<?x?xf16>
 }
@@ -494,7 +494,7 @@ func.func @unpack_f16f16(%arg0 : tensor<?x?x7x8xf16>, %arg1 : tensor<?x?xf16>) -
 func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : tensor<?x?xi32>) -> tensor<?x?xi32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xi32> -> tensor<?x?xi32>
   func.return %result : tensor<?x?xi32>
 }
@@ -520,7 +520,7 @@ func.func @unpack_i32i32_transpose_inner(%arg0 : tensor<?x?x7x8xi32>, %arg1 : te
 func.func @unpack_f32f32_transpose_inner_and_outer(%arg0 : tensor<?x?x7x8xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> attributes {
   hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb", {ukernels = "all"}>
 } {
-  %result = tensor.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
+  %result = linalg.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [7, 8] into %arg1
       : tensor<?x?x7x8xf32> -> tensor<?x?xf32>
   func.return %result : tensor<?x?xf32>
 }