github.com/johnnyeven/libtools@v0.0.0-20191126065708-61829c1adf46/third_party/mlir/lib/Dialect/Linalg/Utils/Utils.cpp

//===- Utils.cpp - Utilities to support the Linalg dialect ----------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements utilities for the Linalg dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/EDSC/Helpers.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Utils/Intrinsics.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/STLExtras.h"
#include "mlir/Transforms/FoldUtils.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::linalg::intrinsics;
using namespace mlir::loop;

/// Creates a single loop.for from a linalg.range, binds `iv` to its induction
/// variable and enters the loop body.
mlir::edsc::LoopRangeBuilder::LoopRangeBuilder(ValueHandle *iv,
                                               ValueHandle range) {
  assert(range.getType() && "expected !linalg.range type");
  assert(range.getValue()->getDefiningOp() &&
         "need operations to extract range parts");
  auto rangeOp = cast<RangeOp>(range.getValue()->getDefiningOp());
  auto lb = rangeOp.min();
  auto ub = rangeOp.max();
  auto step = rangeOp.step();
  auto forOp = OperationHandle::createOp<ForOp>(lb, ub, step);
  *iv = ValueHandle(forOp.getInductionVar());
  auto *body = forOp.getBody();
  enter(body, /*prev=*/1);
}

/// Creates a single loop.for from a (min, max, step) SubViewOp::Range triple.
mlir::edsc::LoopRangeBuilder::LoopRangeBuilder(ValueHandle *iv,
                                               SubViewOp::Range range) {
  auto forOp =
      OperationHandle::createOp<ForOp>(range.min, range.max, range.step);
  *iv = ValueHandle(forOp.getInductionVar());
  auto *body = forOp.getBody();
  enter(body, /*prev=*/1);
}

/// Emits the loop body by running `fun`, then exits the loop's region.
ValueHandle
mlir::edsc::LoopRangeBuilder::operator()(std::function<void(void)> fun) {
  if (fun)
    fun();
  exit();
  return ValueHandle::null();
}

/// Builds one LoopRangeBuilder per range, producing a perfectly nested loop
/// nest whose induction variables are bound to `ivs`.
mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
    ArrayRef<ValueHandle *> ivs, ArrayRef<SubViewOp::Range> ranges) {
  loops.reserve(ranges.size());
  for (unsigned i = 0, e = ranges.size(); i < e; ++i) {
    loops.emplace_back(ivs[i], ranges[i]);
  }
  assert(loops.size() == ivs.size() && "Mismatch loops vs ivs size");
}

mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
    ArrayRef<ValueHandle *> ivs, ArrayRef<ValueHandle> ranges) {
  loops.reserve(ranges.size());
  for (unsigned i = 0, e = ranges.size(); i < e; ++i) {
    loops.emplace_back(ivs[i], ranges[i]);
  }
  assert(loops.size() == ivs.size() && "Mismatch loops vs ivs size");
}

mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
    ArrayRef<ValueHandle *> ivs, ArrayRef<Value *> ranges)
    : LoopNestRangeBuilder(
          ivs, SmallVector<ValueHandle, 4>(ranges.begin(), ranges.end())) {}

/// Emits the loop nest body by running `fun`, then closes the loops from
/// innermost to outermost.
ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
    std::function<void(void)> fun) {
  if (fun)
    fun();
  for (auto &lit : reverse(loops)) {
    lit({});
  }
  return ValueHandle::null();
}

/// Fully composes and canonicalizes `map` with `operandsRef`, then emits an
/// affine.apply through the folder so that constant results are folded
/// eagerly instead of materializing an operation.
static Value *emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
                                            AffineMap map,
                                            ArrayRef<Value *> operandsRef,
                                            OperationFolder &state) {
  SmallVector<Value *, 4> operands(operandsRef.begin(), operandsRef.end());
  fullyComposeAffineMapAndOperands(&map, &operands);
  canonicalizeMapAndOperands(&map, &operands);
  return state.create<AffineApplyOp>(b, loc, map, operands);
}

SmallVector<Value *, 4> mlir::linalg::applyMapToValues(OpBuilder &b,
                                                       Location loc,
                                                       AffineMap map,
                                                       ArrayRef<Value *> values,
                                                       OperationFolder &state) {
  SmallVector<Value *, 4> res;
  res.reserve(map.getNumResults());
  unsigned numDims = map.getNumDims();
  // For each `expr` in `map`, applies the `expr` to the values extracted from
  // ranges. If the resulting application can be folded into a Value*, the
  // folding occurs eagerly. Otherwise, an affine.apply operation is emitted.
  for (auto expr : map.getResults()) {
    AffineMap map = AffineMap::get(numDims, 0, expr);
    res.push_back(emitOrFoldComposedAffineApply(b, loc, map, values, state));
  }
  return res;
}

/// Returns all the operands of `linalgOp` that are not views.
/// Asserts that these operands are value types to allow transformations like
/// tiling to just use the values when cloning `linalgOp`.
SmallVector<Value *, 4>
mlir::linalg::getAssumedNonViewOperands(LinalgOp linalgOp) {
  auto *op = linalgOp.getOperation();
  unsigned numViews = linalgOp.getNumInputsAndOutputs();
  unsigned nOperands = op->getNumOperands() - numViews;
  SmallVector<Value *, 4> res;
  res.reserve(nOperands);
  for (unsigned i = 0; i < nOperands; ++i) {
    res.push_back(op->getOperand(numViews + i));
    auto t = res.back()->getType();
    (void)t;
    assert((t.isIntOrIndexOrFloat() || t.isa<VectorType>()) &&
           "expected scalar or vector type");
  }
  return res;
}
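
// Illustrative usage sketch (not part of the upstream file): how a client such
// as Linalg tiling typically drives LoopNestRangeBuilder. The helper name
// `buildLoopNestExample` is hypothetical; an enclosing edsc::ScopedContext is
// assumed to have been set up by the caller to provide the insertion point,
// and the delayed ValueHandle(Type) constructor is assumed to be available as
// in the EDSC builders of this revision.
static void buildLoopNestExample(OpBuilder &b,
                                 ArrayRef<SubViewOp::Range> loopRanges) {
  // One delayed index-typed handle per loop; LoopNestRangeBuilder binds each
  // one to the induction variable of the loop.for it creates.
  SmallVector<ValueHandle, 4> ivs(loopRanges.size(),
                                  ValueHandle(b.getIndexType()));
  SmallVector<ValueHandle *, 4> pivs;
  pivs.reserve(ivs.size());
  for (auto &iv : ivs)
    pivs.push_back(&iv);
  // Emit the nest; the lambda runs with the insertion point inside the
  // innermost loop, and the loops are closed innermost-to-outermost on exit.
  LoopNestRangeBuilder(pivs, loopRanges)([&] {
    // The loop body would be emitted here using the induction variables in
    // `ivs`, e.g. to index subviews of the tiled operands.
  });
}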