Commit
set keepdims as bool
Change-Id: I7dda38b4217ae7fa87a5ac447cb4fcb7a3d77354
HarmonyHu committed Mar 20, 2023
1 parent 4105da9 commit 13850d1
Showing 11 changed files with 43 additions and 48 deletions.
6 changes: 3 additions & 3 deletions include/tpu_mlir/Dialect/Top/IR/TopOps.td
@@ -384,7 +384,7 @@ class Top_PoolOp <string mnemonic> : Top_Op<mnemonic,[SupportFuseRelu]> {
     I64ArrayAttr:$strides,
     I64ArrayAttr:$pads,
     OptionalAttr<BoolAttr>:$ceil_mode,
-    DefaultValuedAttr<BoolAttr, "true">:$keep_dim,
+    DefaultValuedAttr<BoolAttr, "true">:$keepdims,
     DefaultValuedAttr<I64Attr, "0">:$pad_value,
     DefaultValuedAttr<BoolAttr, "false">:$count_include_pad,
     DefaultValuedAttr<BoolAttr, "false">:$do_relu,
@@ -1376,7 +1376,7 @@ def Top_ReduceOp : Top_Op<"Reduce"> {
   let arguments = (ins
     AnyTensor:$input,
     I64ArrayAttr:$axes,
-    I64Attr:$keepdims,
+    BoolAttr:$keepdims,
     ReduceModeAttr:$mode
   );
   let results = (outs AnyTensor:$output);
@@ -1391,7 +1391,7 @@ def Top_ArgOp : Top_Op<"Arg"> {
   let arguments = (ins
     AnyTensor:$input,
     I64Attr:$axis,
-    I64Attr:$keepdims,
+    BoolAttr:$keepdims,
     ArgModeAttr:$mode
   );
   let results = (outs
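Note on the attribute change above: switching $keepdims from I64Attr to BoolAttr makes it a true i1 value instead of a 0/1 integer. A minimal sketch of the difference, assuming the upstream MLIR Python bindings (not code from this commit):

    from mlir.ir import Context, BoolAttr, IntegerAttr, IntegerType

    with Context():
        # Before: keepdims carried as a 64-bit integer attribute (0 or 1).
        old_attr = IntegerAttr.get(IntegerType.get_signless(64), 1)
        # After: keepdims carried as an i1 boolean attribute.
        new_attr = BoolAttr.get(True)
        print(old_attr)  # prints: 1 : i64
        print(new_attr)  # prints: true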
4 changes: 2 additions & 2 deletions include/tpu_mlir/Dialect/Tpu/IR/TpuOps.td
@@ -1651,7 +1651,7 @@ def Tpu_ReduceOp: Tpu_Op<"Reduce"> {
     AnyTensorOrNone:$buffer, // cv18xx reciprocal_table
     AnyTensorOrNone:$reciprocal_mantissa_table,
     I64ArrayAttr:$axes,
-    I64Attr:$keepdims,
+    BoolAttr:$keepdims,
     ReduceModeAttr:$mode,
     // for cv18xx
     OptionalAttr<I64ArrayAttr>:$multiplier,
@@ -1673,7 +1673,7 @@ def Tpu_ArgOp: Tpu_Op<"Arg", [DeclareOpInterfaceMethods<TypeInterface>]> {
   let arguments = (ins
     AnyRankedTensor:$input,
     I64Attr:$axis,
-    I64Attr:$keepdims,
+    BoolAttr:$keepdims,
     ArgModeAttr:$mode
   );
 
2 changes: 0 additions & 2 deletions lib/Dialect/Top/Canonicalize/Arg.cpp
@@ -35,7 +35,6 @@ struct TopArgReducefull : public OpRewritePattern<ArgOp> {
                       .Default(-1);
     auto arg_axis = op.getAxis();
     bool match = false;
-    int keep_dims = 0;
     auto reduce_method_exp = (arg_method == arg_max) ? reduce_max:reduce_min;
     for (auto &use : formerOp->getUses()) {
       if (use.getOwner() == op)
@@ -55,7 +54,6 @@ struct TopArgReducefull : public OpRewritePattern<ArgOp> {
       if (reduce_axes->at(0) != arg_axis)
        continue;
       match = true;
-      keep_dims = reop.getKeepdims();
       auto reop_out_shape = module::getShape(reop.getOutput());
       auto reop_out_type = module::getStorageType(reop.getOutput());
       auto new_type = RankedTensorType::get(reop_out_shape, reop_out_type);
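The pattern above fuses an ArgMax/ArgMin with a Reduce of the matching kind (reduce_max for arg_max, reduce_min for arg_min) over the same axis, so one kernel can yield both indices and values. A NumPy illustration of the equivalence the fusion relies on (hypothetical shapes, not from this repository):

    import numpy as np

    x = np.random.rand(2, 3, 4).astype(np.float32)
    indices = np.argmax(x, axis=1)  # what Top::ArgOp computes
    values = np.max(x, axis=1)      # what the matched Top::ReduceOp computes
    # The reduced values are recoverable from the argmax indices, which is
    # why the two ops can share a single backend kernel.
    picked = np.take_along_axis(x, indices[:, None, :], axis=1).squeeze(1)
    assert np.array_equal(values, picked)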
39 changes: 19 additions & 20 deletions lib/Dialect/Top/Canonicalize/Reduce.cpp
@@ -28,8 +28,9 @@ struct TopReduceTranspose : public OpRewritePattern<ReduceOp> {
       return failure();
     }
     auto permuteOp = cast<PermuteOp>(formerOp);
-    if (0 != op->getAttr("keepdims").dyn_cast<IntegerAttr>().getInt())
+    if (op.getKeepdims()) {
       return failure();
+    }
     auto reduce_axes = module::getI64Array(op.getAxes());
     auto permute_order = module::getI64Array(permuteOp.getOrder());
     auto input_shape = module::getShape(op.getInput());
@@ -116,40 +117,38 @@ struct ReduceFusePattern : public OpRewritePattern<ReduceOp> {
 
     int mask[MAX_SHAPE_DIMS] = {0};
     int new_axis_num = 0;
-    for (int i=0; i< axis_num_former; i++) {
+    for (int i = 0; i < axis_num_former; i++) {
       mask[axis_list_former->at(i)] = 1;
       new_axis_num += 1;
     }
-    for (int i=0; i< axis_num_current; i++) {
+    for (int i = 0; i < axis_num_current; i++) {
       int offset = 0;
-      while(mask[axis_list_current->at(i)+offset]) {
-        offset += 1;
+      while (mask[axis_list_current->at(i) + offset]) {
+        offset += 1;
       }
-      mask[axis_list_current->at(i)+offset] = 1;
+      mask[axis_list_current->at(i) + offset] = 1;
       new_axis_num += 1;
     }
     int offset_start = 0;
-    while(!mask[offset_start]) {
-      offset_start += 1;
+    while (!mask[offset_start]) {
+      offset_start += 1;
     }
     std::vector<int64_t> new_axis(new_axis_num, 0);
     for (int i = 0; i < new_axis_num; i++) {
-        int offset_insert = offset_start;
-        while(!mask[offset_insert]) {
-            offset_insert += 1;
-        }
-        new_axis[i] = offset_insert;
-        offset_start = offset_insert + 1;
+      int offset_insert = offset_start;
+      while (!mask[offset_insert]) {
+        offset_insert += 1;
+      }
+      new_axis[i] = offset_insert;
+      offset_start = offset_insert + 1;
     }
     std::vector<NamedAttribute> attrs;
     attrs.push_back(
         rewriter.getNamedAttr("axes", rewriter.getI64ArrayAttr(new_axis)));
-    attrs.push_back(
-        rewriter.getNamedAttr("keepdims", op->getAttr("keepdims")));
-    attrs.push_back(
-        rewriter.getNamedAttr("mode", op->getAttr("mode")));
-    rewriter.replaceOpWithNewOp<ReduceOp>(
-        op, op.getResult().getType(), new_input, attrs);
+    attrs.push_back(rewriter.getNamedAttr("keepdims", op.getKeepdimsAttr()));
+    attrs.push_back(rewriter.getNamedAttr("mode", op.getModeAttr()));
+    rewriter.replaceOpWithNewOp<ReduceOp>(op, op.getResult().getType(),
+                                          new_input, attrs);
     return success();
   }
 };
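The mask loop in ReduceFusePattern is the subtle part: the former Reduce runs with keepdims false, so the axes of the current Reduce index an already-squeezed shape and have to be shifted past every axis the former op removed before the two axis lists can be merged. A standalone Python sketch of that remapping (same logic as the C++ above, simplified to a set):

    def merge_reduce_axes(former_axes, current_axes):
        # Union of two Reduce ops' axes, with the current op's axes
        # remapped from the squeezed shape back to the original rank.
        mask = set(former_axes)
        for ax in current_axes:
            offset = 0
            # Skip past dimensions the former reduce already consumed.
            while ax + offset in mask:
                offset += 1
            mask.add(ax + offset)
        return sorted(mask)

    # reduce(axes=[1]) followed by reduce(axes=[1]) on the squeezed result
    # is one reduce(axes=[1, 2]) over the original input.
    assert merge_reduce_axes([1], [1]) == [1, 2]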
2 changes: 1 addition & 1 deletion lib/Dialect/Top/Interfaces/AvgPool.cpp
@@ -135,7 +135,7 @@ void top::AvgPoolOp::shape_inference() {
                    1;
     out_shape.push_back(out_dim);
   }
-  if (getKeepDim() == false) {
+  if (getKeepdims() == false) {
     while (out_shape.size() > 2) {
       if (out_shape.back() == 1) {
         out_shape.pop_back();
10 changes: 4 additions & 6 deletions lib/Dialect/Tpu/Interfaces/BM1684X/Reduce.cpp
@@ -45,18 +45,16 @@ int64_t tpu::ReduceOp::dyn_codegen_global_bm1684x(void *buffer) {
   if (!buffer)
     return sizeof(reduce_full_global_spec_t);
   reduce_full_global_spec_t spec = {0};
-  auto&& axes = getAxes();
+  auto &&axes = getAxes();
   spec.common.axis_num = axes.size();
-  for (int i = 0; i < axes.size(); i ++)
+  for (int i = 0; i < axes.size(); i++)
     spec.common.axis[i] = (axes[i].cast<IntegerAttr>().getInt());
   spec.common.method = BM168x::get_reduce_type(getMode());
   spec.common.input_scale = 1.0f;
   spec.common.output_scale = 1.0f;
-  spec.common.keep_dims = getKeepdims();
+  spec.common.keep_dims = getKeepdims() ? 1 : 0;
   spec.buffer_addr = module::getAddress(getBuffer());
   return BM168x::dynamic_spec_to_buffer(buffer, spec);
 }
 
-int64_t tpu::ReduceOp::get_fw_type_bm1684x() {
-  return FW_BMNET_REDUCE_FULL;
-}
+int64_t tpu::ReduceOp::get_fw_type_bm1684x() { return FW_BMNET_REDUCE_FULL; }
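The explicit `? 1 : 0` above is needed because reduce_full_global_spec_t is a plain C struct shared with the backend, where keep_dims is an integer field rather than a bool. A rough ctypes analogue (field layout is a guess; only keep_dims is shown):

    import ctypes

    class reduce_full_common_spec(ctypes.Structure):
        # Hypothetical subset of the real spec struct.
        _fields_ = [("keep_dims", ctypes.c_int)]

    keepdims = True  # stand-in for getKeepdims()
    spec = reduce_full_common_spec(keep_dims=1 if keepdims else 0)
    assert spec.keep_dims == 1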
2 changes: 1 addition & 1 deletion python/transform/CaffeConverter.py
@@ -425,7 +425,7 @@ def convert_pooling_op(self, layer):
             self.addOperand(layer.top[1], mask_op)
             return
         elif method == 1:  # AVE
-            attrs['keep_dim'] = len(output_shape) == len(input_shape)
+            attrs['keepdims'] = len(output_shape) == len(input_shape)
             new_op = self.mlir.create_avgpool_op([op], output_shape, **attrs)
             self.addOperand(layer.top[0], new_op)
             return
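The rank comparison above is how the Caffe converter decides whether average pooling preserved the reduced dimensions. A tiny worked example with assumed NCHW shapes:

    # Pooling that preserves rank: 1x64x7x7 -> 1x64x1x1, so keepdims is True.
    input_shape, output_shape = [1, 64, 7, 7], [1, 64, 1, 1]
    assert (len(output_shape) == len(input_shape)) is True

    # A squeezed result drops the spatial dims: 1x64x7x7 -> 1x64.
    input_shape, output_shape = [1, 64, 7, 7], [1, 64]
    assert (len(output_shape) == len(input_shape)) is False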
8 changes: 4 additions & 4 deletions python/transform/MLIRImporter.py
@@ -383,8 +383,8 @@ def create_avgpool_op(self, operands, output_shape, **kargs):
            'do_relu': BoolAttr.get(kargs['do_relu']),
            'count_include_pad': BoolAttr.get(kargs['count_include_pad']),
        }
-        if 'keep_dim' in kargs:
-            param['keep_dim'] = BoolAttr.get(kargs['keep_dim'])
+        if 'keepdims' in kargs:
+            param['keepdims'] = BoolAttr.get(kargs['keepdims'])
        if "ceil_mode" in kargs:
            param["ceil_mode"] = BoolAttr.get(kargs["ceil_mode"])
        return self.buildOp(Top.AvgPoolOp, operands, [output_type], **param)
@@ -905,7 +905,7 @@ def create_reduce_op(self, operands, output_shape, **kargs):
        param = {
            'name': kargs['name'],
            'axes': self.ArrayAttr(kargs['axes']),
-            'keepdims': IntegerAttr.get(self.mlir_type['INT64'], kargs['keepdims']),
+            'keepdims': BoolAttr.get(kargs['keepdims']),
            'mode': StringAttr.get(kargs['mode']),
        }
        return self.buildOp(Top.ReduceOp, operands, [output_type], **param)
@@ -920,7 +920,7 @@ def create_arg_op(self, operands, output_shapes, **kargs):
        param = {
            'name': kargs['name'],
            'axis': IntegerAttr.get(self.mlir_type['INT64'], kargs['axis']),
-            'keepdims': IntegerAttr.get(self.mlir_type['INT64'], kargs['keepdims']),
+            'keepdims': BoolAttr.get(kargs['keepdims']),
            'mode': StringAttr.get(kargs['mode']),
        }
        return self.buildOp(Top.ArgOp, operands, out_types, **param)
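Since the importer now wraps keepdims in BoolAttr.get, callers are expected to pass a Python bool rather than a 0/1 integer, which is exactly what the converter changes below enforce. A hypothetical call site (names assumed for illustration):

    p = {
        'name': 'reduce_out',   # hypothetical result name
        'axes': [1, 2],
        'keepdims': True,       # now a bool; previously an int 0/1
        'mode': 'ReduceMean',
    }
    # new_op = self.mlir.create_reduce_op([input_op], output_shape, **p)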
10 changes: 5 additions & 5 deletions python/transform/OnnxConverter.py
@@ -865,7 +865,7 @@ def convert_global_avgpool_op(self, onnx_node):
            'pads': num_dim * 2 * [0],
            'count_include_pad': True,
            'do_relu': False,
-            'keep_dim': len(input_shape) == len(output_shape),
+            'keepdims': len(input_shape) == len(output_shape),
        }
        new_op = self.mlir.create_avgpool_op([op], output_shape, **p)
        self.addOperand(onnx_node.name, new_op)
@@ -895,7 +895,7 @@ def convert_avgpool_op(self, onnx_node):
            'pads': pads,
            'count_include_pad': count_include_pad,
            'do_relu': False,
-            'keep_dim': len(input_shape) == len(output_shape),
+            'keepdims': len(input_shape) == len(output_shape),
        }
        new_op = self.mlir.create_avgpool_op([op], output_shape, **p)
        self.addOperand(onnx_node.name, new_op)
@@ -1480,7 +1480,7 @@ def convert_reduce_op(self, onnx_node):
        num_dims = len(input_shape)
        axes = onnx_node.attrs.get('axes', list(range(num_dims))) \
            if len(onnx_node.inputs) == 1 else self.getWeight(onnx_node.inputs[1])
-        keepdims = onnx_node.attrs.get('keepdims', 1)
+        keepdims = onnx_node.attrs.get('keepdims', 1) != 0
        for idx, ax in enumerate(axes):
            if ax < 0:
                axes[idx] += num_dims
@@ -1493,7 +1493,7 @@ def convert_reduce_op(self, onnx_node):
            'pads': [0, 0, 0, 0],
            'count_include_pad': True,
            'do_relu': False,
-            'keep_dim': len(input_shape) == len(output_shape),
+            'keepdims': len(input_shape) == len(output_shape),
        }
        new_op = self.mlir.create_avgpool_op(
            [op], output_shape, **
@@ -1514,7 +1514,7 @@ def convert_arg_op(self, onnx_node):
        assert (onnx_node.op_type in ["ArgMin", "ArgMax"])
        op = self.getOperand(onnx_node.inputs[0])
        axis = onnx_node.attrs.get('axis', 0)
-        keepdims = onnx_node.attrs.get('keepdims', 1)
+        keepdims = onnx_node.attrs.get('keepdims', 1) != 0
        p = {
            "name": [onnx_node.name + '_indices', onnx_node.name + '_values'],
            "axis": axis,
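ONNX defines keepdims as an integer attribute with default 1, which is why both conversions above normalize it with `!= 0` before it reaches BoolAttr.get. A quick check using the onnx helper API (a sketch, assuming the onnx package):

    import onnx.helper as helper

    node = helper.make_node("ReduceMean", ["x"], ["y"], keepdims=0)
    attrs = {a.name: helper.get_attribute_value(a) for a in node.attribute}
    keepdims = attrs.get("keepdims", 1) != 0
    assert keepdims is False and isinstance(keepdims, bool)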
4 changes: 2 additions & 2 deletions python/transform/TFLiteConverter.py
@@ -691,7 +691,7 @@ def mean_op(self, op):
                "kernel_shape": self.mlir.ArrayAttr(kernel_shape),
                "strides": self.mlir.ArrayAttr([1, 1]),
                "pads": self.mlir.ArrayAttr([0, 0, 0, 0]),
-                "keep_dim": BoolAttr.get(len(op.inputs[0].shape) == len(op.outputs[0].shape)),
+                "keepdims": BoolAttr.get(len(op.inputs[0].shape) == len(op.outputs[0].shape)),
            }
            return Top.AvgPoolOp, attr, True
        else:
@@ -708,7 +708,7 @@ def reduce_op(self, op, mode):
        axes = [self.__axis_transpose(op, i) for i in args]
        attr = {
            "axes": self.mlir.ArrayAttr(axes),
-            "keepdims": IntegerAttr.get(self.type_to_mlir[TensorType.INT64], param.KeepDims()),
+            "keepdims": BoolAttr.get(param.KeepDims()),
            "mode": StringAttr.get(mode),
        }
        return Top.ReduceOp, attr, True
4 changes: 2 additions & 2 deletions python/transform/TorchConverter.py
@@ -462,11 +462,11 @@ def convert_sub_op(self, torch_node: TorchNode):
    def convert_sum_op(self, torch_node: TorchNode):
        op0 = self.getOp(torch_node.inputs[0])
        axes = self.const_val[torch_node.inputs[1]]
-        keep_dim = self.const_val[torch_node.inputs[2]]
+        keepdims = self.const_val[torch_node.inputs[2]]
        p = {
            'name': torch_node.name,
            'axes': axes,
-            'keepdims': keep_dim,
+            'keepdims': keepdims,
            'mode': 'ReduceSum',
        }
        new_op = self.mlir.create_reduce_op([op0], [], **p)
