Skip to content
12 changes: 6 additions & 6 deletions include/tvm/relay/attrs/nn.h
Original file line number Diff line number Diff line change
Expand Up @@ -430,10 +430,10 @@ struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
"dimensions respectively. Convolution is applied on the 'D', 'H' and"
"'W' dimensions.");
TVM_ATTR_FIELD(kernel_layout)
.set_default("OIDHW")
.set_default("IODHW")
.describe(
"Dimension ordering of data and weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
"'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
"Dimension ordering of data and weight. Can be 'IODHW', 'IODHW16i16o', etc."
"'I', 'O', 'D', 'H', 'W' stands for input_channel, num_filter, depth, height, and width"
"dimensions respectively.");
TVM_ATTR_FIELD(out_layout)
.set_default("")
Expand Down Expand Up @@ -588,10 +588,10 @@ struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
"dimensions respectively. Convolution is applied on the 'H' and"
"'W' dimensions.");
TVM_ATTR_FIELD(kernel_layout)
.set_default("OIHW")
.set_default("IOHW")
.describe(
"Dimension ordering of data and weight. Can be 'OIHW', 'OIHW16o16i', etc."
"'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
"Dimension ordering of data and weight. Can be 'IOHW', 'OIHW16o16i', etc."
"'I', 'O', 'H', 'W' stands for input_channel, num_filter, height, and width"
"dimensions respectively.");
TVM_ATTR_FIELD(out_layout)
.set_default("")
Expand Down
11 changes: 7 additions & 4 deletions python/tvm/relay/frontend/keras.py
Original file line number Diff line number Diff line change
Expand Up @@ -450,27 +450,30 @@ def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=Non

def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=None):
_check_data_format(keras_layer)
is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"
weightList = keras_layer.get_weights()
weight = weightList[0]
if input_shape is None:
input_shape = keras_layer.input_shape

if data_layout == "NDHWC":
kernel_layout = "DHWIO"
if is_deconv:
kernel_layout = "DHWOI"
else:
kernel_layout = "OIDHW"
if is_deconv:
kernel_layout = "IODHW"
msg = (
"Kernel layout with {} is not supported for operator Convolution3D "
"in frontend Keras."
)
raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout))

is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"

if is_deconv:
kernel_d, kernel_h, kernel_w, n_filters, _ = weight.shape
if kernel_layout == "OIDHW":
weight = weight.transpose([4, 3, 2, 0, 1])
if kernel_layout == "IODHW":
weight = weight.transpose([4, 3, 0, 1, 2])
else:
kernel_d, kernel_h, kernel_w, _, n_filters = weight.shape

Expand Down
3 changes: 3 additions & 0 deletions python/tvm/relay/frontend/pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -1251,6 +1251,9 @@ def convolution(self, inputs, input_types):
if len(kernel_size) == 3:
data_layout = "NCDHW"
kernel_layout = "OIDHW"
if use_transpose:
# Transposed convolutions have IODHW layout.
kernel_layout = "IODHW"
elif len(kernel_size) == 2:
data_layout = "NCHW"
kernel_layout = "OIHW"
Expand Down
5 changes: 4 additions & 1 deletion python/tvm/relay/frontend/tensorflow_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -693,7 +693,10 @@ def _impl(inputs, attr, params, mod):
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))

if "kernel_layout" not in attr:
attr["kernel_layout"] = "DHWIO" if attr["data_format"] == "NDHWC" else "OIDHW"
if opname == "conv":
attr["kernel_layout"] = "DHWIO" if attr["data_format"] == "NDHWC" else "OIDHW"
elif opname == "conv_transpose":
attr["kernel_layout"] = "DHWOI" if attr["data_format"] == "NDHWC" else "IODHW"

use_bias = len(inputs) == (3 if opname != "conv_transpose" else 4)
channel_axis = 1 if attr["data_format"] == "NCDHW" else 4
Expand Down
45 changes: 45 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,51 @@ def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)


@reg.register_convert_op_layout("nn.conv3d_transpose")
def convert_conv3d_transpose(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv3d_transpose op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    tinfos : list of types
        List of input and output types
    desired_layouts : list of layout strings
        List of layouts defining our desired
        layout for the data and kernel inputs respectively.

    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr
    """
    data, weight = inputs
    assert (
        len(desired_layouts) == 2
    ), "A desired layout is expected for both of nn.conv3d_transpose's inputs"
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"

    new_attrs = dict(attrs)
    new_attrs["data_layout"] = desired_data_layout

    if desired_kernel_layout != "default":
        # Caller pinned an explicit kernel layout: honor it as-is.
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv3d_transpose(data, weight, **new_attrs)

    # No explicit kernel layout requested: pick the canonical kernel
    # layout that pairs with the requested data layout.
    default_kernel_layouts = {"NCDHW": "IODHW", "NDHWC": "DHWOI"}
    if desired_data_layout not in default_kernel_layouts:
        raise ValueError("Layout %s is not yet supported" % desired_data_layout)

    new_attrs["kernel_layout"] = default_kernel_layouts[desired_data_layout]
    return relay.nn.conv3d_transpose(data, weight, **new_attrs)


@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
"""Legalize conv3d_transpose op.
Expand Down
2 changes: 1 addition & 1 deletion python/tvm/relay/op/nn/nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,7 +436,7 @@ def conv3d_transpose(
channels=None,
kernel_size=None,
data_layout="NCDHW",
kernel_layout="OIDHW",
kernel_layout="IODHW",
out_layout="",
output_padding=(0, 0, 0),
out_dtype="",
Expand Down
11 changes: 5 additions & 6 deletions python/tvm/topi/nn/conv3d_transpose.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,20 +147,19 @@ def conv3d_transpose_legalize(attrs, inputs, types):
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
# Convert Kernel layout to IODHW
# kernel_layout is different from input kernel layout - IO is swapped
if kernel_layout == "DHWIO":
# input kernel layout is swapped to DHWOI
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(4, 3, 0, 1, 2))
kernel = relay.transpose(kernel, axes=(3, 4, 0, 1, 2))
elif kernel_layout == "DHWOI":
# input kernel layout is swapped to DHWIO
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(3, 4, 0, 1, 2))
elif kernel_layout == "IODHW":
kernel = relay.transpose(kernel, axes=(4, 3, 0, 1, 2))
elif kernel_layout == "OIDHW":
# input kernel layout is swapped to OIDHW
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(1, 0, 2, 3, 4))
elif kernel_layout == "OIDHW":
elif kernel_layout == "IODHW":
# input kernel layout is swapped to IODHW
# output kernel layout will be IODHW
pass
Expand All @@ -172,7 +171,7 @@ def conv3d_transpose_legalize(attrs, inputs, types):
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCDHW"
# layout of kernel should be IODHW, but kernel_layout should be swapped - OIDHW
new_attrs["kernel_layout"] = "OIDHW"
new_attrs["kernel_layout"] = "IODHW"

# Convert data to NCDHW.
data = relay.transpose(data, axes=(0, 4, 1, 2, 3))
Expand Down
18 changes: 10 additions & 8 deletions src/relay/op/nn/convolution.cc
Original file line number Diff line number Diff line change
Expand Up @@ -594,7 +594,7 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
if (data == nullptr) return false;

static const Layout kNCDHW("NCDHW");
static const Layout kOIDHW("OIDHW");
static const Layout kIODHW("IODHW");

const Conv3DTransposeAttrs* param = attrs.as<Conv3DTransposeAttrs>();
ICHECK(param != nullptr);
Expand All @@ -606,9 +606,9 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
<< "Conv3d_transpose only support input layouts that are convertible from NCDHW."
<< " But got " << in_layout;

const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW);
const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kIODHW);
ICHECK(trans_kernel_layout.defined())
<< "Conv3d_transpose only support kernel layouts that are convertible from OIDHW."
<< "Conv3d_transpose only support kernel layouts that are convertible from IODHW."
<< " But got " << kernel_layout;

Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
Expand Down Expand Up @@ -651,16 +651,18 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
reporter->AssertEQ(param->kernel_size[2], wshape[4]))
<< "Conv3D: shape of weight is inconsistent with kernel_size, "
<< "Conv3DTransposed: shape of weight is inconsistent with kernel_size, "
<< " kernel_size=" << param->kernel_size << " wshape=" << Array<IndexExpr>(wshape);
}
if (param->channels.defined()) {
ICHECK(reporter->AssertEQ(param->channels, wshape[1]))
<< "Conv3D: shape of weight is inconsistent with channels, "
<< " channels=" << param->channels << " wshape=" << Array<IndexExpr>(wshape);
ICHECK(reporter->AssertEQ(indexdiv(param->channels, param->groups), wshape[1]))
<< "Conv3DTransposed: shape of weight is inconsistent out_channels, "
<< " out_channels // groups != weight.shape[1] "
<< " out_channels=" << param->channels << " groups=" << param->groups
<< " wshape=" << Array<IndexExpr>(wshape);
}
if (!dshape_ncdhw[1].as<tir::AnyNode>() && !wshape[0].as<tir::AnyNode>()) {
ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0]));
ICHECK(reporter->AssertEQ(dshape_ncdhw[1], wshape[0]));
}
channels = wshape[1];
dilated_ksize_d = 1 + (wshape[2] - 1) * param->dilation[0];
Expand Down
7 changes: 1 addition & 6 deletions src/runtime/contrib/dnnl/dnnl_json_runtime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -424,15 +424,10 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
// Minus one for DNNL representation. No dilation for DNNL is 0, for relay is 1.
for (auto& d : dilates) d--;

// TODO(@apeskov): WA. conv3dTranspose uses wrong layout specifier. IO instead of OI.
auto wgh_logic_layout = TensorRequisite::DefaultLogicLayoutFor(wgh_layout);
if (wgh_logic_layout == "OIDHW") wgh_logic_layout = "IODHW";
if (wgh_logic_layout == "GOIDHW") wgh_logic_layout = "GIODHW";

// Take into account provided layout strings
src_tr = src_tr.TreatAs(src_layout);
dst_tr = dst_tr.TreatAs(dst_layout);
wgh_tr = wgh_tr.TreatAs(wgh_layout, wgh_logic_layout);
wgh_tr = wgh_tr.TreatAs(wgh_layout);

// Should support G mixed with O. Like { G*O, I, H, W }
if (wgh_layout.find("G") == std::string::npos) {
Expand Down
2 changes: 1 addition & 1 deletion tests/python/contrib/test_dnnl.py
Original file line number Diff line number Diff line change
Expand Up @@ -486,7 +486,7 @@ def get_conv3d_transpose(
activation=None,
dtype="float32",
data_layout="NCDHW",
kernel_layout="OIDHW",
kernel_layout="IODHW",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
Expand Down