diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py index 26d20e6e0aac..a42140ec0075 100644 --- a/benchmark/python/ffi/benchmark_ffi.py +++ b/benchmark/python/ffi/benchmark_ffi.py @@ -130,6 +130,7 @@ def prepare_workloads(): out=dnp.array([False, False], dtype=bool), keepdims=False) OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0) OpArgMngr.add_workload("rot90", pool["2x2"], 2) + OpArgMngr.add_workload("triu", pool['3x3']) OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1) OpArgMngr.add_workload("vsplit", pool['2x2'], 2) OpArgMngr.add_workload("hsplit", pool['2x2'], 2) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index e88796c8158d..d1b80cadf484 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -43,7 +43,7 @@ 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', - 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', + 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'interp', 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', @@ -2070,6 +2070,31 @@ def tril(m, k=0): return _api_internal.tril(m, k) +@set_module('mxnet.ndarray.numpy') +def triu(m, k=0): + r""" + Upper triangle of an array. + + Return a copy of a matrix with the elements below the `k`-th diagonal + zeroed. + + Please refer to the documentation for `tril` for further details. 
+ + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + """ + return _api_internal.triu(m, k) + + def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs): """Helper function for unary operators with kwargs. diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 3b57f1a2a5fd..feec9ca89a9a 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -68,7 +68,7 @@ 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', - 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', + 'unique', 'lcm', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', 'equal', 'not_equal', 'interp', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul', @@ -5614,6 +5614,31 @@ def tril_indices(n, k=0, m=None): # pylint: disable=redefined-outer-name +@set_module('mxnet.numpy') +def triu(m, k=0): + r""" + Upper triangle of an array. + + Return a copy of a matrix with the elements below the `k`-th diagonal + zeroed. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + """ + return _mx_nd_np.triu(m, k) + + @set_module('mxnet.numpy') def arange(start, stop=None, step=1, dtype=None, ctx=None): """Return evenly spaced values within a given interval. 
diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 9749721ed9e1..5061b772ae8f 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -49,7 +49,7 @@ 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'interp', - 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', + 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'kron', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', @@ -2200,6 +2200,32 @@ def tril(m, k=0): @set_module('mxnet.symbol.numpy') +def triu(m, k=0): + r""" + Upper triangle of an array. + + Return a copy of an array with elements below the `k`-th diagonal zeroed. + + Parameters + ---------- + m : _Symbol, shape (M, N) + Input array. + k : int, optional + Diagonal below which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + triu : _Symbol, shape (M, N) + Upper triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + tril : same thing, only for the lower triangle + """ + return _npi.triu(m, k) + + def tril_indices(n, k=0, m=None): """ Return the indices for the lower-triangle of an (n, m) array. 
diff --git a/src/api/operator/numpy/np_triu_op.cc b/src/api/operator/numpy/np_triu_op.cc new file mode 100644 index 000000000000..e42169aca43b --- /dev/null +++ b/src/api/operator/numpy/np_triu_op.cc @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * \file np_triu_op.cc + * \brief Implementation of the API of functions in src/operator/numpy/np_triu_op.cc + */ +#include <mxnet/api_registry.h> +#include <mxnet/runtime/packed_func.h> +#include "../utils.h" +#include "../../../operator/numpy/np_triu_op-inl.h" + +namespace mxnet { + +MXNET_REGISTER_API("_npi.triu") +.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { + using namespace runtime; + op::TriuParam param; + nnvm::NodeAttrs attrs; + const nnvm::Op* op = Op::Get("_npi_triu"); + // inputs + param.k = args[1].operator int(); + NDArray* inputs[] = {args[0].operator NDArray*()}; + + attrs.op = op; + attrs.parsed = param; + SetAttrDict<op::TriuParam>(&attrs); + + int num_outputs = 0; + auto ndoutputs = Invoke(op, &attrs, 1, inputs, &num_outputs, nullptr); + *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]); +}); + +} // namespace mxnet diff --git a/src/operator/numpy/np_triu_op-inl.h b/src/operator/numpy/np_triu_op-inl.h new file mode 100644 index 000000000000..17a484f26efb --- /dev/null +++ b/src/operator/numpy/np_triu_op-inl.h @@ -0,0 +1,241 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*!
+ * Copyright (c) 2020 by Contributors + * \file np_triu_op-inl.h + * \brief Function definition of triu (upper triangle of an array) op + */ + +#ifndef MXNET_OPERATOR_NUMPY_NP_TRIU_OP_INL_H_ +#define MXNET_OPERATOR_NUMPY_NP_TRIU_OP_INL_H_ + +#include +#include +#include +#include +#include +#include +#include "../mxnet_op.h" +#include "../operator_common.h" +#include "../elemwise_op_common.h" + +namespace mxnet { +namespace op { + +struct TriuParam : public dmlc::Parameter { + int k; + DMLC_DECLARE_PARAMETER(TriuParam) { + DMLC_DECLARE_FIELD(k) + .set_default(0) + .describe("Diagonal in question. The default is 0. " + "Use k>0 for diagonals above the main diagonal, " + "and k<0 for diagonals below the main diagonal. " + "If input has shape (S0 S1) k must be between -S0 and S1."); + } + void SetAttrDict(std::unordered_map* dict) { + std::ostringstream k_s; + k_s << k; + (*dict)["k"] = k_s.str(); + } +}; + +inline bool TriuOpShape(const nnvm::NodeAttrs& attrs, + mxnet::ShapeVector* in_attrs, + mxnet::ShapeVector* out_attrs) { + CHECK_EQ(in_attrs->size(), 1U); + CHECK_EQ(out_attrs->size(), 1U); + + const mxnet::TShape& ishape = (*in_attrs)[0]; + mxnet::TShape oshape; + + if (!mxnet::ndim_is_known(ishape)) { + return false; + } + + if (ishape.ndim() == 1) { + auto s = ishape[0]; + oshape = mxnet::TShape({s, s}); + } else { + oshape = ishape; + } + + if (shape_is_none(oshape)) { + LOG(FATAL) << "Diagonal does not exist."; + } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); + + return shape_is_known(out_attrs->at(0)); +} + +template +struct triu1Dforward { + template + MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, + mshadow::Shape<2> oshape, int k) { + using namespace mxnet_op; + + const index_t row_id = i / oshape[1]; + const index_t col_id = i % oshape[1]; + if (col_id < (row_id + k)) { + KERNEL_ASSIGN(out[i], req, static_cast(0)); + } else { + KERNEL_ASSIGN(out[i], req, data[col_id]); + } + } +}; + +template +struct triu1Dbackward { + 
template + MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, + mshadow::Shape<1> oshape, int k) { + using namespace mxnet_op; + auto m = oshape[0]; + auto start = i - k; + DType res = 0; + for (auto y = 0; y <= start && y < m; y++) { + res += data[y * m + i]; + } + KERNEL_ASSIGN(out[i], req, res); + } +}; + +template +struct triu2D { + template + MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, + mshadow::Shape<2> oshape, int k) { + using namespace mxnet_op; + + const index_t row_id = i / oshape[1]; + const index_t col_id = i % oshape[1]; + if (col_id < (row_id + k)) { + KERNEL_ASSIGN(out[i], req, static_cast(0)); + } else { + KERNEL_ASSIGN(out[i], req, data[i]); + } + } +}; + +template +struct triu3D { + template + MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, + mshadow::Shape<3> oshape, int k) { + using namespace mxnet_op; + + const index_t row_id = i % (oshape[1] * oshape[2]) / oshape[2]; + const index_t col_id = i % (oshape[1] * oshape[2]) % oshape[2]; + if (col_id < (row_id + k)) { + KERNEL_ASSIGN(out[i], req, static_cast(0)); + } else { + KERNEL_ASSIGN(out[i], req, data[i]); + } + } +}; + +template +void TriuOpProcess(const TBlob& in_data, + const TBlob& out_data, + index_t dsize, + const TriuParam& param, + mxnet_op::Stream *s, + const std::vector& req) { + using namespace mxnet_op; + using namespace mshadow; + + const mxnet::TShape& ishape = in_data.shape_; + const mxnet::TShape& oshape = out_data.shape_; + + if (ishape.ndim() == 2 && oshape.ndim() == 2) { + MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { + MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { + Kernel, xpu>::Launch( + s, dsize, out_data.dptr(), in_data.dptr(), + Shape2(oshape[0], oshape[1]), param.k); + }); + }); + } else if (ishape.ndim() > 2) { + MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { + MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { + Kernel, xpu>::Launch( + s, dsize, out_data.dptr(), in_data.dptr(), + 
oshape.FlatTo3D(oshape.ndim() - 2), param.k); + }); + }); + } else { + MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { + MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { + if (back) { + Kernel, xpu>::Launch( + s, dsize, out_data.dptr(), in_data.dptr(), + Shape1(oshape[0]), param.k); + } else { + Kernel, xpu>::Launch( + s, dsize, out_data.dptr(), in_data.dptr(), + Shape2(oshape[0], oshape[1]), param.k); + } + }); + }); + } +} + +template +void TriuOpForward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mxnet_op; + using namespace mshadow; + CHECK_EQ(inputs.size(), 1U); + CHECK_EQ(outputs.size(), 1U); + CHECK_EQ(req.size(), 1U); + Stream *s = ctx.get_stream(); + const TBlob& in_data = inputs[0]; + const TBlob& out_data = outputs[0]; + const TriuParam& param = nnvm::get(attrs.parsed); + + TriuOpProcess(in_data, out_data, out_data.Size(), param, s, req); +} + +template +void TriuOpBackward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mxnet_op; + using namespace mshadow; + CHECK_EQ(inputs.size(), 1U); + CHECK_EQ(outputs.size(), 1U); + Stream *s = ctx.get_stream(); + + const TBlob& in_data = inputs[0]; + const TBlob& out_data = outputs[0]; + const TriuParam& param = nnvm::get(attrs.parsed); + + TriuOpProcess(in_data, out_data, out_data.Size(), param, s, req); +} + +} // namespace op +} // namespace mxnet + +#endif // MXNET_OPERATOR_NUMPY_NP_TRIU_OP_INL_H_ diff --git a/src/operator/numpy/np_triu_op.cc b/src/operator/numpy/np_triu_op.cc new file mode 100644 index 000000000000..fdd526060001 --- /dev/null +++ b/src/operator/numpy/np_triu_op.cc @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! +* Copyright (c) 2020 by Contributors +* \file np_triu_op.cc +* \brief CPU implementation of numpy triu operator +*/ + +#include "./np_triu_op-inl.h" + +namespace mxnet { +namespace op { + +DMLC_REGISTER_PARAMETER(TriuParam); + +NNVM_REGISTER_OP(_npi_triu) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs(1) +.set_attr("FListInputNames", + [](const NodeAttrs& attrs) { + return std::vector{"data"}; + }) +.set_attr("FInferShape", TriuOpShape) +.set_attr("FInferType", ElemwiseType<1, 1>) +.set_attr("FCompute", TriuOpForward) +.set_attr("FInplaceOption", + [](const NodeAttrs& attrs) { + return std::vector >{{0, 0}}; + }) +.set_attr("FGradient", ElemwiseGradUseNone{"_backward_triu"}) +.add_argument("data", "NDArray-or-Symbol", "Input ndarray") +.add_arguments(TriuParam::__FIELDS__()); + + +NNVM_REGISTER_OP(_backward_triu) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs(1) +.set_attr("TIsBackward", true) +.set_attr("FCompute", TriuOpBackward); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/numpy/np_triu_op.cu b/src/operator/numpy/np_triu_op.cu new file mode 100644 index 000000000000..a143859e1db6 --- /dev/null +++ b/src/operator/numpy/np_triu_op.cu @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2020 by Contributors + * \file np_triu_op.cu + * \brief GPU implementation of numpy triu operator + */ + +#include "./np_triu_op-inl.h" + +namespace mxnet { +namespace op { + +NNVM_REGISTER_OP(_npi_triu) +.set_attr("FCompute", TriuOpForward); + +NNVM_REGISTER_OP(_backward_triu) +.set_attr("FCompute", TriuOpBackward); + +} // namespace op +} // namespace mxnet diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 79a45f5e46f6..080fb03a7158 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -722,6 +722,23 @@ def _add_workload_tril(): OpArgMngr.add_workload('tril', np.zeros((3, 3), dtype=dt)) +def _add_workload_triu(): + OpArgMngr.add_workload('triu', np.random.uniform(size=(4, 1))) + for dt in ['float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8']: + OpArgMngr.add_workload('triu', np.ones((2, 2), dtype=dt)) + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dt) + OpArgMngr.add_workload('triu', a) + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + 
OpArgMngr.add_workload('triu', arr) + OpArgMngr.add_workload('triu', np.zeros((3, 3), dtype=dt)) + + def _add_workload_einsum(): chars = 'abcdefghij' sizes = [2, 3, 4, 5, 4, 3, 2, 6, 5, 4] diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 111f0282283e..691c1ea897eb 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -2099,6 +2099,67 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) +@with_seed() +@use_np +def test_np_triu(): + # numpy triu does not support scalar array (zero-dim) + config = [ + ((4, 2), 3), + ((4, 2), 9), + ((4, 2), 0), + ((4, 2), -1), + ((4, 5, 6), 0), + ((4, 5, 6), 5), + ((4, 5, 6), 2), + ((4, 5, 6), -2), + ((4, 5, 6), -5), + ((4, 0), 0), + ((4, 0), 2), + ((4, 0), 4), + ((4, 0), -3), + ((4, 0, 5), 0), + ((4, 0, 5), 1), + ((4, 0, 5), 5), + ((4, 0, 5), -3), + ((3, ), 0), + ((3, ), 2), + ((3, ), 5) + ] + + class TestTriu(HybridBlock): + def __init__(self, k): + super(TestTriu, self).__init__() + self._k = k + + def hybrid_forward(self, F, x): + return F.np.triu(x, k=self._k) + + for prefix in [1, -1]: + for shape, k in config: + data_np = _np.random.uniform(size=shape) + data_mx = np.array(data_np, dtype=data_np.dtype) + data_mx.attach_grad() + ret_np = _np.triu(data_np, k*prefix) + with mx.autograd.record(): + ret_mx = np.triu(data_mx, k*prefix) + assert same(ret_mx.asnumpy(), ret_np) + ret_mx.backward() + if len(shape) == 2: + grad_np = _np.triu(_np.ones_like(data_np), k*prefix) + assert same(data_mx.grad.asnumpy(), grad_np) + if len(shape) == 1: + grad_np = _np.triu(_np.ones(shape), k*prefix) + grad_np = grad_np.sum(axis=0, keepdims=False) + assert 
same(data_mx.grad.asnumpy(), grad_np) + + net = TestTriu(k*prefix) + for hybrid in [False, True]: + if hybrid: + net.hybridize() + ret_mx = net(data_mx) + assert same(ret_mx.asnumpy(), ret_np) + + @with_seed() @use_np def test_np_unary_funcs():