From 1d893a11345c416ae683f3faa260b6515b341af9 Mon Sep 17 00:00:00 2001
From: Aditya Goel
Date: Mon, 10 Jun 2024 22:54:40 +0100
Subject: [PATCH] Drop pyproject.toml entries, start windows fix

---
 .github/workflows/array-api.yml            |  4 +++
 .github/workflows/ci.yml                   |  8 ++---
 ndonnx/_core/_impl.py                      |  5 ++-
 ndonnx/_funcs.py                           | 11 ++++--
 ndonnx/_opset_extensions.py                | 42 ++++++++++++++++-------
 pyproject.toml                             |  9 -----
 tests/ndonnx/test_constant_propagation.py  |  4 +--
 tests/ndonnx/test_core.py                  | 14 ++++----
 tests/ndonnx/test_masked.py                |  9 +++--
 9 files changed, 67 insertions(+), 39 deletions(-)

diff --git a/.github/workflows/array-api.yml b/.github/workflows/array-api.yml
index 7d0a0da..1cd6665 100644
--- a/.github/workflows/array-api.yml
+++ b/.github/workflows/array-api.yml
@@ -13,6 +13,10 @@ concurrency:
 
 jobs:
   array-api-tests:
+    # Run if the commit message contains 'run array-api tests' or if the job is triggered on schedule
+    if: >-
+      contains(github.event.head_commit.message, 'run array-api tests') ||
+      github.event_name == 'schedule'
     name: Array API test
     timeout-minutes: 90
     runs-on: ubuntu-latest-8core
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d342e3f..b3fac52 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -32,13 +32,13 @@ jobs:
       fail-fast: false
       matrix:
         os:
-          - ubuntu-latest
-          - macos-latest
+          # - ubuntu-latest
+          # - macos-latest
           - windows-latest
         environment:
-          - py310
+          # - py310
           - py311
-          - py312
+          # - py312
     steps:
       - name: Checkout branch
         uses: actions/checkout@v4
diff --git a/ndonnx/_core/_impl.py b/ndonnx/_core/_impl.py
index 2b0f1fb..44bf656 100644
--- a/ndonnx/_core/_impl.py
+++ b/ndonnx/_core/_impl.py
@@ -540,7 +540,10 @@ def unique_all(self, x):
 
         # FIXME: I think we can simply use arange/ones+cumsum or something for the indices
        # maybe: indices = opx.cumsum(ones_like(flattened, dtype=dtypes.i64), axis=ndx.asarray(0))
-        indices = opx.squeeze(opx.ndindex(opx.shape(flattened._core())), opx.const([1]))
+        indices = opx.squeeze(
+            opx.ndindex(opx.shape(flattened._core())),
+            opx.const([1], dtype=dtypes.int64),
+        )
 
         ret = namedtuple("ret", ["values", "indices", "inverse_indices", "counts"])
 
diff --git a/ndonnx/_funcs.py b/ndonnx/_funcs.py
index f69c9a5..dd5c44a 100644
--- a/ndonnx/_funcs.py
+++ b/ndonnx/_funcs.py
@@ -618,7 +618,9 @@ def expand_dims(x, axis=0):
     if (out := x.dtype._ops.expand_dims(x, axis)) is not NotImplemented:
         return out
     return x._transmute(
-        lambda corearray: opx.unsqueeze(corearray, axes=opx.const([axis]))
+        lambda corearray: opx.unsqueeze(
+            corearray, axes=opx.const([axis], dtype=dtypes.int64)
+        )
     )
 
 
@@ -694,7 +696,12 @@ def roll(x, shift, axis=None):
         shift_single = opx.add(opx.const(-sh), len_single)
         # Find the needed element index and then gather from it
         range = opx.cast(
-            opx.range(opx.const(0), len_single, opx.const(1)), to=dtypes.int64
+            opx.range(
+                opx.const(0, dtype=len_single.dtype),
+                len_single,
+                opx.const(1, dtype=len_single.dtype),
+            ),
+            to=dtypes.int64,
         )
         new_indices = opx.mod(opx.add(range, shift_single), len_single)
         x = take(x, _from_corearray(new_indices), axis=ax)
diff --git a/ndonnx/_opset_extensions.py b/ndonnx/_opset_extensions.py
index 815f28d..45a28c2 100644
--- a/ndonnx/_opset_extensions.py
+++ b/ndonnx/_opset_extensions.py
@@ -184,6 +184,8 @@ def const(value, dtype: CoreType | None = None) -> _CoreArray:
 
 @eager_propagate
 def squeeze(data: _CoreArray, axes: _CoreArray | None = None) -> _CoreArray:
+    if axes is not None and axes.dtype != dtypes.int64:
+        raise ValueError(f"Expected axes to be of type int64, got {axes.dtype}")
     return _CoreArray(op.squeeze(data.var, axes=axes.var if axes is not None else None))
 
 
@@ -364,6 +366,8 @@ def concat(inputs: list[_CoreArray], axis: int) -> _CoreArray:
 
 @eager_propagate
 def unsqueeze(data: _CoreArray, axes: _CoreArray) -> _CoreArray:
+    if axes.dtype != dtypes.int64:
+        raise TypeError(f"axes must be int64, got {axes.dtype}")
     return _CoreArray(op.unsqueeze(data.var, axes.var))
 
 
@@ -503,7 +507,7 @@ def getitem_null(corearray: _CoreArray, index: _CoreArray) -> _CoreArray:
     if get_rank(index) == 0:
 
         def extend_shape(x: Var) -> Var:
-            return op.concat([op.const([1]), op.shape(x)], axis=0)
+            return op.concat([op.const([1], dtype=np.int64), op.shape(x)], axis=0)
 
         var = op.reshape(var, extend_shape(var), allowzero=True)
         index_var = op.reshape(index.var, extend_shape(index.var), allowzero=True)
@@ -514,7 +518,8 @@ def extend_shape(x: Var) -> Var:
         reshaped_var, reshaped_index = var, index.var
     else:
         ret_shape = op.concat(
-            [op.const([-1]), op.shape(var, start=get_rank(index))], axis=0
+            [op.const([-1], dtype=np.int64), op.shape(var, start=get_rank(index))],
+            axis=0,
         )
         reshaped_var = op.reshape(var, ret_shape, allowzero=True)
         reshaped_index = op.reshape(index.var, op.const([-1]), allowzero=True)
@@ -583,7 +588,7 @@ def getitem(
     index_filtered = [x for x in index if isinstance(x, (type(None), slice))]
     axis_new_axes = [ind for ind, x in enumerate(index_filtered) if x is None]
     if len(axis_new_axes) != 0:
-        var = op.unsqueeze(var, axes=op.const(axis_new_axes))
+        var = op.unsqueeze(var, axes=op.const(axis_new_axes, dtype=np.int64))
 
     return _CoreArray(var)
 
@@ -607,7 +612,11 @@ def setitem(
     indices = getitem(ndindex(_CoreArray(op.shape(var))), index)
 
     # broadcast updates as appropriate
-    index_path_shape = op.slice(op.shape(indices.var), op.const([0]), op.const([-1]))
+    index_path_shape = op.slice(
+        op.shape(indices.var),
+        op.const([0], dtype=np.int64),
+        op.const([-1], dtype=np.int64),
+    )
     return _CoreArray(
         op.scatter_nd(var, indices.var, op.expand(updates.var, index_path_shape))
     )
@@ -640,14 +649,19 @@ def ndindex(shape: _CoreArray, to_reverse=None, axes_permutation=None) -> _CoreA
         axes_indices = [axes_permutation.index(i) for i in builtins.range(rank)]
 
     shape_var = shape.var
+    dtype = shape_var.unwrap_tensor().dtype
     ranges = [
         (
-            op.range(op.const(0), op.gather(shape_var, op.const(i)), op.const(1))
+            op.range(
+                op.const(0, dtype=dtype),
+                op.gather(shape_var, op.const(i)),
+                op.const(1, dtype=dtype),
+            )
             if i not in to_reverse
             else op.range(
-                op.sub(op.gather(shape_var, op.const(i)), op.const(1)),
-                op.const(-1),
-                op.const(-1),
+                op.sub(op.gather(shape_var, op.const(i)), op.const(1, dtype=dtype)),
+                op.const(-1, dtype=dtype),
+                op.const(-1, dtype=dtype),
             )
         )
         for i in builtins.range(rank)
@@ -657,7 +671,8 @@ def ndindex(shape: _CoreArray, to_reverse=None, axes_permutation=None) -> _CoreA
         op.unsqueeze(
             r,
             op.const(
-                [j for j in builtins.range(rank) if axes_indices[i] != j], dtype=np.int_
+                [j for j in builtins.range(rank) if axes_indices[i] != j],
+                dtype=np.int64,
             ),
         )
         for i, r in enumerate(ranges)
@@ -669,7 +684,7 @@ def ndindex(shape: _CoreArray, to_reverse=None, axes_permutation=None) -> _CoreA
     expanded_ranges = [op.expand(r, shape_var) for r in fit_ranges]
 
     ret = op.concat(
-        [op.unsqueeze(r, op.const([-1])) for r in expanded_ranges],
+        [op.unsqueeze(r, op.const([-1], dtype=np.int64)) for r in expanded_ranges],
         axis=-1,
     )
 
@@ -692,6 +707,8 @@ def static_map(
     input: _CoreArray, mapping: Mapping[KeyType, ValueType], default: ValueType | None
 ) -> _CoreArray:
     keys = np.array(tuple(mapping.keys()))
+    if keys.dtype == np.int32:
+        keys = keys.astype(np.int64)
     values = np.array(tuple(mapping.values()))
     value_dtype = values.dtype
     if default is None:
@@ -726,10 +743,11 @@ def get_indices(
     split_pos_var = split_pos.var
     positions_var = positions.var
     indices_x = op.reshape(
-        op.slice(positions_var, op.const([0]), split_pos_var), op.const([-1])
+        op.slice(positions_var, op.const([0], dtype=np.int64), split_pos_var),
+        op.const([-1], dtype=np.int64),
     )
     indices_y = op.reshape(
         op.slice(positions_var, split_pos_var, op.const([np.iinfo(np.int64).max])),
-        op.const([-1]),
+        op.const([-1], dtype=np.int64),
     )
     return _CoreArray(indices_x), _CoreArray(indices_y)
diff --git a/pyproject.toml b/pyproject.toml
index e9c9680..0962b75 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -82,12 +82,3 @@ exclude = ["docs/"]
 
 [tool.typos.default]
 extend-ignore-identifiers-re = ["scatter_nd", "arange"]
-
-[tool.pixi.project]
-channels = ["conda-forge"]
-platforms = ["osx-arm64"]
-
-[tool.pixi.pypi-dependencies]
-ndonnx = { path = ".", editable = true }
-
-[tool.pixi.tasks]
diff --git a/tests/ndonnx/test_constant_propagation.py b/tests/ndonnx/test_constant_propagation.py
index 5be3ed7..f98dd31 100644
--- a/tests/ndonnx/test_constant_propagation.py
+++ b/tests/ndonnx/test_constant_propagation.py
@@ -67,13 +67,13 @@ def dynamic_masking_model(mode: Literal["lazy", "constant"]):
 
 def constant_indexing_model(mode: Literal["lazy", "constant"]):
     if mode == "constant":
-        a = ndx.asarray([0, 1, 2, 3])
+        a = ndx.asarray([0, 1, 2, 3], dtype=ndx.int64)
     else:
         a = ndx.array(
             shape=("N",),
             dtype=ndx.int64,
         )
-    b = ndx.asarray([5, 7, 8, 8, 9, 9, 234])
+    b = ndx.asarray([5, 7, 8, 8, 9, 9, 234], dtype=ndx.int64)
     idx = ndx.asarray([1, 3, 5, 0])
     result = a * b[idx]
     return ndx.build({"a": a} if mode == "lazy" else {}, {"y": result})
diff --git a/tests/ndonnx/test_core.py b/tests/ndonnx/test_core.py
index 6845ec7..ac307c1 100644
--- a/tests/ndonnx/test_core.py
+++ b/tests/ndonnx/test_core.py
@@ -81,9 +81,11 @@ def test_null_promotion():
 
 
 def test_asarray():
-    a = ndx.asarray([1, 2, 3])
+    a = ndx.asarray([1, 2, 3], dtype=ndx.int64)
     assert a.dtype == ndx.int64
-    np.testing.assert_array_equal(np.array([1, 2, 3]), a.to_numpy(), strict=True)
+    np.testing.assert_array_equal(
+        np.array([1, 2, 3], np.int64), a.to_numpy(), strict=True
+    )
 
 
 def test_asarray_masked():
@@ -366,7 +368,7 @@ def test_matrix_transpose():
     model = ndx.build({"a": a}, {"b": b})
     np.testing.assert_equal(
         npx.matrix_transpose(npx.reshape(npx.arange(3 * 2 * 3), (3, 2, 3))),
-        run(model, {"a": np.arange(3 * 2 * 3).reshape(3, 2, 3)})["b"],
+        run(model, {"a": np.arange(3 * 2 * 3, dtype=np.int64).reshape(3, 2, 3)})["b"],
     )
 
 
@@ -377,7 +379,7 @@ def test_matrix_transpose_attribute():
     model = ndx.build({"a": a}, {"b": b})
     np.testing.assert_equal(
         npx.reshape(npx.arange(3 * 2 * 3), (3, 2, 3)).mT,
-        run(model, {"a": np.arange(3 * 2 * 3).reshape(3, 2, 3)})["b"],
+        run(model, {"a": np.arange(3 * 2 * 3, dtype=np.int64).reshape(3, 2, 3)})["b"],
     )
 
 
@@ -388,7 +390,7 @@ def test_transpose_attribute():
     model = ndx.build({"a": a}, {"b": b})
     np.testing.assert_equal(
         npx.reshape(npx.arange(3 * 2), (3, 2)).T,
-        run(model, {"a": np.arange(3 * 2).reshape(3, 2)})["b"],
+        run(model, {"a": np.arange(3 * 2, dtype=np.int64).reshape(3, 2)})["b"],
     )
 
 
@@ -399,7 +401,7 @@ def test_array_spox_interoperability():
     model = ndx.build({"a": a}, {"b": b})
     expected = npx.reshape(npx.arange(3 * 2), (3, 2)) + 5
     input = np.ma.masked_array(
-        np.arange(3 * 2).reshape(3, 2), mask=np.ones((3, 2), dtype=bool)
+        np.arange(3 * 2, dtype=np.int64).reshape(3, 2), mask=np.ones((3, 2), dtype=bool)
     )
     actual = run(model, {"a": input})["b"]
     np.testing.assert_equal(expected, actual)
diff --git a/tests/ndonnx/test_masked.py b/tests/ndonnx/test_masked.py
index c64ea53..b5af42e 100644
--- a/tests/ndonnx/test_masked.py
+++ b/tests/ndonnx/test_masked.py
@@ -183,11 +183,14 @@ def test_eager_mode():
         np.ma.masked_array([-12, 21, 12213], mask=[1, 0, 0]), dtype=ndx.nint64
     )
     np.testing.assert_equal(
-        (a + b).to_numpy(), np.ma.masked_array([2, 4, 6], mask=[0, 0, 1])
+        (a + b).to_numpy(),
+        np.ma.masked_array([2, 4, 6], mask=[0, 0, 1], dtype=np.int64),
     )
     np.testing.assert_equal(
-        (a - b).to_numpy(), np.ma.masked_array([0, 0, 0], mask=[0, 0, 1])
+        (a - b).to_numpy(),
+        np.ma.masked_array([0, 0, 0], mask=[0, 0, 1], dtype=np.int64),
     )
     np.testing.assert_equal(
-        (a * c).to_numpy(), np.ma.masked_array([-12, 42, 36639], mask=[1, 0, 1])
+        (a * c).to_numpy(),
+        np.ma.masked_array([-12, 42, 36639], mask=[1, 0, 1], dtype=np.int64),
     )
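
Note (illustration, not part of the diff): the explicit int64 pins above follow from NumPy's platform-dependent default integer. Under NumPy 1.x, plain np.array([...]) of Python ints and np.int_ both map to the C long, which is 32-bit on Windows, while ONNX operators such as Reshape (shape input), Squeeze/Unsqueeze (axes) and ScatterND/GatherND (indices) require int64, and Range requires all three inputs to share one dtype. A minimal standalone sketch of the difference, assuming a NumPy 1.x environment:

    import numpy as np

    # Platform-dependent default: int64 on Linux/macOS, int32 on Windows (NumPy 1.x).
    print(np.array([1, 2, 3]).dtype)
    print(np.dtype(np.int_))  # C long: 32-bit on Windows, 64-bit elsewhere

    # Pinning the dtype, as this patch does for constants feeding ONNX
    # shape/axes/index inputs, yields the same graph on every platform.
    axes = np.array([0], dtype=np.int64)
    print(axes.dtype)  # always int64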