diff --git a/.circleci/config.yml b/.circleci/config.yml
index c7d85bd..11e8d07 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -84,11 +84,6 @@ jobs:
dir: nnoir
- lint:
dir: nnoir
- - run:
- name: manual mypy check # workaround https://github.com/pfnet/pysen/issues/32
- working_directory: nnoir
- command: |
- poetry run mypy . --strict --implicit-reexport --ignore-missing-imports
- run:
name: Test nnoir
working_directory: nnoir
diff --git a/nnoir-onnx/nnoir_onnx/freeze.py b/nnoir-onnx/nnoir_onnx/freeze.py
index 8a6ce10..05dfbd3 100644
--- a/nnoir-onnx/nnoir_onnx/freeze.py
+++ b/nnoir-onnx/nnoir_onnx/freeze.py
@@ -5,20 +5,20 @@
from nnoir_onnx import utils
-def command_list(args) -> None:
+def command_list(args: argparse.Namespace) -> None:
model = onnx.load(args.input)
s = utils.list_dimension_variables(model)
if len(s) != 0:
print(s)
-def command_freeze(args) -> None:
+def command_freeze(args: argparse.Namespace) -> None:
model = onnx.load(args.input)
fixed_model = utils.freeze_dimension_variables(model, args.fix_dimension)
onnx.save(fixed_model, args.output)
-def freeze():
+def freeze() -> None:
print("Warning: freeze_onnx is deprecated. Instead use `onnx2nnoir --fix_dimension`.")
parser = argparse.ArgumentParser(description="ONNX Freezer")
subparsers = parser.add_subparsers()
diff --git a/nnoir-onnx/nnoir_onnx/onnx.py b/nnoir-onnx/nnoir_onnx/onnx.py
index 722ddcb..92d8559 100644
--- a/nnoir-onnx/nnoir_onnx/onnx.py
+++ b/nnoir-onnx/nnoir_onnx/onnx.py
@@ -2,35 +2,38 @@
import re
import tempfile
from itertools import chain
+from typing import Any, Dict, List, Optional
import numpy as np
import onnx
import onnxruntime
-from nnoir import *
+from nnoir import NNOIR, Value
+from nnoir.functions import Function
from nnoir_onnx.operators import *
+from numpy.typing import NDArray
-from .operators.utils import InvalidONNXData, UnknownSizedVariable, UnsupportedONNXOperation
+from .operators.utils import InvalidONNXData, Op, UnknownSizedVariable, UnsupportedONNXOperation
from .utils import freeze_dimension_variables, list_dimension_variables
-def tensor_to_narray(tensor):
+def tensor_to_narray(tensor: onnx.TensorProto) -> NDArray[Any]:
arr = []
storage = onnx.mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor.data_type]
storage = onnx.mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage]
arr = getattr(tensor, storage)
if arr == []:
- result = np.frombuffer(tensor.raw_data, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tensor.data_type])
+ result: NDArray[Any] = np.frombuffer(tensor.raw_data, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tensor.data_type]) # type: ignore
else:
result = np.array(arr, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[tensor.data_type])
shape = tensor.dims if tensor.dims != [] else [1]
return result.reshape(*shape)
-def narray_to_value_info(name, arr):
+def narray_to_value_info(name: str, arr: NDArray[Any]) -> onnx.ValueInfoProto:
return onnx.helper.make_tensor_value_info(name, onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype], arr.shape)
-def value_info_to_zero_narray(vi):
+def value_info_to_zero_narray(vi: onnx.ValueInfoProto) -> NDArray[Any]:
return np.zeros(
list(map(lambda x: x.dim_value, vi.type.tensor_type.shape.dim)),
dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type],
@@ -38,9 +41,9 @@ def value_info_to_zero_narray(vi):
class ONNX:
- def __init__(self, path, graph_name=None, fix_dimension=None):
+ def __init__(self, path: str, graph_name: Optional[str] = None, fix_dimension: Optional[Dict[str, int]] = None):
self.onnx_path = path
- self.model = onnx.load(path)
+ self.model: onnx.ModelProto = onnx.load(path)
if graph_name is not None:
self.model.graph.name = graph_name
if fix_dimension is not None:
@@ -120,13 +123,13 @@ def __init__(self, path, graph_name=None, fix_dimension=None):
self.constant_nodes = {n: self.nodes[n] for n in constant_nodes}
self.opset_version = self.model.opset_import[0].version
- def _internal_values_info(self, model):
- values = list(set([v for n in model.graph.node for v in n.output]))
+ def _internal_values_info(self, model: onnx.ModelProto) -> List[onnx.ValueInfoProto]:
+ values: List[str] = list(set([v for n in model.graph.node for v in n.output]))
return [onnx.helper.make_empty_tensor_value_info(v) for v in values]
- def _rename_to_c_ident(self):
+ def _rename_to_c_ident(self) -> None:
m = copy.deepcopy(self.model)
- value_names = [i.name for i in m.graph.input] + [v.name for v in self._internal_values_info(m)]
+ value_names: List[str] = [i.name for i in m.graph.input] + [v.name for v in self._internal_values_info(m)]
# Initializer id is not restricted C identifier syntax rules.
for initializer in self.model.graph.initializer:
rename_step = 0
@@ -151,19 +154,19 @@ def _rename_to_c_ident(self):
n.input[i] = rename_candidate
initializer.name = rename_candidate
- def _try_run(self, constant_nodes):
+ def _try_run(self, constant_nodes: List[str]) -> Dict[str, NDArray[Any]]:
model = copy.deepcopy(self.model)
while len(model.graph.output) > 0:
model.graph.output.pop(0)
inits = [v.name for v in model.graph.initializer]
- input_values = [v for v in model.graph.input if v.name not in inits]
- dummy_inputs = {i.name: value_info_to_zero_narray(i) for i in input_values}
- outputs = [
+ input_values: List[onnx.ValueInfoProto] = [v for v in model.graph.input if v.name not in inits]
+ dummy_inputs: Dict[str, NDArray[Any]] = {i.name: value_info_to_zero_narray(i) for i in input_values}
+ outputs: List[onnx.ValueInfoProto] = [
*[v for v in model.graph.input],
*self._internal_values_info(model),
]
- result = copy.deepcopy(dummy_inputs)
+ result: Dict[str, NDArray[Any]] = copy.deepcopy(dummy_inputs)
for t in model.graph.initializer:
result[t.name] = tensor_to_narray(t)
with tempfile.NamedTemporaryFile() as f:
@@ -198,62 +201,64 @@ def _try_run(self, constant_nodes):
for k, v in zip(outputs, sess.run(outputs, {i: dummy_inputs[i] for i in inputs})):
if k not in constant_nodes:
# save memory usage
- v = np.broadcast_to(np.zeros(1, dtype=v.dtype), (1,) if v.ndim == 0 else v.shape)
+ v = np.broadcast_to(np.zeros(1, dtype=v.dtype), (1,) if v.ndim == 0 else v.shape) # type: ignore
result[k] = v
dummy_inputs[k] = v
model.graph.input.append(narray_to_value_info(k, v))
return result
- def _find(self, p, xs, default=None):
+ def _find(self, p, xs, default=None): # type: ignore
return next(filter(p, xs), default)
- def _find_initializer(self, name):
- return self._find(lambda n: name == n.name, self.model.graph.initializer)
+ def _find_initializer(self, name: str) -> Optional[onnx.TensorProto]:
+ return self._find(lambda n: name == n.name, self.model.graph.initializer) # type: ignore
- def _has_initializer(self, name):
+ def _has_initializer(self, name: str) -> bool:
return self._find_initializer(name) is not None
- def _find_generator(self, name):
- return self._find(lambda n: name in n.output, self.model.graph.node)
+ def _find_generator(self, name: str) -> Optional[onnx.NodeProto]:
+ return self._find(lambda n: name in n.output, self.model.graph.node) # type: ignore
- def _find_input(self, name):
- return self._find(lambda n: name == n.name, self.model.graph.input)
+ def _find_input(self, name: str) -> Optional[onnx.ValueInfoProto]:
+ return self._find(lambda n: name == n.name, self.model.graph.input) # type: ignore
- def _has_input(self, name):
+ def _has_input(self, name: str) -> bool:
return self._find_input(name) is not None
- def to_NNOIR(self):
- inputs = list(map(lambda x: x.name, self.sess.get_inputs()))
- outputs = list(map(lambda x: x.name, self.sess.get_outputs()))
+ def to_NNOIR(self) -> NNOIR:
+
+ inputs: List[str] = [x.name for x in self.sess.get_inputs()]
+ outputs: List[str] = [x.name for x in self.sess.get_outputs()]
try:
functions = self._to_NNOIR_functions()
except UnsupportedONNXOperation as e:
self._dump_dot()
raise e
- nodes = [Value(n, self.nodes[n]) for n in set(chain.from_iterable(map(lambda x: x.inputs + x.outputs, functions)))]
- # rename to C ident (some frameworks don't satisfy the onnx spec.)
- renaming_table = {n.name: f"v{i}".encode("utf-8") for i, n in enumerate(nodes)}
+ # FIXME: name of nnoir.Value should be bytes. (Throughout the onnx_nnoir source code, there are many mismatches between byte and str types.)
+ nodes = [Value(n, self.nodes[n]) for n in set(chain.from_iterable(map(lambda x: x.inputs + x.outputs, functions)))] # type: ignore
+
+ renaming_table: Dict[str, bytes] = {n.name: f"v{i}".encode("utf-8") for i, n in enumerate(nodes)} # type: ignore
- def rename(x):
+ def rename(x: str) -> bytes:
try:
return renaming_table[x]
except Exception as e:
raise RuntimeError(f"not found key {x} in renaming_table")
- inputs = list(map(rename, inputs))
- outputs = list(map(rename, outputs))
+ inputs: List[bytes] = list(map(rename, inputs)) # type: ignore
+ outputs: List[bytes] = list(map(rename, outputs)) # type: ignore
- def rename_function(e):
- e.inputs = list(map(rename, e.inputs))
- e.outputs = list(map(rename, e.outputs))
+ def rename_function(e: Function) -> Function:
+ e.inputs = list(map(rename, e.inputs)) # type: ignore
+ e.outputs = list(map(rename, e.outputs)) # type: ignore
return e
functions = list(map(rename_function, functions))
- def rename_node(n):
- n.name = rename(n.name)
+ def rename_node(n: Value) -> Value:
+ n.name = rename(n.name) # type: ignore
return n
nodes = list(map(rename_node, nodes))
@@ -262,13 +267,13 @@ def rename_node(n):
self.model.graph.name.encode("utf-8"),
self.model.producer_name,
self.model.producer_version,
- inputs,
- outputs,
+ inputs, # type: ignore
+ outputs, # type: ignore
nodes,
functions,
)
- def _eval_nodes(self, nodes):
+ def _eval_nodes(self, nodes: List[str]) -> Dict[str, Any]:
m = copy.deepcopy(self.model)
for n in m.graph.output:
m.graph.output.remove(n)
@@ -286,7 +291,7 @@ def _eval_nodes(self, nodes):
result = []
return dict(zip(output_names, result))
- def test(self):
+ def test(self) -> None:
with tempfile.NamedTemporaryFile() as tmpf:
m = copy.deepcopy(self.model)
for n in m.graph.output:
@@ -302,14 +307,14 @@ def test(self):
outputs = [x.name for x in sess.get_inputs()]
results = sess.run(outputs, inputs)
- def op_for_node(self, node):
+ def op_for_node(self, node: onnx.NodeProto) -> Op:
op_name = f"Op{node.op_type}"
if op_name in globals():
- return globals()[op_name](node, self.opset_version)
+ return globals()[op_name](node, self.opset_version) # type: ignore
else:
raise UnsupportedONNXOperation(node, f"converting from {node.op_type} is undefined")
- def _to_NNOIR_functions(self):
+ def _to_NNOIR_functions(self) -> List[Function]:
outputs = list(map(lambda x: x.name, self.sess.get_outputs()))
visited = []
known_generator = []
@@ -324,7 +329,6 @@ def _to_NNOIR_functions(self):
continue
if generator is not None:
function = self.op_for_node(generator).to_function(self.nodes, self.constant_nodes)
-
inputs = list(chain.from_iterable(map(lambda x: x.inputs, function)))
outputs += inputs
functions += function
@@ -332,12 +336,13 @@ def _to_NNOIR_functions(self):
initializer = self._find_initializer(o)
if initializer is not None:
raise UnsupportedONNXOperation(initializer, "converting from Constant is undefined")
+
return functions
- def _list_constant_nodes(self):
- outputs = list(map(lambda x: x.name, self.sess.get_outputs()))
+ def _list_constant_nodes(self) -> List[str]:
+ outputs: List[str] = [x.name for x in self.sess.get_outputs()]
- def dfs(visited, nodes, result):
+ def dfs(visited: List[str], nodes: List[str], result: List[str]) -> None:
for n in nodes:
if self._has_initializer(n):
result.append(n)
@@ -345,6 +350,9 @@ def dfs(visited, nodes, result):
pass
else:
generator = self._find_generator(n)
+ if generator is None:
+ raise InvalidONNXData(f"generator {n} not found")
+
if generator.op_type == "Shape": # In nnoir, array shape is known information.
result.append(n)
next_nodes = []
@@ -360,11 +368,11 @@ def dfs(visited, nodes, result):
result.append(o)
visited.append(n)
- result = []
+ result: List[str] = []
dfs([], outputs, result)
return result
- def _dot_box_color(self, node):
+ def _dot_box_color(self, node: onnx.NodeProto) -> str:
if not all([o in self.constant_nodes for o in node.output]):
try:
_ = self.op_for_node(node).to_function(self.nodes, self.constant_nodes)
@@ -376,14 +384,14 @@ def _dot_box_color(self, node):
else:
return "white"
- def _dump_dot(self):
+ def _dump_dot(self) -> None:
dot_path = f"{self.onnx_path}.dot"
ln = "\l"
value_name_table = NameTable("val")
function_name_table = NameTable("fun")
- def is_used(name):
+ def is_used(name: str) -> bool:
for n in self.model.graph.node:
if name in n.input:
return True
@@ -446,22 +454,9 @@ def is_used(name):
)
-def to_dummy_input(x):
- if hasattr(x.type, "tensor_type"):
- if x.type.tensor_type.elem_type == onnx.TensorProto.FLOAT:
- return np.zeros(
- tuple(map(lambda d: d.dim_value, x.type.tensor_type.shape.dim)),
- dtype=np.float32,
- )
- else:
- raise "unsupported"
- else:
- raise "unsupported"
-
-
class NameTable:
def __init__(self, prefix: str) -> None:
- self.tbl = dict()
+ self.tbl: Dict[str, str] = dict()
self.prefix = prefix
def __getitem__(self, key: str) -> str:
diff --git a/nnoir-onnx/nnoir_onnx/onnx2nnoir.py b/nnoir-onnx/nnoir_onnx/onnx2nnoir.py
index 2a74f18..8c83b5a 100644
--- a/nnoir-onnx/nnoir_onnx/onnx2nnoir.py
+++ b/nnoir-onnx/nnoir_onnx/onnx2nnoir.py
@@ -5,7 +5,7 @@
from nnoir_onnx import ONNX, utils
-def main():
+def main() -> None:
parser = argparse.ArgumentParser(description="ONNX to NNOIR Converter")
parser.add_argument(
"-o", "--output", dest="output", type=str, required=True, metavar="NNOIR", help="output(NNOIR) file path"
diff --git a/nnoir-onnx/nnoir_onnx/operators/__init__.py b/nnoir-onnx/nnoir_onnx/operators/__init__.py
index 1b52e9c..40eb388 100644
--- a/nnoir-onnx/nnoir_onnx/operators/__init__.py
+++ b/nnoir-onnx/nnoir_onnx/operators/__init__.py
@@ -1,3 +1,9 @@
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function
+from numpy.typing import NDArray
+
from .add import OpAdd
from .average_pool import OpAveragePool
from .batch_normalization import OpBatchNormalization
diff --git a/nnoir-onnx/nnoir_onnx/operators/add.py b/nnoir-onnx/nnoir_onnx/operators/add.py
index 3daf98c..a193d28 100644
--- a/nnoir-onnx/nnoir_onnx/operators/add.py
+++ b/nnoir-onnx/nnoir_onnx/operators/add.py
@@ -1,24 +1,29 @@
-from nnoir.functions import *
+from typing import Any, Dict, List
+
+import onnx
+from nnoir.functions import Add, Constant, Function
+from numpy.typing import NDArray
from .utils import *
class OpAdd(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpAdd, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[a, b] = self.node.input
- def constant_add(v, w):
+ def constant_add(v: str, w: str) -> List[Function]:
internal_node = gen_unregisterd_node_name(env)
register_node(env, internal_node, env[w])
return [
- Constant([], [internal_node], value=constants[w]),
- Add([v, internal_node], list(self.node.output)),
+ Constant([], [internal_node], value=constants[w]), # type: ignore
+ Add([v, internal_node], list(self.node.output)), # type: ignore
]
if a in constants and b not in constants:
+
return constant_add(b, a)
elif a not in constants and b in constants:
return constant_add(a, b)
diff --git a/nnoir-onnx/nnoir_onnx/operators/average_pool.py b/nnoir-onnx/nnoir_onnx/operators/average_pool.py
index e0f0b0d..b969670 100644
--- a/nnoir-onnx/nnoir_onnx/operators/average_pool.py
+++ b/nnoir-onnx/nnoir_onnx/operators/average_pool.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import AveragePooling2D, Function
+from numpy.typing import NDArray
from .utils import *
class OpAveragePool(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super().__init__(node, *args)
self.kernel_shape = None
@@ -28,7 +32,7 @@ def __init__(self, node, *args):
if attr.name == "count_include_pad":
self.count_include_pad = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
_input = env[x]
@@ -36,8 +40,8 @@ def to_function(self, env, constants):
channel = _input.shape[1]
in_h = _input.shape[2]
in_w = _input.shape[3]
- kh = self.kernel_shape[0]
- kw = self.kernel_shape[1]
+ kh = self.kernel_shape[0] # type: ignore
+ kw = self.kernel_shape[1] # type: ignore
sy = self.strides[0]
sx = self.strides[1]
diff --git a/nnoir-onnx/nnoir_onnx/operators/batch_normalization.py b/nnoir-onnx/nnoir_onnx/operators/batch_normalization.py
index d12c044..8503ba0 100644
--- a/nnoir-onnx/nnoir_onnx/operators/batch_normalization.py
+++ b/nnoir-onnx/nnoir_onnx/operators/batch_normalization.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import BatchNormalization, Function
+from numpy.typing import NDArray
from .utils import *
class OpBatchNormalization(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super().__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x, gamma, beta, mean, var] = self.node.input
if gamma not in constants:
raise UnsupportedONNXOperation(self.node, "missing gamma")
diff --git a/nnoir-onnx/nnoir_onnx/operators/clip.py b/nnoir-onnx/nnoir_onnx/operators/clip.py
index 5faf2dd..281dbde 100644
--- a/nnoir-onnx/nnoir_onnx/operators/clip.py
+++ b/nnoir-onnx/nnoir_onnx/operators/clip.py
@@ -1,16 +1,20 @@
+from typing import Any, Dict, List, Optional, Tuple, Union
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import ClippedReLU, Function
+from numpy.typing import NDArray
from .utils import *
class OpClip(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpClip, self).__init__(node, *args)
- def to_function(self, env, constants):
- _min = -3.4028234663852886e38
- _max = 3.4028234663852886e38
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
+ _min: Union[float, NDArray[Any]] = -3.4028234663852886e38
+ _max: Union[float, NDArray[Any]] = 3.4028234663852886e38
if self.opset_version < 6:
raise UnsupportedONNXOperation(self.node, "only opset_version >= 6 is supported")
diff --git a/nnoir-onnx/nnoir_onnx/operators/concat.py b/nnoir-onnx/nnoir_onnx/operators/concat.py
index a2e7ffe..e267222 100644
--- a/nnoir-onnx/nnoir_onnx/operators/concat.py
+++ b/nnoir-onnx/nnoir_onnx/operators/concat.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Concat, Function
+from numpy.typing import NDArray
from .utils import *
class OpConcat(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpConcat, self).__init__(node, *args)
self.axis = None
@@ -13,7 +17,7 @@ def __init__(self, node, *args):
if attr.name == "axis":
self.axis = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[y] = self.node.output
- axis = len(env[y].shape) + self.axis if self.axis < 0 else self.axis
- return [Concat(list(self.node.input), list(self.node.output), axis=int(axis))]
+ axis = len(env[y].shape) + self.axis if self.axis < 0 else self.axis # type: ignore
+ return [Concat(list(self.node.input), list(self.node.output), axis=int(axis))] # type: ignore
diff --git a/nnoir-onnx/nnoir_onnx/operators/conv.py b/nnoir-onnx/nnoir_onnx/operators/conv.py
index 2d4e59c..9335667 100644
--- a/nnoir-onnx/nnoir_onnx/operators/conv.py
+++ b/nnoir-onnx/nnoir_onnx/operators/conv.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Convolution2D, DepthwiseConvolution2D, Function
+from numpy.typing import NDArray
from .utils import *
class OpConv(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpConv, self).__init__(node, *args)
self.kernel_shape = None
@@ -28,14 +32,14 @@ def __init__(self, node, *args):
if attr.name == "pads":
self.pads = attr.ints
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
b = None
if len(self.node.input) == 2:
[x, W] = self.node.input
elif len(self.node.input) == 3:
[x, W, b] = self.node.input
else:
- raise "invalid"
+            raise ValueError("invalid")
if W not in constants:
raise UnsupportedONNXOperation(self.node, "W must be constant")
W = constants[W]
@@ -49,8 +53,8 @@ def to_function(self, env, constants):
in_ch = _input.shape[1]
in_h = _input.shape[2]
in_w = _input.shape[3]
- kh = self.kernel_shape[0]
- kw = self.kernel_shape[1]
+ kh = self.kernel_shape[0] # type: ignore
+ kw = self.kernel_shape[1] # type: ignore
sy = self.strides[0]
sx = self.strides[1]
dy = self.dilations[0]
diff --git a/nnoir-onnx/nnoir_onnx/operators/cos.py b/nnoir-onnx/nnoir_onnx/operators/cos.py
index 991bb7c..e6cd1fb 100644
--- a/nnoir-onnx/nnoir_onnx/operators/cos.py
+++ b/nnoir-onnx/nnoir_onnx/operators/cos.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Cos, Function
+from numpy.typing import NDArray
from .utils import *
class OpCos(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpCos, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Cos(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/div.py b/nnoir-onnx/nnoir_onnx/operators/div.py
index 6c94a68..c664023 100644
--- a/nnoir-onnx/nnoir_onnx/operators/div.py
+++ b/nnoir-onnx/nnoir_onnx/operators/div.py
@@ -1,22 +1,26 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Constant, Div, Function, Mul
+from numpy.typing import NDArray
from .utils import *
class OpDiv(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpDiv, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[a, b] = self.node.input
- def scale(v, w):
+ def scale(v: str, w: NDArray[Any]) -> List[Function]:
internal_node = gen_unregisterd_node_name(env)
register_node(env, internal_node, w)
return [
- Constant([], [internal_node], value=w),
- Mul([v, internal_node], list(self.node.output)),
+ Constant([], [internal_node], value=w), # type: ignore
+ Mul([v, internal_node], list(self.node.output)), # type: ignore
]
if a in constants and b not in constants:
diff --git a/nnoir-onnx/nnoir_onnx/operators/dropout.py b/nnoir-onnx/nnoir_onnx/operators/dropout.py
index 9486fbf..9979868 100644
--- a/nnoir-onnx/nnoir_onnx/operators/dropout.py
+++ b/nnoir-onnx/nnoir_onnx/operators/dropout.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Transpose
+from numpy.typing import NDArray
from .utils import *
class OpDropout(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpDropout, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [
Transpose(
list(self.node.input),
diff --git a/nnoir-onnx/nnoir_onnx/operators/elu.py b/nnoir-onnx/nnoir_onnx/operators/elu.py
index 4dc1edc..fb1c3f6 100644
--- a/nnoir-onnx/nnoir_onnx/operators/elu.py
+++ b/nnoir-onnx/nnoir_onnx/operators/elu.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import ELU, Function
+from numpy.typing import NDArray
from .utils import *
class OpElu(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpElu, self).__init__(node, *args)
self.alpha = 1.0
@@ -12,5 +16,5 @@ def __init__(self, node, *args):
if attr.name == "alpha":
self.alpha = attr.f
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [ELU(list(self.node.input), list(self.node.output), alpha=self.alpha)]
diff --git a/nnoir-onnx/nnoir_onnx/operators/exp.py b/nnoir-onnx/nnoir_onnx/operators/exp.py
index 38acb6d..f8725a1 100644
--- a/nnoir-onnx/nnoir_onnx/operators/exp.py
+++ b/nnoir-onnx/nnoir_onnx/operators/exp.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Exp, Function
+from numpy.typing import NDArray
from .utils import *
class OpExp(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpExp, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Exp(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/flatten.py b/nnoir-onnx/nnoir_onnx/operators/flatten.py
index 6b965b3..75ff1e4 100644
--- a/nnoir-onnx/nnoir_onnx/operators/flatten.py
+++ b/nnoir-onnx/nnoir_onnx/operators/flatten.py
@@ -1,13 +1,16 @@
from functools import reduce
+from typing import Any, Dict, List, Optional, Tuple
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Function, Reshape
+from numpy.typing import NDArray
from .utils import *
class OpFlatten(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpFlatten, self).__init__(node, *args)
self.axis = 1
@@ -15,7 +18,7 @@ def __init__(self, node, *args):
if attr.name == "axis":
self.axis = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
if self.axis == 0:
flattened_shape = (1, -1)
diff --git a/nnoir-onnx/nnoir_onnx/operators/gemm.py b/nnoir-onnx/nnoir_onnx/operators/gemm.py
index 68ed87c..eb418f3 100644
--- a/nnoir-onnx/nnoir_onnx/operators/gemm.py
+++ b/nnoir-onnx/nnoir_onnx/operators/gemm.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Gemm, Linear, Transpose
+from numpy.typing import NDArray
from .utils import *
class OpGemm(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpGemm, self).__init__(node, *args)
self.alpha = 1.0
@@ -21,7 +25,7 @@ def __init__(self, node, *args):
if attr.name == "transB":
self.transB = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
if len(self.node.input) == 3:
[A, B, C] = self.node.input
else:
@@ -51,9 +55,9 @@ def to_function(self, env, constants):
internal_node = f"{A}_{id(A)}"
env[internal_node] = env[A].T
return [
- Transpose([A], [internal_node], axes=[1, 0]),
+ Transpose([A], [internal_node], axes=[1, 0]), # type: ignore
Linear(
- [internal_node],
+ [internal_node], # type: ignore
list(self.node.output),
W=self.alpha * b,
b=self.beta * c.ravel(),
diff --git a/nnoir-onnx/nnoir_onnx/operators/global_average_pool.py b/nnoir-onnx/nnoir_onnx/operators/global_average_pool.py
index da0054b..2365b9e 100644
--- a/nnoir-onnx/nnoir_onnx/operators/global_average_pool.py
+++ b/nnoir-onnx/nnoir_onnx/operators/global_average_pool.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import AveragePooling2D, Function
+from numpy.typing import NDArray
from .utils import *
class OpGlobalAveragePool(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpGlobalAveragePool, self).__init__(node, *args)
self.kernel_shape = None
@@ -28,7 +32,7 @@ def __init__(self, node, *args):
if attr.name == "count_include_pad":
self.count_include_pad = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
_input = env[x]
diff --git a/nnoir-onnx/nnoir_onnx/operators/hard_sigmoid.py b/nnoir-onnx/nnoir_onnx/operators/hard_sigmoid.py
index 2fc12eb..e048330 100644
--- a/nnoir-onnx/nnoir_onnx/operators/hard_sigmoid.py
+++ b/nnoir-onnx/nnoir_onnx/operators/hard_sigmoid.py
@@ -1,16 +1,20 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import AddConstant, ClippedReLU, Function, MulConstant
+from numpy.typing import NDArray
from .utils import *
# https://github.com/onnx/onnx/blob/main/docs/Operators.md#hardsigmoid
class OpHardSigmoid(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpHardSigmoid, self).__init__(node, *args)
if self.opset_version < 6:
raise UnsupportedONNXOperation(self.node, "only opset_version >= 6 is supported")
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
t0 = gen_unregisterd_node_name(env)
register_node(env, t0, env[x])
@@ -27,9 +31,9 @@ def to_function(self, env, constants):
return [
# t0 = alpha * x
- MulConstant([x], [t0], value=alpha),
+ MulConstant([x], [t0], value=alpha), # type: ignore
# t1 = alpha * x + beta
- AddConstant([t0], [t1], value=beta),
+ AddConstant([t0], [t1], value=beta), # type: ignore
# output = max(0, min(alpha * x + beta, 1))
- ClippedReLU([t1], list(self.node.output), upper=1.0),
+ ClippedReLU([t1], list(self.node.output), upper=1.0), # type: ignore
]
diff --git a/nnoir-onnx/nnoir_onnx/operators/hard_swish.py b/nnoir-onnx/nnoir_onnx/operators/hard_swish.py
index 2a99c01..7f92d27 100644
--- a/nnoir-onnx/nnoir_onnx/operators/hard_swish.py
+++ b/nnoir-onnx/nnoir_onnx/operators/hard_swish.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import AddConstant, ClippedReLU, Function, Mul, MulConstant
+from numpy.typing import NDArray
from .utils import *
class OpHardSwish(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpHardSwish, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
t0 = gen_unregisterd_node_name(env)
register_node(env, t0, env[x])
@@ -17,8 +21,8 @@ def to_function(self, env, constants):
register_node(env, t2, env[x])
return [
- AddConstant([x], [t0], value=3.0),
- ClippedReLU([t0], [t1], upper=6.0),
- Mul([x, t1], [t2]),
- MulConstant([t2], list(self.node.output), value=1 / 6),
+ AddConstant([x], [t0], value=3.0), # type: ignore
+ ClippedReLU([t0], [t1], upper=6.0), # type: ignore
+ Mul([x, t1], [t2]), # type: ignore
+ MulConstant([t2], list(self.node.output), value=1 / 6), # type: ignore
]
diff --git a/nnoir-onnx/nnoir_onnx/operators/leaky_relu.py b/nnoir-onnx/nnoir_onnx/operators/leaky_relu.py
index d2b3231..5813489 100644
--- a/nnoir-onnx/nnoir_onnx/operators/leaky_relu.py
+++ b/nnoir-onnx/nnoir_onnx/operators/leaky_relu.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, LeakyReLU
+from numpy.typing import NDArray
from .utils import *
class OpLeakyRelu(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpLeakyRelu, self).__init__(node, *args)
self.alpha = 0.01
@@ -12,5 +16,5 @@ def __init__(self, node, *args):
if attr.name == "alpha":
self.alpha = attr.f
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [LeakyReLU(list(self.node.input), list(self.node.output), slope=self.alpha)]
diff --git a/nnoir-onnx/nnoir_onnx/operators/lrn.py b/nnoir-onnx/nnoir_onnx/operators/lrn.py
index 6aec592..213e362 100644
--- a/nnoir-onnx/nnoir_onnx/operators/lrn.py
+++ b/nnoir-onnx/nnoir_onnx/operators/lrn.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, LocalResponseNormalization
+from numpy.typing import NDArray
from .utils import *
class OpLRN(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpLRN, self).__init__(node, *args)
self.alpha = 0.0001
@@ -21,14 +25,14 @@ def __init__(self, node, *args):
if attr.name == "size":
self.size = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [
LocalResponseNormalization(
list(self.node.input),
list(self.node.output),
n=self.size,
k=self.bias,
- alpha=self.alpha / self.size,
+ alpha=self.alpha / self.size, # type: ignore
beta=self.beta,
)
]
diff --git a/nnoir-onnx/nnoir_onnx/operators/lstm.py b/nnoir-onnx/nnoir_onnx/operators/lstm.py
index 5b5fc02..ce335b4 100644
--- a/nnoir-onnx/nnoir_onnx/operators/lstm.py
+++ b/nnoir-onnx/nnoir_onnx/operators/lstm.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple, no_type_check
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Add, Concat, Constant, Function, Linear, Mul, ReLU, Reshape, Sigmoid, Sum, Tanh, Transpose
+from numpy.typing import NDArray
from .utils import *
class OpLSTM(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpLSTM, self).__init__(node, *args)
self.activation_alpha = []
@@ -32,7 +36,8 @@ def __init__(self, node, *args):
if attr.name == "input_forget":
self.input_forget = attr.i
- def to_function(self, env, constants):
+ @no_type_check
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
num_of_gates = 4
num_of_peepholes = 3
num_directions = (env[self.node.input[1]].shape)[0]
@@ -48,7 +53,7 @@ def to_function(self, env, constants):
activation_f, activation_g, activation_h = [Sigmoid, Tanh, Tanh]
elif l == 3:
- def to_op(s):
+ def to_op(s: bytes) -> Function:
if s == b"Sigmoid":
return Sigmoid
elif s == b"Tanh":
@@ -56,13 +61,13 @@ def to_op(s):
elif s == b"Relu":
return ReLU
else:
- raise UnsupportedONNXOperation(self.node, f"{s} is not supported")
+ raise UnsupportedONNXOperation(self.node, f"{s!r} is not supported")
activation_f, activation_g, activation_h = [to_op(f) for f in self.activations]
else:
raise UnsupportedONNXOperation(self.node, "the number of activations must be 0 or 3")
- graph = []
+ graph: List[Function] = []
init_h = np.zeros((batch_size, hidden_size)).astype(np.float32)
init_c = np.zeros((batch_size, hidden_size)).astype(np.float32)
ps = np.zeros((num_of_peepholes, hidden_size)).astype(np.float32)
@@ -208,12 +213,12 @@ def to_op(s):
dummy_res = np.zeros((batch_size, hidden_size)).astype(np.float32)
- def gate(env, res, x, h, W, R, WB, RB, f, c=None, P=None):
+ def gate(env, res, x, h, W, R, WB, RB, f, c=None, P=None) -> List[Function]:
t0 = gen_new_node(env, dummy_res)
t1 = gen_new_node(env, dummy_res)
t2 = gen_new_node(env, dummy_res)
- graph = []
+ graph: List[Function] = []
graph += [Linear([x], [t0], W=W, b=WB)]
graph += [Linear([h], [t1], W=R, b=RB)]
graph += [Add([t0, t1], [t2])]
@@ -270,7 +275,7 @@ def gate(env, res, x, h, W, R, WB, RB, f, c=None, P=None):
raise UnsupportedONNXOperation(self.node, "direction is not forward")
-def gen_new_node(env, value):
+def gen_new_node(env: Dict[str, NDArray[Any]], value: NDArray[Any]) -> str:
n = gen_unregisterd_node_name(env)
register_node(env, n, value)
return n
diff --git a/nnoir-onnx/nnoir_onnx/operators/mat_mul.py b/nnoir-onnx/nnoir_onnx/operators/mat_mul.py
index 28d5b68..4cb5f5f 100644
--- a/nnoir-onnx/nnoir_onnx/operators/mat_mul.py
+++ b/nnoir-onnx/nnoir_onnx/operators/mat_mul.py
@@ -1,10 +1,14 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import Constant, Linear, MatMul
+import onnx
+from nnoir.functions import Constant, Function, Linear, MatMul
+from numpy.typing import NDArray
from .utils import Op, gen_unregisterd_node_name, register_node
-def gen_value(env, arr):
+def gen_value(env: Dict[str, NDArray[Any]], arr: NDArray[Any]) -> str:
name = gen_unregisterd_node_name(env)
register_node(env, name, arr)
@@ -12,18 +16,18 @@ def gen_value(env, arr):
class OpMatMul(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpMatMul, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x, W] = self.node.input
if W in constants and constants[W].ndim == 2 and env[x].ndim == 2:
return [Linear([x], list(self.node.output), W=env[W].T, b=None)]
elif W in constants:
const_name = gen_value(env, constants[W])
nodes = [
- Constant([], [const_name], value=constants[W]),
- MatMul([x, const_name], list(self.node.output)),
+ Constant([], [const_name], value=constants[W]), # type: ignore
+ MatMul([x, const_name], list(self.node.output)), # type: ignore
]
return nodes
else:
diff --git a/nnoir-onnx/nnoir_onnx/operators/max_pool.py b/nnoir-onnx/nnoir_onnx/operators/max_pool.py
index 80151e0..d8ad5e7 100644
--- a/nnoir-onnx/nnoir_onnx/operators/max_pool.py
+++ b/nnoir-onnx/nnoir_onnx/operators/max_pool.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Function, MaxPooling2D
+from numpy.typing import NDArray
from .utils import *
class OpMaxPool(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpMaxPool, self).__init__(node, *args)
self.kernel_shape = None
@@ -36,7 +40,7 @@ def __init__(self, node, *args):
else:
raise UnsupportedONNXOperation(self.node, f"unknown attribute {attr.name}")
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
_input = env[x]
@@ -44,8 +48,8 @@ def to_function(self, env, constants):
channel = _input.shape[1]
in_h = _input.shape[2]
in_w = _input.shape[3]
- kh = self.kernel_shape[0]
- kw = self.kernel_shape[1]
+ kh = self.kernel_shape[0] # type: ignore
+ kw = self.kernel_shape[1] # type: ignore
sy = self.strides[0]
sx = self.strides[1]
diff --git a/nnoir-onnx/nnoir_onnx/operators/mul.py b/nnoir-onnx/nnoir_onnx/operators/mul.py
index 543823f..d230fa5 100644
--- a/nnoir-onnx/nnoir_onnx/operators/mul.py
+++ b/nnoir-onnx/nnoir_onnx/operators/mul.py
@@ -1,21 +1,25 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Constant, Function, Mul
+from numpy.typing import NDArray
from .utils import *
class OpMul(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super().__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[a, b] = self.node.input
- def scale(v, w):
+ def scale(v: str, w: str) -> List[Function]:
internal_node = gen_unregisterd_node_name(env)
register_node(env, internal_node, env[w])
return [
- Constant([], [internal_node], value=constants[w]),
- Mul([v, internal_node], list(self.node.output)),
+ Constant([], [internal_node], value=constants[w]), # type: ignore
+ Mul([v, internal_node], list(self.node.output)), # type: ignore
]
if a in constants and b not in constants:
diff --git a/nnoir-onnx/nnoir_onnx/operators/pad.py b/nnoir-onnx/nnoir_onnx/operators/pad.py
index 60aeec6..fd8248e 100644
--- a/nnoir-onnx/nnoir_onnx/operators/pad.py
+++ b/nnoir-onnx/nnoir_onnx/operators/pad.py
@@ -1,23 +1,27 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import ConstantPadding, Function
+from numpy.typing import NDArray
from .utils import *
class OpPad(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpPad, self).__init__(node, *args)
self.mode = b"constant"
self.pads = None
self.value = 0.0
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
if self.opset_version >= 11:
# pads
if not self.node.input[1] in constants:
raise UnsupportedONNXOperation(self.node, 'pads must be "constant"')
- self.pads = constants[self.node.input[1]]
+ self.pads = constants[self.node.input[1]] # type: ignore
# optional: constant_value
if len(self.node.input) >= 3:
@@ -40,7 +44,7 @@ def to_function(self, env, constants):
raise UnsupportedONNXOperation(self.node, f"unknown attribute {attr.s}")
input_ = [self.node.input[0]]
- pads = list(map(int, self.pads)) # In ONNX specification, the type of `pads` is int64
+ pads = list(map(int, self.pads)) # type: ignore # In ONNX specification, the type of `pads` is int64
else:
# opset version >= 2
for attr in self.node.attribute:
@@ -52,11 +56,11 @@ def to_function(self, env, constants):
self.value = attr.f
input_ = list(self.node.input)
- pads = self.pads
+ pads = self.pads # type: ignore
if self.mode != b"constant":
raise UnsupportedONNXOperation(self.node, 'mode must be "constant"')
- n = len(self.pads) // 2
+ n = len(self.pads) // 2 # type: ignore
return [
ConstantPadding(
input_,
diff --git a/nnoir-onnx/nnoir_onnx/operators/prelu.py b/nnoir-onnx/nnoir_onnx/operators/prelu.py
index cab4a2a..d035a1e 100644
--- a/nnoir-onnx/nnoir_onnx/operators/prelu.py
+++ b/nnoir-onnx/nnoir_onnx/operators/prelu.py
@@ -1,20 +1,24 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, LeakyReLU
+from numpy.typing import NDArray
from .utils import *
class OpPRelu(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpPRelu, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x, slope] = self.node.input
if slope in constants.keys():
c = constants[slope].ravel()
if len(c) != 1:
raise UnsupportedONNXOperation(self.node, "# of slope size must be 1")
- v = [LeakyReLU([x], list(self.node.output), slope=float(c[0]))]
+ v: List[Function] = [LeakyReLU([x], list(self.node.output), slope=float(c[0]))]
return v
else:
raise UnsupportedONNXOperation(self.node, "# of slope must be constant")
diff --git a/nnoir-onnx/nnoir_onnx/operators/reduce_mean.py b/nnoir-onnx/nnoir_onnx/operators/reduce_mean.py
index 26c1215..2f50729 100644
--- a/nnoir-onnx/nnoir_onnx/operators/reduce_mean.py
+++ b/nnoir-onnx/nnoir_onnx/operators/reduce_mean.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, MulConstant, Sum
+from numpy.typing import NDArray
from .utils import *
class OpReduceMean(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpReduceMean, self).__init__(node, *args)
self.axes = None
@@ -15,7 +19,7 @@ def __init__(self, node, *args):
if attr.name == "keepdims":
self.keepdims = attr.i > 0
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x] = self.node.input
[y] = self.node.output
axes = self.axes
@@ -32,9 +36,9 @@ def to_function(self, env, constants):
return [
Sum(
list(self.node.input),
- [internal_node],
+ [internal_node], # type: ignore
axes=list(axes),
keepdims=self.keepdims,
),
- MulConstant([internal_node], list(self.node.output), value=float(1.0 / n)),
+ MulConstant([internal_node], list(self.node.output), value=float(1.0 / n)), # type: ignore
]
diff --git a/nnoir-onnx/nnoir_onnx/operators/reduce_sum.py b/nnoir-onnx/nnoir_onnx/operators/reduce_sum.py
index 213d4ba..62d7e53 100644
--- a/nnoir-onnx/nnoir_onnx/operators/reduce_sum.py
+++ b/nnoir-onnx/nnoir_onnx/operators/reduce_sum.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Sum
+from numpy.typing import NDArray
from .utils import *
class OpReduceSum(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpReduceSum, self).__init__(node, *args)
self.axes = None
@@ -15,7 +19,7 @@ def __init__(self, node, *args):
if attr.name == "keepdims":
self.keepdims = attr.i > 0
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
x = self.node.input[0]
axes = self.axes
if axes is None and len(self.node.input) > 1: # Opset 13
diff --git a/nnoir-onnx/nnoir_onnx/operators/relu.py b/nnoir-onnx/nnoir_onnx/operators/relu.py
index b7b2d65..37f6b14 100644
--- a/nnoir-onnx/nnoir_onnx/operators/relu.py
+++ b/nnoir-onnx/nnoir_onnx/operators/relu.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, ReLU
+from numpy.typing import NDArray
from .utils import *
class OpRelu(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpRelu, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [ReLU(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/reshape.py b/nnoir-onnx/nnoir_onnx/operators/reshape.py
index f59537d..0ab9ad9 100644
--- a/nnoir-onnx/nnoir_onnx/operators/reshape.py
+++ b/nnoir-onnx/nnoir_onnx/operators/reshape.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Reshape
+from numpy.typing import NDArray
from .utils import *
class OpReshape(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpReshape, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[x, _] = self.node.input
[y] = self.node.output
return [Reshape([x], list(self.node.output), shape=list(map(int, env[y].shape)))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/resize.py b/nnoir-onnx/nnoir_onnx/operators/resize.py
index fc084ed..1ed44d8 100644
--- a/nnoir-onnx/nnoir_onnx/operators/resize.py
+++ b/nnoir-onnx/nnoir_onnx/operators/resize.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Resize2D
+from numpy.typing import NDArray
from .utils import *
class OpResize(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpResize, self).__init__(node, *args)
if self.opset_version < 11:
@@ -61,7 +65,7 @@ def __init__(self, node, *args):
self.node, f"{self.nearest_mode.decode('utf-8')} is not supported for Resize nearest mode."
)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
x, *_ = self.node.input
[y] = self.node.output
return [
diff --git a/nnoir-onnx/nnoir_onnx/operators/sigmoid.py b/nnoir-onnx/nnoir_onnx/operators/sigmoid.py
index 9a26946..83b2342 100644
--- a/nnoir-onnx/nnoir_onnx/operators/sigmoid.py
+++ b/nnoir-onnx/nnoir_onnx/operators/sigmoid.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Sigmoid
+from numpy.typing import NDArray
from .utils import *
class OpSigmoid(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSigmoid, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Sigmoid(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/sin.py b/nnoir-onnx/nnoir_onnx/operators/sin.py
index 084ba19..d8af423 100644
--- a/nnoir-onnx/nnoir_onnx/operators/sin.py
+++ b/nnoir-onnx/nnoir_onnx/operators/sin.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Sin
+from numpy.typing import NDArray
from .utils import *
class OpSin(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSin, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Sin(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/softmax.py b/nnoir-onnx/nnoir_onnx/operators/softmax.py
index 1c738f5..eff8097 100644
--- a/nnoir-onnx/nnoir_onnx/operators/softmax.py
+++ b/nnoir-onnx/nnoir_onnx/operators/softmax.py
@@ -1,10 +1,14 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Softmax
+from numpy.typing import NDArray
from .utils import *
class OpSoftmax(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSoftmax, self).__init__(node, *args)
self.axis = 1
@@ -12,5 +16,5 @@ def __init__(self, node, *args):
if attr.name == "axis":
self.axis = attr.i
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Softmax(list(self.node.input), list(self.node.output), axis=self.axis)]
diff --git a/nnoir-onnx/nnoir_onnx/operators/split.py b/nnoir-onnx/nnoir_onnx/operators/split.py
index 0c42117..b11d2e5 100644
--- a/nnoir-onnx/nnoir_onnx/operators/split.py
+++ b/nnoir-onnx/nnoir_onnx/operators/split.py
@@ -1,28 +1,32 @@
-from typing import Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
-from nnoir.functions import Constant, MatMul, Transpose
+import onnx
+from nnoir.functions import Constant, Function, MatMul, Transpose
+from numpy.typing import NDArray
from .utils import Op, UnsupportedONNXOperation, gen_unregisterd_node_name, register_node
+ShapeLike = Union[Tuple[int, ...], List[int]]
-def create_half_split_matrices(k: int) -> Tuple[np.ndarray, np.ndarray]:
+
+def create_half_split_matrices(k: int) -> Tuple[NDArray[Any], NDArray[Any]]:
k_2 = k // 2
eye = np.eye(k_2, dtype="float32")
zero = np.zeros((k_2, k_2), dtype="float32")
- return (np.concatenate([eye, zero]), np.concatenate([zero, eye]))
+ return (np.concatenate([eye, zero]), np.concatenate([zero, eye])) # type: ignore
-def gen_value(env, arr):
+def gen_value(env: Dict[str, NDArray[Any]], arr: NDArray[Any]) -> str:
name = gen_unregisterd_node_name(env)
register_node(env, name, arr)
return name
-def gen_dummy_value(env, shape):
+def gen_dummy_value(env: Dict[str, NDArray[Any]], shape: ShapeLike) -> str:
dummy_arr = np.zeros(shape).astype(np.float32)
name = gen_unregisterd_node_name(env)
register_node(env, name, dummy_arr)
@@ -31,10 +35,10 @@ def gen_dummy_value(env, shape):
class OpSplit(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super().__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
if len(self.node.input) > 1:
raise UnsupportedONNXOperation(self.node, "the number of inputs must be 1.")
if len(self.node.output) > 2:
@@ -56,13 +60,13 @@ def to_function(self, env, constants):
transpose_perm_1 = list(range(len(shape)))
transpose_perm_1.insert(split_axis, transpose_perm_1.pop(-1))
- def transpose_shape(shape, axes):
+ def transpose_shape(shape: ShapeLike, axes: List[int]) -> Tuple[int, ...]:
l = []
for axis in axes:
l.append(shape[axis])
return tuple(l)
- def linear_shape(x_shape, w_shape):
+ def linear_shape(x_shape: ShapeLike, w_shape: ShapeLike) -> ShapeLike:
"""
(..., n, k), (k, m) -> (batch, channel, n, m)
"""
@@ -72,7 +76,7 @@ def linear_shape(x_shape, w_shape):
k1 = w_shape[0]
m = w_shape[1]
assert k0 == k1
- return np.concatenate([x_shape[:-2], [n], [m]])
+ return np.concatenate([x_shape[:-2], [n], [m]]) # type: ignore
trans_shape = transpose_shape(shape, transpose_perm_0)
trans_out = gen_dummy_value(env, trans_shape)
@@ -84,15 +88,15 @@ def linear_shape(x_shape, w_shape):
linear_down_out = gen_dummy_value(env, linear_down_shape)
up_const = gen_value(env, matrice_up)
- up_const_node = Constant([], [up_const], value=matrice_up)
+ up_const_node = Constant([], [up_const], value=matrice_up) # type: ignore
down_const = gen_value(env, matrice_up)
- down_const_node = Constant([], [down_const], value=matrice_down)
+ down_const_node = Constant([], [down_const], value=matrice_down) # type: ignore
- transpose_node = Transpose(list(self.node.input), [trans_out], axes=transpose_perm_0)
- linear_up_node = MatMul([trans_out, up_const], [linear_up_out])
- linear_down_node = MatMul([trans_out, down_const], [linear_down_out])
- transpose_up_node = Transpose([linear_up_out], [self.node.output[0]], axes=transpose_perm_1)
- transpose_down_node = Transpose([linear_down_out], [self.node.output[1]], axes=transpose_perm_1)
+ transpose_node = Transpose(list(self.node.input), [trans_out], axes=transpose_perm_0) # type: ignore
+ linear_up_node = MatMul([trans_out, up_const], [linear_up_out]) # type: ignore
+ linear_down_node = MatMul([trans_out, down_const], [linear_down_out]) # type: ignore
+ transpose_up_node = Transpose([linear_up_out], [self.node.output[0]], axes=transpose_perm_1) # type: ignore
+ transpose_down_node = Transpose([linear_down_out], [self.node.output[1]], axes=transpose_perm_1) # type: ignore
nodes = [
up_const_node,
down_const_node,
diff --git a/nnoir-onnx/nnoir_onnx/operators/squeeze.py b/nnoir-onnx/nnoir_onnx/operators/squeeze.py
index 273ddce..08ca03e 100644
--- a/nnoir-onnx/nnoir_onnx/operators/squeeze.py
+++ b/nnoir-onnx/nnoir_onnx/operators/squeeze.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Function, Reshape
+from numpy.typing import NDArray
from .utils import *
class OpSqueeze(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSqueeze, self).__init__(node, *args)
self.axes = []
@@ -14,7 +18,7 @@ def __init__(self, node, *args):
if attr.name == "axes":
self.axes = attr.ints
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
x = self.node.input[0]
[y] = self.node.output
shape0 = env[x].shape
diff --git a/nnoir-onnx/nnoir_onnx/operators/sub.py b/nnoir-onnx/nnoir_onnx/operators/sub.py
index 33d64d4..82bb060 100644
--- a/nnoir-onnx/nnoir_onnx/operators/sub.py
+++ b/nnoir-onnx/nnoir_onnx/operators/sub.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Bias, Function, Sub
+from numpy.typing import NDArray
from .utils import *
class OpSub(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSub, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
[a, b] = self.node.input
if a in constants and b not in constants:
raise UnsupportedONNXOperation(self.node, "unimplemented yet")
diff --git a/nnoir-onnx/nnoir_onnx/operators/sum.py b/nnoir-onnx/nnoir_onnx/operators/sum.py
index 057da74..06394e8 100644
--- a/nnoir-onnx/nnoir_onnx/operators/sum.py
+++ b/nnoir-onnx/nnoir_onnx/operators/sum.py
@@ -1,15 +1,18 @@
import functools
+from typing import Any, Dict, List, Optional, Tuple
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Add, Function
+from numpy.typing import NDArray
from .utils import *
class OpSum(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpSum, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
if len(self.node.input) != 2:
raise UnsupportedONNXOperation(self.node, "# of inputs must be 2")
return [Add(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/tan.py b/nnoir-onnx/nnoir_onnx/operators/tan.py
index 486949c..088e874 100644
--- a/nnoir-onnx/nnoir_onnx/operators/tan.py
+++ b/nnoir-onnx/nnoir_onnx/operators/tan.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Tan
+from numpy.typing import NDArray
from .utils import *
class OpTan(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpTan, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Tan(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/tanh.py b/nnoir-onnx/nnoir_onnx/operators/tanh.py
index 81ef215..d332350 100644
--- a/nnoir-onnx/nnoir_onnx/operators/tanh.py
+++ b/nnoir-onnx/nnoir_onnx/operators/tanh.py
@@ -1,11 +1,15 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Tanh
+from numpy.typing import NDArray
from .utils import *
class OpTanh(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpTanh, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Tanh(list(self.node.input), list(self.node.output))]
diff --git a/nnoir-onnx/nnoir_onnx/operators/transpose.py b/nnoir-onnx/nnoir_onnx/operators/transpose.py
index 1556a2c..43cad24 100644
--- a/nnoir-onnx/nnoir_onnx/operators/transpose.py
+++ b/nnoir-onnx/nnoir_onnx/operators/transpose.py
@@ -1,11 +1,15 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
-from nnoir.functions import *
+import onnx
+from nnoir.functions import Function, Transpose
+from numpy.typing import NDArray
from .utils import *
class OpTranspose(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpTranspose, self).__init__(node, *args)
self.perm = None
@@ -13,5 +17,5 @@ def __init__(self, node, *args):
if attr.name == "perm":
self.perm = list(attr.ints)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
return [Transpose(list(self.node.input), list(self.node.output), axes=self.perm)]
diff --git a/nnoir-onnx/nnoir_onnx/operators/unsqueeze.py b/nnoir-onnx/nnoir_onnx/operators/unsqueeze.py
index 40f76e5..6900ede 100644
--- a/nnoir-onnx/nnoir_onnx/operators/unsqueeze.py
+++ b/nnoir-onnx/nnoir_onnx/operators/unsqueeze.py
@@ -1,13 +1,17 @@
-from nnoir.functions import *
+from typing import Any, Dict, List, Optional, Tuple
+
+import onnx
+from nnoir.functions import Function, Reshape
+from numpy.typing import NDArray
from .utils import *
class OpUnsqueeze(Op):
- def __init__(self, node, *args):
+ def __init__(self, node: onnx.NodeProto, *args: Any):
super(OpUnsqueeze, self).__init__(node, *args)
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
x = self.node.input[0]
[y] = self.node.output
diff --git a/nnoir-onnx/nnoir_onnx/operators/utils.py b/nnoir-onnx/nnoir_onnx/operators/utils.py
index 268838f..e3107f4 100644
--- a/nnoir-onnx/nnoir_onnx/operators/utils.py
+++ b/nnoir-onnx/nnoir_onnx/operators/utils.py
@@ -1,43 +1,47 @@
import io
+from typing import Any, Dict, List, Optional, Tuple
import numpy as np
+import onnx
+from nnoir.functions import Function
+from numpy.typing import NDArray
class InvalidONNXData(Exception):
- def __init__(self, message):
+ def __init__(self, message: str):
self.message = message
class UnsupportedONNXOperation(Exception):
- def __init__(self, node, message):
+ def __init__(self, node: onnx.NodeProto, message: str):
self.node = node
self.message = message
class UnknownSizedVariable(Exception):
- def __init__(self, message):
+ def __init__(self, message: str):
self.message = message
class Op:
- def __init__(self, node, opset_version):
+ def __init__(self, node: onnx.NodeProto, opset_version: int):
self.node = node
self.opset_version = opset_version
- def to_function(self, env, constants):
+ def to_function(self, env: Dict[str, NDArray[Any]], constants: Dict[str, NDArray[Any]]) -> List[Function]:
raise UnsupportedONNXOperation(self.node, "not implemented")
-def encode_ndarray(obj):
+def encode_ndarray(obj: Optional[NDArray[Any]]) -> Optional[Dict[bytes, bytes]]:
if obj is None:
return None
else:
with io.BytesIO() as out:
- np.save(out, obj.copy())
+ np.save(out, obj.copy()) # type: ignore
return {b"ndarray": out.getvalue()}
-def auto_pad_to_manual_pad(n, k, s, d, auto_pad):
+def auto_pad_to_manual_pad(n: int, k: int, s: int, d: int, auto_pad: bytes) -> Tuple[int, int]:
dk = (k - 1) * d + 1
if n % s == 0:
pad = max(dk - s, 0)
@@ -54,15 +58,17 @@ def auto_pad_to_manual_pad(n, k, s, d, auto_pad):
elif auto_pad == b"VALID":
return (0, 0)
else:
- raise "invalid"
+ raise "invalid" # type: ignore
-def gen_unregisterd_node_name(env):
+def gen_unregisterd_node_name(env: Dict[str, NDArray[Any]]) -> str:
for i in range(len(env)):
candidate = f"v{i}"
if candidate not in env:
return candidate
+ return f"v{len(env)}"
-def register_node(env, name, val):
+
+def register_node(env: Dict[str, NDArray[Any]], name: str, val: NDArray[Any]) -> None:
env[name] = val
diff --git a/nnoir-onnx/nnoir_onnx/py.typed b/nnoir-onnx/nnoir_onnx/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/nnoir-onnx/nnoir_onnx/utils.py b/nnoir-onnx/nnoir_onnx/utils.py
index 371dbd5..8b73295 100644
--- a/nnoir-onnx/nnoir_onnx/utils.py
+++ b/nnoir-onnx/nnoir_onnx/utils.py
@@ -1,9 +1,11 @@
from typing import Dict, Set
+import onnx
+
from .operators.utils import UnknownSizedVariable
-def list_dimension_variables(model) -> Set[str]:
+def list_dimension_variables(model: onnx.ModelProto) -> Set[str]:
s = set()
for x in model.graph.input:
if x.type.HasField("tensor_type"):
@@ -13,7 +15,7 @@ def list_dimension_variables(model) -> Set[str]:
return s
-def freeze_dimension_variables(model, fix_dimension):
+def freeze_dimension_variables(model: onnx.ModelProto, fix_dimension: Dict[str, int]) -> onnx.ModelProto:
s = list_dimension_variables(model)
diff = s.difference(set(fix_dimension.keys()))
if len(diff) != 0:
diff --git a/nnoir-onnx/pyproject.toml b/nnoir-onnx/pyproject.toml
index 17c1d39..77b6045 100644
--- a/nnoir-onnx/pyproject.toml
+++ b/nnoir-onnx/pyproject.toml
@@ -43,15 +43,32 @@ requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pysen]
-version = "0.10"
+version = "0.10.5"
[tool.pysen.lint]
enable_black = true
enable_flake8 = false # disabled due to too many errors
enable_isort = true
-enable_mypy = false # disabled currently
+enable_mypy = true
mypy_preset = "strict"
line_length = 128
py_version = "py37"
[[tool.pysen.lint.mypy_targets]]
paths = ["."]
+
+[tool.mypy]
+[[tool.mypy.overrides]]
+module = "onnx.*"
+ignore_missing_imports = true
+
+[[tool.mypy.overrides]]
+module = "onnxruntime.*"
+ignore_missing_imports = true
+
+[[tool.mypy.overrides]]
+module = "nnoir.*"
+implicit_reexport = true
+
+[[tool.mypy.overrides]]
+module = "nnoir_onnx.*"
+implicit_reexport = true
diff --git a/nnoir-onnx/test/test_add.py b/nnoir-onnx/test/test_add.py
index 8a5d7f2..0cb580e 100644
--- a/nnoir-onnx/test/test_add.py
+++ b/nnoir-onnx/test/test_add.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_add_00():
+def test_add_00() -> None:
"""
opset version >= 7
without constant, supports multidirectional broadcasting
@@ -38,7 +41,7 @@ def create_onnx(self) -> onnx.ModelProto:
AddTester({"A": a, "B": b}, outputs).run()
-def test_add_01():
+def test_add_01() -> None:
"""
opset version >= 7
with one constant, unidirectional broadcasting (from constant to variable)
@@ -67,7 +70,7 @@ def create_onnx(self) -> onnx.ModelProto:
AddTester({"A": a}, outputs).run()
-def test_add_02():
+def test_add_02() -> None:
"""
opset version >= 7
with one constant, support multidirectional broadcasting
@@ -96,7 +99,7 @@ def create_onnx(self) -> onnx.ModelProto:
AddTester({"A": a}, outputs).run()
-def test_add_03():
+def test_add_03() -> None:
"""
opset version >= 7
with one constant, different shape length
diff --git a/nnoir-onnx/test/test_average_pooling_2d.py b/nnoir-onnx/test/test_average_pooling_2d.py
index 00310cd..1675819 100644
--- a/nnoir-onnx/test/test_average_pooling_2d.py
+++ b/nnoir-onnx/test/test_average_pooling_2d.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from util import Base
@@ -7,9 +10,9 @@
info = make_tensor_value_info
-def test_average_pooling_2d_00():
+def test_average_pooling_2d_00() -> None:
class AveragePoolTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -26,9 +29,9 @@ def create_onnx(self) -> onnx.ModelProto:
AveragePoolTester({"v0": v0}, outputs).run()
-def test_average_pooling_2d_01():
+def test_average_pooling_2d_01() -> None:
class AveragePoolTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -51,9 +54,9 @@ def create_onnx(self) -> onnx.ModelProto:
AveragePoolTester({"v0": v0}, outputs).run()
-def test_average_pooling_2d_02():
+def test_average_pooling_2d_02() -> None:
class AveragePoolTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_batch_normalization.py b/nnoir-onnx/test/test_batch_normalization.py
index 5e48937..86049c2 100644
--- a/nnoir-onnx/test/test_batch_normalization.py
+++ b/nnoir-onnx/test/test_batch_normalization.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -10,9 +13,9 @@
channel = 3
-def test_batch_normalization_00():
+def test_batch_normalization_00() -> None:
class BatchNormalizationTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -73,9 +76,9 @@ def create_onnx(self) -> onnx.ModelProto:
BatchNormalizationTester({"x": x}, outputs).run()
-def test_batch_normalization_01():
+def test_batch_normalization_01() -> None:
class BatchNormalizationTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_clip.py b/nnoir-onnx/test/test_clip.py
index 904421c..7bf56ab 100644
--- a/nnoir-onnx/test/test_clip.py
+++ b/nnoir-onnx/test/test_clip.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_clip_00():
+def test_clip_00() -> None:
class ClipTester(Base):
"""
IR version == 11
@@ -29,7 +32,7 @@ def create_onnx(self) -> onnx.ModelProto:
ClipTester(inputs, outputs).run()
-def test_clip_01():
+def test_clip_01() -> None:
class ClipTester(Base):
"""
IR version == 6
diff --git a/nnoir-onnx/test/test_concat.py b/nnoir-onnx/test/test_concat.py
index 1805c66..524ca79 100644
--- a/nnoir-onnx/test/test_concat.py
+++ b/nnoir-onnx/test/test_concat.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -9,7 +12,7 @@
info = make_tensor_value_info
-def test_concat_00():
+def test_concat_00() -> None:
class ConcatTester(Base):
def create_onnx(self) -> onnx.ModelProto:
node = make_node("Concat", inputs=["v0", "v1"], outputs=["v2"], axis=1)
@@ -29,7 +32,7 @@ def create_onnx(self) -> onnx.ModelProto:
ConcatTester({"v0": v0, "v1": v1}, outputs).run()
-def test_concat_01():
+def test_concat_01() -> None:
class ConcatTester(Base):
def create_onnx(self) -> onnx.ModelProto:
node = make_node("Concat", inputs=["v0", "v1", "v2"], outputs=["v3"], axis=2)
@@ -53,7 +56,7 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail()
-def test_concat_02():
+def test_concat_02() -> None:
"""
Test to get value from initializers directly.
Currently unsupported.
diff --git a/nnoir-onnx/test/test_conv.py b/nnoir-onnx/test/test_conv.py
index 3133c54..0a9ec48 100644
--- a/nnoir-onnx/test/test_conv.py
+++ b/nnoir-onnx/test/test_conv.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_Conv_00():
+def test_Conv_00() -> None:
class ConvTester(Base):
def create_onnx(self) -> onnx.ModelProto:
node = make_node(
@@ -34,7 +37,7 @@ def create_onnx(self) -> onnx.ModelProto:
ConvTester({"x": x}, outputs).run()
-def test_Conv_01():
+def test_Conv_01() -> None:
x_shape = (1, 4, 4, 5)
class GroupedConvTester(Base):
diff --git a/nnoir-onnx/test/test_cos.py b/nnoir-onnx/test/test_cos.py
index b4513fd..8f1f2a3 100644
--- a/nnoir-onnx/test/test_cos.py
+++ b/nnoir-onnx/test/test_cos.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,9 +11,9 @@
info = make_tensor_value_info
-def test_cos_00():
+def test_cos_00() -> None:
class CosTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_div.py b/nnoir-onnx/test/test_div.py
index a0dd1de..04aae2b 100644
--- a/nnoir-onnx/test/test_div.py
+++ b/nnoir-onnx/test/test_div.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,11 +11,11 @@
info = make_tensor_value_info
-def test_div_00():
+def test_div_00() -> None:
shape = (3, 4, 5)
class DivTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -33,14 +36,14 @@ def create_onnx(self) -> onnx.ModelProto:
DivTester({"v0": v0, "v1": v1}, outputs).run()
-def test_div_01():
+def test_div_01() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class DivTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -61,14 +64,14 @@ def create_onnx(self) -> onnx.ModelProto:
DivTester({"v0": v0, "v1": v1}, outputs).run()
-def test_div_02():
+def test_div_02() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class DivTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -89,13 +92,13 @@ def create_onnx(self) -> onnx.ModelProto:
DivTester({"v0": v0, "v1": v1}, outputs).run()
-def test_div_const_00():
+def test_div_const_00() -> None:
"""
Test for constants
"""
class DivTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -117,13 +120,13 @@ def create_onnx(self) -> onnx.ModelProto:
DivTester({"v0": v0}, outputs).run()
-def test_div_const_01():
+def test_div_const_01() -> None:
"""
Test for constants
"""
class DivTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_dropout.py b/nnoir-onnx/test/test_dropout.py
index ce54aac..84fd881 100644
--- a/nnoir-onnx/test/test_dropout.py
+++ b/nnoir-onnx/test/test_dropout.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -11,13 +14,13 @@
shape = (1, 3, 4, 5)
-def test_dropout_00():
+def test_dropout_00() -> None:
class DropoutTester(Base):
"""
opset version 10
"""
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -41,13 +44,13 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail()
-def test_dropout_01():
+def test_dropout_01() -> None:
class DropoutTester(Base):
"""
Consideration: Optional output 'mask'
"""
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_elu.py b/nnoir-onnx/test/test_elu.py
index a61075b..85fad5d 100644
--- a/nnoir-onnx/test/test_elu.py
+++ b/nnoir-onnx/test/test_elu.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_elu_00():
+def test_elu_00() -> None:
"""
opser version >= 6
"""
diff --git a/nnoir-onnx/test/test_exp.py b/nnoir-onnx/test/test_exp.py
index bf1debd..9281c1a 100644
--- a/nnoir-onnx/test/test_exp.py
+++ b/nnoir-onnx/test/test_exp.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_exp_base():
+def test_exp_base() -> None:
"""
opset version >= 6
"""
class ExpTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_flatten.py b/nnoir-onnx/test/test_flatten.py
index acd1071..d480aeb 100644
--- a/nnoir-onnx/test/test_flatten.py
+++ b/nnoir-onnx/test/test_flatten.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_flatten_00():
+def test_flatten_00() -> None:
"""
opser version >= 11
"""
@@ -16,7 +19,7 @@ def test_flatten_00():
shape = (1, 3, 4, 5)
class FlattenTester(Base):
- def __init__(self, inputs, outputs, axis):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str], axis: int):
super().__init__(inputs, outputs)
self.axis = axis
diff --git a/nnoir-onnx/test/test_gemm.py b/nnoir-onnx/test/test_gemm.py
index d174901..5d5a466 100644
--- a/nnoir-onnx/test/test_gemm.py
+++ b/nnoir-onnx/test/test_gemm.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -9,7 +12,7 @@
info = make_tensor_value_info
-def test_gemm_00():
+def test_gemm_00() -> None:
a_shape = (4, 3)
b_shape = (5, 4)
c_shape = (1, 5)
@@ -49,7 +52,7 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail()
-def test_gemm_01():
+def test_gemm_01() -> None:
"""
unidirectional broadcasting is not supported
"""
@@ -91,7 +94,7 @@ def create_onnx(self) -> onnx.ModelProto:
GemmTester(inputs, outputs).run()
-def test_gemm_02():
+def test_gemm_02() -> None:
a_shape = (4, 3)
b_shape = (5, 4)
c_shape = (3, 5)
@@ -131,7 +134,7 @@ def create_onnx(self) -> onnx.ModelProto:
GemmTester(inputs, outputs).run()
-def test_gemm_03():
+def test_gemm_03() -> None:
a_shape = (3, 4)
b_shape = (4, 5)
c_shape = (3, 5)
@@ -171,7 +174,7 @@ def create_onnx(self) -> onnx.ModelProto:
GemmTester(inputs, outputs).run()
-def test_gemm_04():
+def test_gemm_04() -> None:
a_shape = (4, 3)
b_shape = (4, 5)
diff --git a/nnoir-onnx/test/test_global_avearge_pooling.py b/nnoir-onnx/test/test_global_avearge_pooling.py
index 623033f..948a7b0 100644
--- a/nnoir-onnx/test/test_global_avearge_pooling.py
+++ b/nnoir-onnx/test/test_global_avearge_pooling.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_global_average_pooling_00():
+def test_global_average_pooling_00() -> None:
"""
opset version >= 1
"""
diff --git a/nnoir-onnx/test/test_hard_sigmoid.py b/nnoir-onnx/test/test_hard_sigmoid.py
index 3b37a89..6ec5604 100644
--- a/nnoir-onnx/test/test_hard_sigmoid.py
+++ b/nnoir-onnx/test/test_hard_sigmoid.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from util import Base
@@ -8,7 +11,7 @@
class HardSigmoidTester(Base):
- def __init__(self, inputs, outputs, **kwargs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str], **kwargs: Any):
super().__init__(inputs, outputs)
self.params = kwargs
@@ -21,7 +24,7 @@ def create_onnx(self) -> onnx.ModelProto:
return model
-def test_hard_sigmoid_00():
+def test_hard_sigmoid_00() -> None:
# y = max(0, min(1, 0.2 * x + 0.5))
#
# | condition | result of `alpha * x + beta` | value of y |
@@ -34,7 +37,7 @@ def test_hard_sigmoid_00():
HardSigmoidTester({"v0": v0}, ["v1"]).run()
-def test_hard_sigmoid_01():
+def test_hard_sigmoid_01() -> None:
alpha = 1 / 3
beta = 3 / 5
# y = max(0, min(1, (1/3) * x + (3/5)))
diff --git a/nnoir-onnx/test/test_hard_swish.py b/nnoir-onnx/test/test_hard_swish.py
index 48b79d6..d0fcd7b 100644
--- a/nnoir-onnx/test/test_hard_swish.py
+++ b/nnoir-onnx/test/test_hard_swish.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_hard_swish_00():
+def test_hard_swish_00() -> None:
"""
opset version >= 14
"""
class HardSwishTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_leaky_relu.py b/nnoir-onnx/test/test_leaky_relu.py
index 91b44af..1cab14c 100644
--- a/nnoir-onnx/test/test_leaky_relu.py
+++ b/nnoir-onnx/test/test_leaky_relu.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_leaky_relu_00():
+def test_leaky_relu_00() -> None:
"""
opset version >= 6
"""
class LeakyReluTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_lrn.py b/nnoir-onnx/test/test_lrn.py
index 3419f61..57f454c 100644
--- a/nnoir-onnx/test/test_lrn.py
+++ b/nnoir-onnx/test/test_lrn.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_lrn_00():
+def test_lrn_00() -> None:
"""
opser version >= 1
"""
diff --git a/nnoir-onnx/test/test_lstm.py b/nnoir-onnx/test/test_lstm.py
index 00d9ab2..7ed38b8 100644
--- a/nnoir-onnx/test/test_lstm.py
+++ b/nnoir-onnx/test/test_lstm.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_LSTM_00():
+def test_LSTM_00() -> None:
class LSTMTester(Base):
def create_onnx(self) -> onnx.ModelProto:
input_size = 2
@@ -49,7 +52,7 @@ def create_onnx(self) -> onnx.ModelProto:
LSTMTester({"x": x}, outputs).run()
-def test_LSTM_01():
+def test_LSTM_01() -> None:
class LSTMTester(Base):
def create_onnx(self) -> onnx.ModelProto:
input_size = 2
@@ -88,7 +91,7 @@ def create_onnx(self) -> onnx.ModelProto:
)
W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)
R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
- B = from_array(np.concatenate((W_B, R_B), 1), "B")
+ B = from_array(np.concatenate((W_B, R_B), 1), "B") # type: ignore
graph = make_graph([node], "lstm_graph", inputs, outputs, initializer=[W, R, B])
print(onnx.helper.printable_graph(graph))
@@ -100,7 +103,7 @@ def create_onnx(self) -> onnx.ModelProto:
LSTMTester({"x": x}, outputs).run()
-def test_LSTM_02():
+def test_LSTM_02() -> None:
class LSTMTester(Base):
def create_onnx(self) -> onnx.ModelProto:
input_size = 2
@@ -140,7 +143,7 @@ def create_onnx(self) -> onnx.ModelProto:
)
W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)
R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
- B = from_array(np.concatenate((W_B, R_B), 1), "B")
+ B = from_array(np.concatenate((W_B, R_B), 1), "B") # type: ignore
seq_lens = from_array(np.repeat(seq_length, batch_size).astype(np.int32), "sequence_lens")
init_h = from_array(np.ones((1, batch_size, hidden_size)).astype(np.float32), "initial_h")
@@ -162,7 +165,7 @@ def create_onnx(self) -> onnx.ModelProto:
LSTMTester({"x": x}, outputs).run()
-def test_LSTM_03():
+def test_LSTM_03() -> None:
class LSTMTester(Base):
def create_onnx(self) -> onnx.ModelProto:
input_size = 2
@@ -211,7 +214,7 @@ def create_onnx(self) -> onnx.ModelProto:
)
W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)
R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
- B = from_array(np.concatenate((W_B, R_B), 1), "B")
+ B = from_array(np.concatenate((W_B, R_B), 1), "B") # type: ignore
seq_lens = from_array(np.repeat(seq_length, batch_size).astype(np.int32), "sequence_lens")
init_h = from_array(np.ones((1, batch_size, hidden_size)).astype(np.float32), "initial_h")
@@ -237,7 +240,7 @@ def create_onnx(self) -> onnx.ModelProto:
LSTMTester({"x": x}, outputs).run()
-def test_LSTM_04():
+def test_LSTM_04() -> None:
class LSTMTester(Base):
def create_onnx(self) -> onnx.ModelProto:
input_size = 2
@@ -284,7 +287,7 @@ def create_onnx(self) -> onnx.ModelProto:
LSTMTester({"x": x}, outputs).run()
-def test_LSTM_05():
+def test_LSTM_05() -> None:
batch_size = 3
hidden_size = 3
@@ -329,7 +332,7 @@ def create_onnx(self) -> onnx.ModelProto:
)
W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)
R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
- B = from_array(np.concatenate((W_B, R_B), 1), "B")
+ B = from_array(np.concatenate((W_B, R_B), 1), "B") # type: ignore
seq_lens = from_array(np.repeat(seq_length, batch_size).astype(np.int32), "sequence_lens")
diff --git a/nnoir-onnx/test/test_matmul.py b/nnoir-onnx/test/test_matmul.py
index fbeb3fa..42b9059 100644
--- a/nnoir-onnx/test/test_matmul.py
+++ b/nnoir-onnx/test/test_matmul.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -9,7 +12,7 @@
info = make_tensor_value_info
-def test_matmul_00():
+def test_matmul_00() -> None:
"""
opset version >= 9
"""
@@ -36,7 +39,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x}, outputs).run()
-def test_matmul_01():
+def test_matmul_01() -> None:
"""
opset version >= 9
"""
@@ -63,7 +66,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x}, outputs).run()
-def test_matmul_02():
+def test_matmul_02() -> None:
"""
opset version >= 9
"""
@@ -92,7 +95,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x, "y": y}, outputs).run()
-def test_matmul_03():
+def test_matmul_03() -> None:
"""
opset version >= 9
"""
@@ -121,7 +124,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x, "y": y}, outputs).run()
-def test_matmul_04():
+def test_matmul_04() -> None:
"""
opset version >= 9
"""
@@ -150,7 +153,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x, "y": y}, outputs).run()
-def test_matmul_05():
+def test_matmul_05() -> None:
"""
opset version >= 9
"""
@@ -179,7 +182,7 @@ def create_onnx(self) -> onnx.ModelProto:
MatMulTester({"x": x, "y": y}, outputs).run()
-def test_matmul_06():
+def test_matmul_06() -> None:
"""
opset version >= 9
"""
diff --git a/nnoir-onnx/test/test_max_pooling.py b/nnoir-onnx/test/test_max_pooling.py
index f59dfed..945f4bc 100644
--- a/nnoir-onnx/test/test_max_pooling.py
+++ b/nnoir-onnx/test/test_max_pooling.py
@@ -1,7 +1,10 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
from nnoir_onnx.operators.utils import UnsupportedONNXOperation
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from util import Base
@@ -9,7 +12,7 @@
info = make_tensor_value_info
-def test_max_pool_00():
+def test_max_pool_00() -> None:
class MaxPoolTester(Base):
def create_onnx(self) -> onnx.ModelProto:
node = make_node(
@@ -32,7 +35,7 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail()
-def test_max_pool_01():
+def test_max_pool_01() -> None:
"""
opset version >= 10
@@ -62,7 +65,7 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail(raises=UnsupportedONNXOperation)
-def test_max_pool_02():
+def test_max_pool_02() -> None:
"""
opset version >= 11
@@ -91,7 +94,7 @@ def create_onnx(self) -> onnx.ModelProto:
MaxPoolTester({"v0": v0}, outputs).run()
-def test_max_pool_03():
+def test_max_pool_03() -> None:
"""
opset version >= 10
diff --git a/nnoir-onnx/test/test_mul.py b/nnoir-onnx/test/test_mul.py
index 26d08da..7fe10c2 100644
--- a/nnoir-onnx/test/test_mul.py
+++ b/nnoir-onnx/test/test_mul.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
from nnoir_onnx.operators.utils import UnsupportedONNXOperation
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -9,7 +12,7 @@
info = make_tensor_value_info
-def test_mul_00():
+def test_mul_00() -> None:
"""
opset version >= 7
without constant, supports multidirectional broadcasting
@@ -39,7 +42,7 @@ def create_onnx(self) -> onnx.ModelProto:
MulTester({"A": a, "B": b}, outputs).run()
-def test_mul_01():
+def test_mul_01() -> None:
"""
opset version >= 7
with one constant, unidirectional broadcasting (from constant to variable)
@@ -68,7 +71,7 @@ def create_onnx(self) -> onnx.ModelProto:
MulTester({"A": a}, outputs).run()
-def test_mul_02():
+def test_mul_02() -> None:
"""
opset version >= 7
with one constant, support multidirectional broadcasting
@@ -97,7 +100,7 @@ def create_onnx(self) -> onnx.ModelProto:
MulTester({"A": a}, outputs).run()
-def test_mul_03():
+def test_mul_03() -> None:
"""
opset version >= 7
with one constant, different shape length
diff --git a/nnoir-onnx/test/test_pad.py b/nnoir-onnx/test/test_pad.py
index 6fff6ff..b2871b1 100644
--- a/nnoir-onnx/test/test_pad.py
+++ b/nnoir-onnx/test/test_pad.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_pad_00():
+def test_pad_00() -> None:
"""
opset version >= 11
"""
class PadTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -34,13 +37,13 @@ def create_onnx(self) -> onnx.ModelProto:
PadTester({"v0": v0}, outputs).run()
-def test_pad_01():
+def test_pad_01() -> None:
"""
opset version >= 2
"""
class PadTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -65,13 +68,13 @@ def create_onnx(self) -> onnx.ModelProto:
PadTester({"v0": v0}, outputs).run()
-def test_pad_02():
+def test_pad_02() -> None:
"""
opset version >= 11 and 0-dimension ndarray value
"""
class PadTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_prelu.py b/nnoir-onnx/test/test_prelu.py
index a557855..5ac2e5a 100644
--- a/nnoir-onnx/test/test_prelu.py
+++ b/nnoir-onnx/test/test_prelu.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_prelu_00():
+def test_prelu_00() -> None:
class PReluTester(Base):
def create_onnx(self) -> onnx.ModelProto:
node = make_node("PRelu", inputs=["x", "slope"], outputs=["y"])
diff --git a/nnoir-onnx/test/test_reduce_mean.py b/nnoir-onnx/test/test_reduce_mean.py
index 22264a5..b32ddb3 100644
--- a/nnoir-onnx/test/test_reduce_mean.py
+++ b/nnoir-onnx/test/test_reduce_mean.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_reduce_mean_00():
+def test_reduce_mean_00() -> None:
"""
opset version >= 1
"""
@@ -29,7 +32,7 @@ def create_onnx(self) -> onnx.ModelProto:
ReduceMeanTester({"v0": v0}, outputs).run()
-def test_reduce_mean_01():
+def test_reduce_mean_01() -> None:
"""
opset version >= 1
"""
@@ -50,7 +53,7 @@ def create_onnx(self) -> onnx.ModelProto:
ReduceMeanTester({"v0": v0}, outputs).run()
-def test_reduce_mean_02():
+def test_reduce_mean_02() -> None:
"""
opset version >= 1
"""
diff --git a/nnoir-onnx/test/test_reduce_sum.py b/nnoir-onnx/test/test_reduce_sum.py
index 62147fc..e6768cd 100644
--- a/nnoir-onnx/test/test_reduce_sum.py
+++ b/nnoir-onnx/test/test_reduce_sum.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,9 @@
info = make_tensor_value_info
-def run_opset_11_tester(input_shape, output_shape, axes=None, keepdims=1):
+def run_opset_11_tester(
+ input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: Optional[List[int]] = None, keepdims: int = 1
+) -> None:
class ReduceSumTester(Base):
def create_onnx(self) -> onnx.ModelProto:
kwargs = {
@@ -28,23 +33,25 @@ def create_onnx(self) -> onnx.ModelProto:
ReduceSumTester({"v0": np.random.rand(*input_shape).astype(np.float32)}, ["v1"]).run()
-def test_opset_11_reduce_sum_00():
+def test_opset_11_reduce_sum_00() -> None:
run_opset_11_tester((1, 3, 4, 5), (1, 1, 1, 1))
-def test_opset_11_reduce_sum_01():
+def test_opset_11_reduce_sum_01() -> None:
run_opset_11_tester((1, 3, 4, 5), (), keepdims=0)
-def test_opset_11_reduce_sum_02():
+def test_opset_11_reduce_sum_02() -> None:
run_opset_11_tester((1, 3, 4, 5), (1, 1, 1, 5), axes=[1, 2])
-def test_opset_11_reduce_sum_03():
+def test_opset_11_reduce_sum_03() -> None:
run_opset_11_tester((1, 3, 4, 5), (1, 5), axes=[1, 2], keepdims=0)
-def run_opset_13_tester(input_shape, output_shape, axes=None, keepdims=1):
+def run_opset_13_tester(
+ input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: Optional[List[int]] = None, keepdims: int = 1
+) -> None:
class ReduceSumTester(Base):
def create_onnx(self) -> onnx.ModelProto:
kwargs = {
@@ -67,17 +74,17 @@ def create_onnx(self) -> onnx.ModelProto:
ReduceSumTester({"v0": np.random.rand(*input_shape).astype(np.float32)}, ["v1"]).run()
-def test_opset_13_reduce_sum_00():
+def test_opset_13_reduce_sum_00() -> None:
run_opset_13_tester((1, 3, 4, 5), (1, 1, 1, 1))
-def test_opset_13_reduce_sum_01():
+def test_opset_13_reduce_sum_01() -> None:
run_opset_13_tester((1, 3, 4, 5), (), keepdims=0)
-def test_opset_13_reduce_sum_02():
+def test_opset_13_reduce_sum_02() -> None:
run_opset_13_tester((1, 3, 4, 5), (1, 1, 1, 5), axes=[1, 2])
-def test_opset_13_reduce_sum_03():
+def test_opset_13_reduce_sum_03() -> None:
run_opset_13_tester((1, 3, 4, 5), (1, 5), axes=[1, 2], keepdims=0)
diff --git a/nnoir-onnx/test/test_relu.py b/nnoir-onnx/test/test_relu.py
index f8ef027..4253de2 100644
--- a/nnoir-onnx/test/test_relu.py
+++ b/nnoir-onnx/test/test_relu.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_relu_00():
+def test_relu_00() -> None:
"""
opset version >= 6
"""
diff --git a/nnoir-onnx/test/test_reshape.py b/nnoir-onnx/test/test_reshape.py
index cbb0e89..6bac0ea 100644
--- a/nnoir-onnx/test/test_reshape.py
+++ b/nnoir-onnx/test/test_reshape.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_reshape_00():
+def test_reshape_00() -> None:
"""
opset version >= 5
"""
diff --git a/nnoir-onnx/test/test_resize.py b/nnoir-onnx/test/test_resize.py
index bd5276a..2403fce 100644
--- a/nnoir-onnx/test/test_resize.py
+++ b/nnoir-onnx/test/test_resize.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def test_resize_00():
+def test_resize_00() -> None:
"""
opset version >= 11
"""
@@ -43,7 +46,7 @@ def create_onnx(self) -> onnx.ModelProto:
ResizeTester(inputs, outputs).run()
-def test_resize_01():
+def test_resize_01() -> None:
"""
opset version >= 11
"""
@@ -77,7 +80,7 @@ def create_onnx(self) -> onnx.ModelProto:
ResizeTester(inputs, outputs).run()
-def test_resize_02():
+def test_resize_02() -> None:
"""
opset version >= 11
"""
@@ -112,7 +115,7 @@ def create_onnx(self) -> onnx.ModelProto:
ResizeTester(inputs, outputs).run()
-def test_resize_03():
+def test_resize_03() -> None:
"""
opset version >= 11
"""
diff --git a/nnoir-onnx/test/test_sigmoid.py b/nnoir-onnx/test/test_sigmoid.py
index 0dc5bc3..8a1c803 100644
--- a/nnoir-onnx/test/test_sigmoid.py
+++ b/nnoir-onnx/test/test_sigmoid.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_sigmoid_00():
+def test_sigmoid_00() -> None:
"""
opset version >= 6
"""
class SigmoidTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_sin.py b/nnoir-onnx/test/test_sin.py
index 587de15..317d413 100644
--- a/nnoir-onnx/test/test_sin.py
+++ b/nnoir-onnx/test/test_sin.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,9 +11,9 @@
info = make_tensor_value_info
-def test_sin_00():
+def test_sin_00() -> None:
class SinTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_softmax.py b/nnoir-onnx/test/test_softmax.py
index 8dcdfe8..cab0351 100644
--- a/nnoir-onnx/test/test_softmax.py
+++ b/nnoir-onnx/test/test_softmax.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,9 +11,9 @@
info = make_tensor_value_info
-def test_softmax_00():
+def test_softmax_00() -> None:
class SoftmaxTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -28,9 +31,9 @@ def create_onnx(self) -> onnx.ModelProto:
SoftmaxTester({"v0": v0}, outputs).run()
-def test_softmax_01():
+def test_softmax_01() -> None:
class SoftmaxTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_split.py b/nnoir-onnx/test/test_split.py
index ff58b69..3485ce2 100644
--- a/nnoir-onnx/test/test_split.py
+++ b/nnoir-onnx/test/test_split.py
@@ -1,6 +1,9 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
import pytest
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -9,9 +12,9 @@
info = make_tensor_value_info
-def test_split_trans_axis2():
+def test_split_trans_axis2() -> None:
class SplitTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -32,9 +35,9 @@ def create_onnx(self) -> onnx.ModelProto:
SplitTester({"v0": v0}, outputs).run()
-def test_split_trans_axis3():
+def test_split_trans_axis3() -> None:
class SplitTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -55,14 +58,14 @@ def create_onnx(self) -> onnx.ModelProto:
SplitTester({"v0": v0}, outputs).run()
-def test_split_default_axis():
+def test_split_default_axis() -> None:
"""
Omit specification of axis.
If it is ommited, axis is treated as 0.
"""
class SplitTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -84,14 +87,14 @@ def create_onnx(self) -> onnx.ModelProto:
@pytest.mark.xfail()
-def test_split_specify_split():
+def test_split_specify_split() -> None:
"""
Specify second input (optional parameter).
Due to lack of implementation, the second input is not supported.
"""
class SplitTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_squeeze.py b/nnoir-onnx/test/test_squeeze.py
index 1dbdf9b..280aade 100644
--- a/nnoir-onnx/test/test_squeeze.py
+++ b/nnoir-onnx/test/test_squeeze.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List, Optional, Tuple
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,9 +11,9 @@
info = make_tensor_value_info
-def run_opset_11_tester(input_shape, output_shape, axes=None):
+def run_opset_11_tester(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: Optional[List[int]] = None) -> None:
class SqueezeTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super(SqueezeTester, self).__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -30,33 +33,33 @@ def create_onnx(self) -> onnx.ModelProto:
SqueezeTester({"x": np.ones(input_shape, dtype=np.float32)}, ["y"]).run()
-def test_opset_11_squeeze_00():
+def test_opset_11_squeeze_00() -> None:
run_opset_11_tester((1, 3, 1, 5), (3, 5))
-def test_opset_11_squeeze_01():
+def test_opset_11_squeeze_01() -> None:
run_opset_11_tester((1, 3, 1, 5), (3, 1, 5), axes=[0])
-def test_opset_11_squeeze_02():
+def test_opset_11_squeeze_02() -> None:
run_opset_11_tester((1, 3, 1, 5), (1, 3, 5), axes=[-2])
-def test_opset_11_squeeze_03():
+def test_opset_11_squeeze_03() -> None:
run_opset_11_tester((1, 3, 1, 5), (3, 5), axes=[0, 2])
-def test_opset_11_squeeze_04():
+def test_opset_11_squeeze_04() -> None:
run_opset_11_tester((1, 3, 1, 5), (3, 5), axes=[0, -2])
-def test_opset_11_squeeze_05():
+def test_opset_11_squeeze_05() -> None:
run_opset_11_tester((1, 3, 1, 5), (3, 5), axes=[2, -4])
-def run_opset_13_tester(input_shape, output_shape, axes=None):
+def run_opset_13_tester(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: Optional[List[int]] = None) -> None:
class SqueezeTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super(SqueezeTester, self).__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -79,25 +82,25 @@ def create_onnx(self) -> onnx.ModelProto:
SqueezeTester({"x": np.ones(input_shape, dtype=np.float32)}, ["y"]).run()
-def test_opset_13_squeeze_00():
+def test_opset_13_squeeze_00() -> None:
run_opset_13_tester((1, 3, 1, 5), (3, 5))
-def test_opset_13_squeeze_01():
+def test_opset_13_squeeze_01() -> None:
run_opset_13_tester((1, 3, 1, 5), (3, 1, 5), axes=[0])
-def test_opset_13_squeeze_02():
+def test_opset_13_squeeze_02() -> None:
run_opset_13_tester((1, 3, 1, 5), (1, 3, 5), axes=[-2])
-def test_opset_13_squeeze_03():
+def test_opset_13_squeeze_03() -> None:
run_opset_13_tester((1, 3, 1, 5), (3, 5), axes=[0, 2])
-def test_opset_13_squeeze_04():
+def test_opset_13_squeeze_04() -> None:
run_opset_13_tester((1, 3, 1, 5), (3, 5), axes=[0, -2])
-def test_opset_13_squeeze_05():
+def test_opset_13_squeeze_05() -> None:
run_opset_13_tester((1, 3, 1, 5), (3, 5), axes=[2, -4])
diff --git a/nnoir-onnx/test/test_sub.py b/nnoir-onnx/test/test_sub.py
index 504fd5e..613d01d 100644
--- a/nnoir-onnx/test/test_sub.py
+++ b/nnoir-onnx/test/test_sub.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from util import Base
@@ -7,11 +10,11 @@
info = make_tensor_value_info
-def test_sub_00():
+def test_sub_00() -> None:
shape = (3, 4, 5)
class SubTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -32,14 +35,14 @@ def create_onnx(self) -> onnx.ModelProto:
SubTester({"v0": v0, "v1": v1}, outputs).run()
-def test_sub_01():
+def test_sub_01() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class SubTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -60,14 +63,14 @@ def create_onnx(self) -> onnx.ModelProto:
SubTester({"v0": v0, "v1": v1}, outputs).run()
-def test_sub_02():
+def test_sub_02() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class SubTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_sum.py b/nnoir-onnx/test/test_sum.py
index 033e078..9294031 100644
--- a/nnoir-onnx/test/test_sum.py
+++ b/nnoir-onnx/test/test_sum.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
from util import Base
@@ -7,11 +10,11 @@
info = make_tensor_value_info
-def test_sum_00():
+def test_sum_00() -> None:
shape = (3, 4, 5)
class SumTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -32,14 +35,14 @@ def create_onnx(self) -> onnx.ModelProto:
SumTester({"v0": v0, "v1": v1}, outputs).run()
-def test_sum_01():
+def test_sum_01() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class SumTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
@@ -60,14 +63,14 @@ def create_onnx(self) -> onnx.ModelProto:
SumTester({"v0": v0, "v1": v1}, outputs).run()
-def test_sum_02():
+def test_sum_02() -> None:
"""
Test for multidirectional broadcasting
https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
"""
class SumTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_tan.py b/nnoir-onnx/test/test_tan.py
index 020e542..6556ba9 100644
--- a/nnoir-onnx/test/test_tan.py
+++ b/nnoir-onnx/test/test_tan.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,9 +11,9 @@
info = make_tensor_value_info
-def test_tan_00():
+def test_tan_00() -> None:
class TanTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_tanh.py b/nnoir-onnx/test/test_tanh.py
index 7832c37..3ebe96a 100644
--- a/nnoir-onnx/test/test_tanh.py
+++ b/nnoir-onnx/test/test_tanh.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_tanh_00():
+def test_tanh_00() -> None:
"""
opset version >= 6
"""
class TanhTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_transpose.py b/nnoir-onnx/test/test_transpose.py
index a7f5bcc..8e6bcf2 100644
--- a/nnoir-onnx/test/test_transpose.py
+++ b/nnoir-onnx/test/test_transpose.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,13 +11,13 @@
info = make_tensor_value_info
-def test_transpose_00():
+def test_transpose_00() -> None:
"""
opset version >= 6
"""
class TransposeTester(Base):
- def __init__(self, inputs, outputs):
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
super().__init__(inputs, outputs)
def create_onnx(self) -> onnx.ModelProto:
diff --git a/nnoir-onnx/test/test_unsqueeze.py b/nnoir-onnx/test/test_unsqueeze.py
index 651b378..ded9d5e 100644
--- a/nnoir-onnx/test/test_unsqueeze.py
+++ b/nnoir-onnx/test/test_unsqueeze.py
@@ -1,5 +1,8 @@
+from typing import Any, Dict, List, Tuple
+
import numpy as np
import onnx
+from numpy.typing import NDArray
from onnx import TensorProto
from onnx.helper import make_graph, make_model, make_node, make_opsetid, make_tensor, make_tensor_value_info
from onnx.numpy_helper import from_array
@@ -8,7 +11,7 @@
info = make_tensor_value_info
-def run_opset_11_tester(input_shape, output_shape, axes):
+def run_opset_11_tester(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: List[int]) -> None:
class UnsqueezeTester(Base):
def create_onnx(self) -> onnx.ModelProto:
inputs = [info("x", TensorProto.FLOAT, input_shape)]
@@ -21,15 +24,15 @@ def create_onnx(self) -> onnx.ModelProto:
UnsqueezeTester({"x": (np.random.rand(*input_shape).astype(np.float32) * 10.0)}, ["y"]).run()
-def test_opset_1_unsqueeze_00():
+def test_opset_1_unsqueeze_00() -> None:
run_opset_11_tester((3, 4), (1, 3, 4, 1), [0, 3])
-def test_opset_11_unsqueeze_01():
+def test_opset_11_unsqueeze_01() -> None:
run_opset_11_tester((3, 4), (1, 3, 1, 4), [0, -2])
-def run_opset_13_tester(input_shape, output_shape, axes):
+def run_opset_13_tester(input_shape: Tuple[int, ...], output_shape: Tuple[int, ...], axes: List[int]) -> None:
class UnsqueezeTester(Base):
def create_onnx(self) -> onnx.ModelProto:
inputs = [info("x", TensorProto.FLOAT, input_shape)]
@@ -43,9 +46,9 @@ def create_onnx(self) -> onnx.ModelProto:
UnsqueezeTester({"x": (np.random.rand(*input_shape).astype(np.float32) * 10.0)}, ["y"]).run()
-def test_opset_13_unsqueeze_00():
+def test_opset_13_unsqueeze_00() -> None:
run_opset_13_tester((3, 4), (1, 3, 4, 1), [0, 3])
-def test_opset_13_unsqueeze_01():
+def test_opset_13_unsqueeze_01() -> None:
run_opset_13_tester((3, 4), (1, 3, 1, 4), [0, -2])
diff --git a/nnoir-onnx/test/util.py b/nnoir-onnx/test/util.py
index f2b2579..1a8f8b8 100644
--- a/nnoir-onnx/test/util.py
+++ b/nnoir-onnx/test/util.py
@@ -1,5 +1,5 @@
import tempfile
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional
import nnoir
import numpy as np
@@ -7,6 +7,7 @@
import onnxruntime
from nnoir import NNOIR
from nnoir_onnx import ONNX
+from numpy.typing import NDArray
epsilon = 0.0001
@@ -14,14 +15,14 @@
class Base:
- def __init__(self, inputs, outputs):
- self.inputs: Dict[str, np.ndarray] = inputs
+ def __init__(self, inputs: Dict[str, NDArray[Any]], outputs: List[str]):
+ self.inputs: Dict[str, NDArray[Any]] = inputs
self.outputs: List[str] = outputs
self.onnx: Optional[onnx.ModelProto] = None
self.nnoir: Optional[NNOIR] = None
- def run(self):
+ def run(self) -> None:
self.onnx = self.create_onnx()
onnx.checker.check_model(self.onnx)
onnx_result = self.execute_onnx(self.onnx)
@@ -36,7 +37,7 @@ def run(self):
for a, b in zip(rerun_result, nnoir_result):
assert np.all(abs(a - b) < epsilon)
- def save_and_run(self, model: NNOIR):
+ def save_and_run(self, model: NNOIR) -> List[NDArray[Any]]:
with tempfile.NamedTemporaryFile(delete=TMP_REMOVE) as f:
model.dump(f.name)
reload_nnoir: NNOIR = nnoir.load(f.name)
@@ -46,18 +47,18 @@ def create_onnx(self) -> onnx.ModelProto:
# should be override
assert False
- def execute_onnx(self, model: onnx.ModelProto) -> List[np.ndarray]:
+ def execute_onnx(self, model: onnx.ModelProto) -> List[NDArray[Any]]:
with tempfile.NamedTemporaryFile(delete=TMP_REMOVE) as f:
onnx.save(model, f.name)
sess = onnxruntime.InferenceSession(f.name)
r = sess.run(self.outputs, self.inputs)
- return r
+ return r # type: ignore
def create_nnoir(self, model: onnx.ModelProto) -> NNOIR:
with tempfile.NamedTemporaryFile(delete=TMP_REMOVE) as f:
onnx.save(model, f.name)
return ONNX(f.name).to_NNOIR()
- def execute_nnoir(self, nnoir) -> List[np.ndarray]:
+ def execute_nnoir(self, nnoir: NNOIR) -> List[NDArray[Any]]:
r = nnoir.run(*self.inputs.values())
return r
diff --git a/nnoir/nnoir/dot.py b/nnoir/nnoir/dot.py
index 94bd979..d6c4907 100644
--- a/nnoir/nnoir/dot.py
+++ b/nnoir/nnoir/dot.py
@@ -12,7 +12,7 @@ def function_label(function: Dict[bytes, Any]) -> str:
ret = "{{" + (
"|".join(
[function[b"name"].decode()]
- + list(map(lambda v: "", enumerate(function[b"inputs"]))) # type: ignore
+ + list(map(lambda v: "", enumerate(function[b"inputs"])))
)
)
if b"W" in function[b"params"]:
@@ -34,13 +34,13 @@ def find_params(params: Dict[bytes, Any]) -> Iterator[str]:
params_str = "\l".join(find_params(function[b"params"]))
if params_str != "":
ret += "|{%s\l}" % params_str
- ret += "|{%s}}" % "|".join(map(lambda v: "<" + v.decode() + ">", reversed(function[b"outputs"]))) # type: ignore
+ ret += "|{%s}}" % "|".join(map(lambda v: "<" + v.decode() + ">", reversed(function[b"outputs"])))
return ret
def function_name(function: Dict[bytes, Any]) -> str:
- inputs = "".join(map(lambda v: v.decode(), reversed(function[b"inputs"]))) # type: ignore
- outputs = "".join(map(lambda v: v.decode(), reversed(function[b"outputs"]))) # type: ignore
+ inputs = "".join(map(lambda v: v.decode(), reversed(function[b"inputs"])))
+ outputs = "".join(map(lambda v: v.decode(), reversed(function[b"outputs"])))
return "{}_{}_{}".format(function[b"name"].decode(), inputs, outputs)
diff --git a/nnoir/nnoir/py.typed b/nnoir/nnoir/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/nnoir/poetry.lock b/nnoir/poetry.lock
index d91791e..0a191f1 100644
--- a/nnoir/poetry.lock
+++ b/nnoir/poetry.lock
@@ -13,22 +13,25 @@ files = [
[[package]]
name = "attrs"
-version = "22.2.0"
+version = "23.1.0"
description = "Classes Without Boilerplate"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"},
- {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"},
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
]
+[package.dependencies]
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+
[package.extras]
-cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"]
-dev = ["attrs[docs,tests]"]
-docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"]
-tests = ["attrs[tests-no-zope]", "zope.interface"]
-tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
[[package]]
name = "black"
@@ -80,14 +83,14 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "click"
-version = "8.1.3"
+version = "8.1.7"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
@@ -123,14 +126,13 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""}
[[package]]
name = "dacite"
-version = "1.8.0"
+version = "1.8.1"
description = "Simple creation of data classes from dictionaries."
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
- {file = "dacite-1.8.0-py3-none-any.whl", hash = "sha256:f7b1205cc5d9b62835aac8cbc1e6e37c1da862359a401f1edbe2ae08fbdc6193"},
- {file = "dacite-1.8.0.tar.gz", hash = "sha256:6257a5e505b61a8cafee7ef3ad08cf32ee9b885718f42395d017e0a9b4c6af65"},
+ {file = "dacite-1.8.1-py3-none-any.whl", hash = "sha256:cc31ad6fdea1f49962ea42db9421772afe01ac5442380d9a99fcf3d188c61afe"},
]
[package.extras]
@@ -190,20 +192,23 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.31"
+version = "3.1.37"
description = "GitPython is a Python library used to interact with Git repositories"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"},
- {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"},
+ {file = "GitPython-3.1.37-py3-none-any.whl", hash = "sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33"},
+ {file = "GitPython-3.1.37.tar.gz", hash = "sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"},
]
[package.dependencies]
gitdb = ">=4.0.1,<5"
typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""}
+[package.extras]
+test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"]
+
[[package]]
name = "importlib-metadata"
version = "4.2.0"
@@ -341,45 +346,45 @@ files = [
[[package]]
name = "mypy"
-version = "1.1.1"
+version = "1.4.1"
description = "Optional static typing for Python"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "mypy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39c7119335be05630611ee798cc982623b9e8f0cff04a0b48dfc26100e0b97af"},
- {file = "mypy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61bf08362e93b6b12fad3eab68c4ea903a077b87c90ac06c11e3d7a09b56b9c1"},
- {file = "mypy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbb19c9f662e41e474e0cff502b7064a7edc6764f5262b6cd91d698163196799"},
- {file = "mypy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:315ac73cc1cce4771c27d426b7ea558fb4e2836f89cb0296cbe056894e3a1f78"},
- {file = "mypy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:5cb14ff9919b7df3538590fc4d4c49a0f84392237cbf5f7a816b4161c061829e"},
- {file = "mypy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:26cdd6a22b9b40b2fd71881a8a4f34b4d7914c679f154f43385ca878a8297389"},
- {file = "mypy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b5f81b40d94c785f288948c16e1f2da37203c6006546c5d947aab6f90aefef2"},
- {file = "mypy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21b437be1c02712a605591e1ed1d858aba681757a1e55fe678a15c2244cd68a5"},
- {file = "mypy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d809f88734f44a0d44959d795b1e6f64b2bbe0ea4d9cc4776aa588bb4229fc1c"},
- {file = "mypy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:a380c041db500e1410bb5b16b3c1c35e61e773a5c3517926b81dfdab7582be54"},
- {file = "mypy-1.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7c7b708fe9a871a96626d61912e3f4ddd365bf7f39128362bc50cbd74a634d5"},
- {file = "mypy-1.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1c10fa12df1232c936830839e2e935d090fc9ee315744ac33b8a32216b93707"},
- {file = "mypy-1.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0a28a76785bf57655a8ea5eb0540a15b0e781c807b5aa798bd463779988fa1d5"},
- {file = "mypy-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ef6a01e563ec6a4940784c574d33f6ac1943864634517984471642908b30b6f7"},
- {file = "mypy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d64c28e03ce40d5303450f547e07418c64c241669ab20610f273c9e6290b4b0b"},
- {file = "mypy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64cc3afb3e9e71a79d06e3ed24bb508a6d66f782aff7e56f628bf35ba2e0ba51"},
- {file = "mypy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce61663faf7a8e5ec6f456857bfbcec2901fbdb3ad958b778403f63b9e606a1b"},
- {file = "mypy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2b0c373d071593deefbcdd87ec8db91ea13bd8f1328d44947e88beae21e8d5e9"},
- {file = "mypy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:2888ce4fe5aae5a673386fa232473014056967f3904f5abfcf6367b5af1f612a"},
- {file = "mypy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:19ba15f9627a5723e522d007fe708007bae52b93faab00f95d72f03e1afa9598"},
- {file = "mypy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:59bbd71e5c58eed2e992ce6523180e03c221dcd92b52f0e792f291d67b15a71c"},
- {file = "mypy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9401e33814cec6aec8c03a9548e9385e0e228fc1b8b0a37b9ea21038e64cdd8a"},
- {file = "mypy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b398d8b1f4fba0e3c6463e02f8ad3346f71956b92287af22c9b12c3ec965a9f"},
- {file = "mypy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:69b35d1dcb5707382810765ed34da9db47e7f95b3528334a3c999b0c90fe523f"},
- {file = "mypy-1.1.1-py3-none-any.whl", hash = "sha256:4e4e8b362cdf99ba00c2b218036002bdcdf1e0de085cdb296a49df03fb31dfc4"},
- {file = "mypy-1.1.1.tar.gz", hash = "sha256:ae9ceae0f5b9059f33dbc62dea087e942c0ccab4b7a003719cb70f9b8abfa32f"},
+ {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"},
+ {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"},
+ {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"},
+ {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"},
+ {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"},
+ {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"},
+ {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"},
+ {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"},
+ {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"},
+ {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"},
+ {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"},
+ {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"},
+ {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"},
+ {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"},
+ {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"},
+ {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"},
+ {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"},
+ {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"},
+ {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"},
+ {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"},
+ {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"},
+ {file = "mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"},
+ {file = "mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"},
+ {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"},
+ {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"},
+ {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.1.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
@@ -439,57 +444,57 @@ files = [
[[package]]
name = "packaging"
-version = "23.0"
+version = "23.1"
description = "Core utilities for Python packages"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"},
- {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"},
+ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
+ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
]
[[package]]
name = "pathspec"
-version = "0.11.1"
+version = "0.11.2"
description = "Utility library for gitignore style pattern matching of file paths."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
- {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
+ {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"},
+ {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"},
]
[[package]]
name = "platformdirs"
-version = "3.2.0"
+version = "3.10.0"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "platformdirs-3.2.0-py3-none-any.whl", hash = "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"},
- {file = "platformdirs-3.2.0.tar.gz", hash = "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08"},
+ {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"},
+ {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"},
]
[package.dependencies]
-typing-extensions = {version = ">=4.5", markers = "python_version < \"3.8\""}
+typing-extensions = {version = ">=4.7.1", markers = "python_version < \"3.8\""}
[package.extras]
-docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
[[package]]
name = "pluggy"
-version = "1.0.0"
+version = "1.2.0"
description = "plugin and hook calling mechanisms for python"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
- {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
+ {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"},
+ {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"},
]
[package.dependencies]
@@ -537,14 +542,14 @@ files = [
[[package]]
name = "pysen"
-version = "0.10.3"
+version = "0.10.5"
description = "Python linting made easy. Also a casual yet honorific way to address individuals who have entered an organization prior to you."
category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "pysen-0.10.3-py3-none-any.whl", hash = "sha256:08e79588bf0aa1e9233554d84ac277679d4998dede7768bc7ca8cc5fee6fc846"},
- {file = "pysen-0.10.3.tar.gz", hash = "sha256:3023f48789a90fe660bcacc59a2e57d62297a04c50222ac5d72aff254b53e55c"},
+ {file = "pysen-0.10.5-py3-none-any.whl", hash = "sha256:4e8a83263f04585807e3754622bb635d4a0ccd88ec1a4f324e8c9efba300237f"},
+ {file = "pysen-0.10.5.tar.gz", hash = "sha256:61a6674e0b8a0c6b837b878310bd4117c5d4108dd95db572caa31c5b311d85bb"},
]
[package.dependencies]
@@ -585,14 +590,14 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm
[[package]]
name = "smmap"
-version = "5.0.0"
+version = "5.0.1"
description = "A pure Python implementation of a sliding window memory map manager"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"},
- {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"},
+ {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"},
+ {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"},
]
[[package]]
@@ -621,60 +626,77 @@ files = [
[[package]]
name = "tomlkit"
-version = "0.11.7"
+version = "0.12.1"
description = "Style preserving TOML library"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "tomlkit-0.11.7-py3-none-any.whl", hash = "sha256:5325463a7da2ef0c6bbfefb62a3dc883aebe679984709aee32a317907d0a8d3c"},
- {file = "tomlkit-0.11.7.tar.gz", hash = "sha256:f392ef70ad87a672f02519f99967d28a4d3047133e2d1df936511465fbb3791d"},
+ {file = "tomlkit-0.12.1-py3-none-any.whl", hash = "sha256:712cbd236609acc6a3e2e97253dfc52d4c2082982a88f61b640ecf0817eab899"},
+ {file = "tomlkit-0.12.1.tar.gz", hash = "sha256:38e1ff8edb991273ec9f6181244a6a391ac30e9f5098e7535640ea6be97a7c86"},
]
[[package]]
name = "typed-ast"
-version = "1.5.4"
+version = "1.5.5"
description = "a fork of Python 2 and 3 ast modules with type comment support"
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
- {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
- {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
- {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
- {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
- {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
- {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
- {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
- {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
- {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
- {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
- {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
- {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
- {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
- {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
- {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
- {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
- {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
- {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
- {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
- {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
- {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
- {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
- {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
- {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
+ {file = "typed_ast-1.5.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b"},
+ {file = "typed_ast-1.5.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686"},
+ {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769"},
+ {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04"},
+ {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d"},
+ {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d"},
+ {file = "typed_ast-1.5.5-cp310-cp310-win_amd64.whl", hash = "sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02"},
+ {file = "typed_ast-1.5.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee"},
+ {file = "typed_ast-1.5.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18"},
+ {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88"},
+ {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2"},
+ {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9"},
+ {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8"},
+ {file = "typed_ast-1.5.5-cp311-cp311-win_amd64.whl", hash = "sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d"},
+ {file = "typed_ast-1.5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4"},
+ {file = "typed_ast-1.5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431"},
+ {file = "typed_ast-1.5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a"},
+ {file = "typed_ast-1.5.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437"},
+ {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede"},
+ {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4"},
+ {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6"},
+ {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4"},
+ {file = "typed_ast-1.5.5-cp38-cp38-win_amd64.whl", hash = "sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b"},
+ {file = "typed_ast-1.5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10"},
+ {file = "typed_ast-1.5.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814"},
+ {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8"},
+ {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274"},
+ {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a"},
+ {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba"},
+ {file = "typed_ast-1.5.5-cp39-cp39-win_amd64.whl", hash = "sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155"},
+ {file = "typed_ast-1.5.5.tar.gz", hash = "sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd"},
]
[[package]]
name = "typing-extensions"
-version = "4.5.0"
+version = "4.7.1"
description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
- {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
+ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
+ {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
]
[[package]]
diff --git a/nnoir/pyproject.toml b/nnoir/pyproject.toml
index 0f6f3c8..5ad5a7e 100644
--- a/nnoir/pyproject.toml
+++ b/nnoir/pyproject.toml
@@ -39,15 +39,21 @@ requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pysen]
-version = "0.10"
+version = "0.10.5"
[tool.pysen.lint]
enable_black = true
enable_flake8 = false # disabled due to too many errors
enable_isort = true
-enable_mypy = false # disabled because currently pysen doesn't support mypy ^1.0
+enable_mypy = true
mypy_preset = "strict"
line_length = 128
py_version = "py37"
+
[[tool.pysen.lint.mypy_targets]]
paths = ["."]
+
+[tool.mypy]
+[[tool.mypy.overrides]]
+module = "msgpack.*"
+ignore_missing_imports = true
\ No newline at end of file