diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 518d53a15..e3459018d 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -208,7 +208,7 @@ def backward(y, dy=None):
         if y_stores_grad and tensor_dep[x_id] == 0:
             # store the gradient for final return, e.g. for parameters.
             # it may cause a delay to yield. Only after src_op's all
-            # output tensors have recieved the gradients, then output
+            # output tensors have received the gradients, then output
             g = not_ready[src_op][y_idx]
             tg = Tensor(device=g.device(),
                         data=g,
diff --git a/python/singa/opt.py b/python/singa/opt.py
index 015eea869..1e4528e8f 100755
--- a/python/singa/opt.py
+++ b/python/singa/opt.py
@@ -920,12 +920,12 @@ def backward_and_update_half(self,
         self.opt.step()
 
     def backward_and_partial_update(self, loss, threshold=2097152):
-        """Performs backward propagation from the loss and parameter update using asychronous training.
+        """Performs backward propagation from the loss and parameter update using asynchronous training.
 
         THIS IS A EXPERIMENTAL FUNCTION FOR RESEARCH PURPOSE:
         From the loss, it performs backward propagation to get the gradients and do the parameter
         update. It fuses the tensors smaller than the threshold value to reduce network latency,
-        as well as performing asychronous training where one parameter partition is all-reduced
+        as well as performing asynchronous training where one parameter partition is all-reduced
         per iteration. The size of the parameter partition depends on the threshold value.
 
         Args:
diff --git a/python/singa/sonnx.py b/python/singa/sonnx.py
index 01d704a38..625a30c29 100755
--- a/python/singa/sonnx.py
+++ b/python/singa/sonnx.py
@@ -1787,7 +1787,7 @@ def run_node(cls, node, inputs, device='CPU', opset_version=_opset_version):
                 node.op_type, len(valid_inputs), len(inputs))
 
         operator = cls._onnx_node_to_singa_op(node, opset_version)
-        # seperate weights with inputs, and init inputs as Tensor
+        # separate weights with inputs, and init inputs as Tensor
         weights = {}
         _inputs = []
         for (key, val) in zip(valid_inputs, inputs):
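
Note (not part of the patch): the opt.py hunk touches the docstring of the
experimental DistOpt.backward_and_partial_update method, which all-reduces one
parameter partition per call and fuses tensors smaller than `threshold`. Below
is a minimal sketch of how that path might be driven, following common SINGA
example patterns; the one-matmul "model", the dummy data, and the training-loop
setup are illustrative assumptions, not code from this patch, and the exact
APIs may differ across SINGA versions.

import numpy as np
from singa import autograd, device, opt, tensor

sgd = opt.DistOpt(opt.SGD(lr=0.005, momentum=0.9))   # MPI/NCCL-backed wrapper
dev = device.create_cuda_gpu_on(sgd.local_rank)      # one GPU per rank

autograd.training = True                             # record ops for backward

# Dummy batch: 32 feature vectors and one-hot labels for class 0.
x = tensor.from_numpy(np.random.randn(32, 100).astype(np.float32))
y = tensor.from_numpy(np.eye(10, dtype=np.float32)[np.zeros(32, dtype=int)])
x.to_device(dev)
y.to_device(dev)

# A single weight matrix acting as the whole "model" (illustrative).
w = tensor.Tensor((100, 10), dev, requires_grad=True, stores_grad=True)
w.gaussian(0.0, 0.1)

out = autograd.matmul(x, w)
loss = autograd.softmax_cross_entropy(out, y)

# Each call all-reduces one parameter partition, sized by `threshold`
# (default 2097152 elements); smaller tensors are fused before the
# all-reduce to reduce network latency, per the docstring above.
sgd.backward_and_partial_update(loss, threshold=2097152)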