microsoft / onnxruntime

ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator
https://onnxruntime.ai
MIT License

Fix spelling errors #7841

Closed · m-roberts closed this issue 3 years ago

m-roberts commented 3 years ago

Found a few spelling errors around the place. I'm struggling to clone a fork right now, so here is my patch (a note on applying it follows the diff):

diff --git a/include/onnxruntime/core/framework/execution_provider.h b/include/onnxruntime/core/framework/execution_provider.h
index 2b7944f..55d23df 100644
--- a/include/onnxruntime/core/framework/execution_provider.h
+++ b/include/onnxruntime/core/framework/execution_provider.h
@@ -216,7 +216,7 @@ class IExecutionProvider {
                                  std::vector<NodeComputeInfo>& node_compute_funcs);
 #endif

-  // Fusion approach that is suppported
+  // Fusion approach that is supported
   enum class FusionStyle {
     // The node fusion will create an onnxruntime::Function based Node that contains a completely new Graph instance
     // in the Node body. The original nodes and initializers are copied to the new Graph instance in Function::Body().
diff --git a/js/web/lib/onnxjs/backends/webgl/ops/binary-op.ts b/js/web/lib/onnxjs/backends/webgl/ops/binary-op.ts
index 3bb0f0e..02f2e77 100644
--- a/js/web/lib/onnxjs/backends/webgl/ops/binary-op.ts
+++ b/js/web/lib/onnxjs/backends/webgl/ops/binary-op.ts
@@ -38,7 +38,7 @@ export class WebGLBinaryOp extends BinaryOp implements WebGLOperator {
       const aBcast = inputs[0].dims.length !== 0 ? 'bcastIndices_A(indices, aindices);' : 'aindices[0] = 0;';
       const bBcast = inputs[1].dims.length !== 0 ? 'bcastIndices_B(indices, bindices);' : 'bindices[0] = 0;';

-      // TODO: for packed tensors, we need to implement logic to caculate textCoords for broadcast tensor
+      // TODO: for packed tensors, we need to implement logic to calculate textCoords for broadcast tensor
       const shaderSource = `
       ${this.glslFunc.body}
       float process(int indices[${outputRank}]) {
diff --git a/onnxruntime/core/framework/op_node_proto_helper.cc b/onnxruntime/core/framework/op_node_proto_helper.cc
index cd3dfc6..d6c609f 100644
--- a/onnxruntime/core/framework/op_node_proto_helper.cc
+++ b/onnxruntime/core/framework/op_node_proto_helper.cc
@@ -84,7 +84,7 @@ inline constexpr int ArrayTypeToAttributeType<std::string>() {
       return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "No attribute with name:'", name, "'is defined."); \
     }                                                                                              \
     if (!HasTyped<T>(attr)) {                                                                      \
-      return Status(ONNXRUNTIME, FAIL, "Attibute name and type don't match");                      \
+      return Status(ONNXRUNTIME, FAIL, "Attribute name and type don't match");                      \
     } else {                                                                                       \
       *value = static_cast<T>(attr->type());                                                       \
       return Status::OK();                                                                         \
diff --git a/onnxruntime/core/optimizer/attention_fusion.cc b/onnxruntime/core/optimizer/attention_fusion.cc
index b10b16a..f7a7966 100644
--- a/onnxruntime/core/optimizer/attention_fusion.cc
+++ b/onnxruntime/core/optimizer/attention_fusion.cc
@@ -627,7 +627,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer

   std::vector<const Node::EdgeEnd*> edges;
   if (!graph_utils::FindPath(add_after_layer_norm, true, parent_path, edges, logger)) {
-    DEBUG_LOG("Faild to find path v");
+    DEBUG_LOG("Failed to find path v");
     return false;
   }

diff --git a/onnxruntime/core/optimizer/attention_fusion_helper.h b/onnxruntime/core/optimizer/attention_fusion_helper.h
index 97a52bb..892642a 100644
--- a/onnxruntime/core/optimizer/attention_fusion_helper.h
+++ b/onnxruntime/core/optimizer/attention_fusion_helper.h
@@ -82,7 +82,7 @@ bool MatchGemmSubgraph(Graph& graph,

   std::vector<const Node::EdgeEnd*> edges;
   if (!graph_utils::FindPath(node_after_gemm_reshape, true, parent_path, edges, logger)) {
-    DEBUG_LOG("Faild to match gemm path");
+    DEBUG_LOG("Failed to match gemm path");
     return false;
   }

@@ -142,7 +142,7 @@ bool MatchGemmSubgraph(Graph& graph,
   // Find the concat node for Gather paths.
   std::vector<graph_utils::EdgeEndToMatch> edge_to_match{{0, 1, "Concat", {4, 11, 13}, kOnnxDomain}};
   if (!graph_utils::FindPath(reshape_after_gemm, true, edge_to_match, edges, logger)) {
-    DEBUG_LOG("Faild to match concat node for Gather paths");
+    DEBUG_LOG("Failed to match concat node for Gather paths");
     return false;
   }

@@ -168,7 +168,7 @@ bool MatchGemmSubgraph(Graph& graph,
         {0, 0, "Shape", {1, 13}, kOnnxDomain}};

     if (!graph_utils::FindPath(concat_after_gather, true, gather_path1, edges, logger)) {
-      DEBUG_LOG("Faild to match gemm gather path");
+      DEBUG_LOG("Failed to match gemm gather path");
       return false;
     }

@@ -371,7 +371,7 @@ bool MatchUnidirMaskSubgraph(const Graph& graph, const Node& add_node, MatchUnid

   std::vector<const Node::EdgeEnd*> edges;
   if (!graph_utils::FindPath(add_node, true, root_path, edges, logger)) {
-    DEBUG_LOG("Faild to match the path (Div-->Where-->Add) for unidirectional mask");
+    DEBUG_LOG("Failed to match the path (Div-->Where-->Add) for unidirectional mask");
     return false;
   }

@@ -394,7 +394,7 @@ bool MatchUnidirMaskSubgraph(const Graph& graph, const Node& add_node, MatchUnid
       {0, 0, "Shape", {1, 13}, kOnnxDomain}};

   if (!graph_utils::FindPath(where_node, true, path1, edges, logger)) {
-    DEBUG_LOG("Faild to match path 1 for unidirectional mask");
+    DEBUG_LOG("Failed to match path 1 for unidirectional mask");
     return false;
   }

@@ -451,7 +451,7 @@ bool MatchUnidirMaskSubgraph(const Graph& graph, const Node& add_node, MatchUnid

   if (!graph_utils::FindPath(last_slice, true, slice_ends_path, edges, logger) ||
       edges[1]->GetNode().Index() != squeeze1.Index()) {
-    DEBUG_LOG("Faild to match path 2 for unidirectional mask");
+    DEBUG_LOG("Failed to match path 2 for unidirectional mask");
     return false;
   }

@@ -463,7 +463,7 @@ bool MatchUnidirMaskSubgraph(const Graph& graph, const Node& add_node, MatchUnid

   if (!graph_utils::FindPath(mask_slice, true, slice_ends_path, edges, logger) ||
       edges[1]->GetNode().Index() != squeeze1.Index()) {
-    DEBUG_LOG("Faild to match path 3 for unidirectional mask");
+    DEBUG_LOG("Failed to match path 3 for unidirectional mask");
     return false;
   }

@@ -479,7 +479,7 @@ bool MatchUnidirMaskSubgraph(const Graph& graph, const Node& add_node, MatchUnid
       {0, 0, "Shape", {1, 13}, kOnnxDomain}};

   if (!graph_utils::FindPath(sub, true, path4, edges, logger)) {
-    DEBUG_LOG("Faild to match path 4 for unidirectional mask");
+    DEBUG_LOG("Failed to match path 4 for unidirectional mask");
     return false;
   }

@@ -1302,7 +1302,7 @@ bool FuseGptAttention(Node& layer_norm, Graph& graph, int64_t hidden_size, std::

   std::vector<const Node::EdgeEnd*> edges;
   if (!graph_utils::FindPath(*gemm1_result.input_node, true, path1, edges, logger)) {
-    DEBUG_LOG("Faild to find path to qkv_matmul");
+    DEBUG_LOG("Failed to find path to qkv_matmul");
     return false;
   }

@@ -1323,7 +1323,7 @@ bool FuseGptAttention(Node& layer_norm, Graph& graph, int64_t hidden_size, std::
       {2, 0, "Split", {2, 11, 13}, kOnnxDomain}};

   if (!graph_utils::FindPath(has_past ? *v_concat : qkv_matmul, true, path2, edges, logger)) {
-    DEBUG_LOG("Faild to find path v to Split");
+    DEBUG_LOG("Failed to find path v to Split");
     return false;
   }

diff --git a/onnxruntime/core/providers/cpu/controlflow/if.cc b/onnxruntime/core/providers/cpu/controlflow/if.cc
index df34f67..be07186 100644
--- a/onnxruntime/core/providers/cpu/controlflow/if.cc
+++ b/onnxruntime/core/providers/cpu/controlflow/if.cc
@@ -298,7 +298,7 @@ Status IfImpl::AllocateOutputTensors() {
       outputs_.push_back({AllocationType::IfOutput, *context_.GetOutputMLValue(index)});
     } else {
       // Shouldn't hit this
-      return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Only tensors or sequence of tensors are suppported");
+      return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Only tensors or sequence of tensors are supported");
     }

     ++index;
diff --git a/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc b/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc
index 3ed6b89..db363c1 100644
--- a/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc
+++ b/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc
@@ -311,7 +311,7 @@ Status SequenceConstruct::Compute(OpKernelContext* context) const {
     const auto* X = context->Input<Tensor>(input_idx);
     if (input_idx > 0 && X->DataType() != first_dtype) {
       ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
-                      "Violation of the requirment that all input tensors must have the same data type.");
+                      "Violation of the requirement that all input tensors must have the same data type.");
     }
   }

diff --git a/onnxruntime/core/providers/cpu/tensor/unique.cc b/onnxruntime/core/providers/cpu/tensor/unique.cc
index 7232fc9..6d8b549 100644
--- a/onnxruntime/core/providers/cpu/tensor/unique.cc
+++ b/onnxruntime/core/providers/cpu/tensor/unique.cc
@@ -53,7 +53,7 @@ ONNX_OPERATOR_SET_SCHEMA(
             1,
             "indices",
             "A 1-D INT64 tensor "
-            "containing indices of 'Y' elements' first occurance in 'X'. "
+            "containing indices of 'Y' elements' first occurrence in 'X'. "
             "When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. "
             "When 'axis' is not provided, it contains indices to values in the flattened input tensor. ",
             "tensor(int64)",
diff --git a/onnxruntime/core/providers/openvino/ov_versions/capability_2020_3.cc b/onnxruntime/core/providers/openvino/ov_versions/capability_2020_3.cc
index 5bde56e..6dc2963 100644
--- a/onnxruntime/core/providers/openvino/ov_versions/capability_2020_3.cc
+++ b/onnxruntime/core/providers/openvino/ov_versions/capability_2020_3.cc
@@ -224,7 +224,7 @@ static bool IsUnsupportedOpMode(const Node* node, const GraphViewer& graph_viewe
       return true;
   } else if (optype == "TopK") {
     //TopK opset 10 is currently not supported.
-    //K as input is currently not suppported.
+    //K as input is currently not supported.
     return node->InputDefs().size() > 1;
   } else if (optype == "ReduceMin") {
     //Only FP32, INT32 and U8 data types are supported
diff --git a/onnxruntime/core/providers/rknpu/shaper.h b/onnxruntime/core/providers/rknpu/shaper.h
index ac093e1..9a60912 100644
--- a/onnxruntime/core/providers/rknpu/shaper.h
+++ b/onnxruntime/core/providers/rknpu/shaper.h
@@ -10,7 +10,7 @@ namespace onnxruntime {
 namespace rknpu {

 /**
- * Help to caculate the output shape of nodes.
+ * Help to calculate the output shape of nodes.
  */
 class Shaper {
  public:
diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc
index 42d4858..a8def44 100644
--- a/onnxruntime/python/onnxruntime_pybind_state.cc
+++ b/onnxruntime/python/onnxruntime_pybind_state.cc
@@ -1362,7 +1362,7 @@ void addObjectMethods(py::module& m, Environment& env) {
       .value("CPU", OrtMemType::OrtMemTypeCPU)
       .value("DEFAULT", OrtMemType::OrtMemTypeDefault);

-  py::class_<OrtDevice> device(m, "OrtDevice", R"pbdoc(ONNXRuntime device informaion.)pbdoc");
+  py::class_<OrtDevice> device(m, "OrtDevice", R"pbdoc(ONNXRuntime device information.)pbdoc");
   device.def(py::init<OrtDevice::DeviceType, OrtDevice::MemoryType, OrtDevice::DeviceId>())
       .def("device_id", &OrtDevice::Id, R"pbdoc(Device Id.)pbdoc")
       .def("device_type", &OrtDevice::Type, R"pbdoc(Device Type.)pbdoc")
diff --git a/orttraining/orttraining/core/framework/pipeline.cc b/orttraining/orttraining/core/framework/pipeline.cc
index 0ff29bd..5fc8bf9 100644
--- a/orttraining/orttraining/core/framework/pipeline.cc
+++ b/orttraining/orttraining/core/framework/pipeline.cc
@@ -190,7 +190,7 @@ std::vector<int> PipelineScheduler::FindForwardComputeTime(const std::vector<int
       }

       if (s > 0 && t <= forward_time.at(s - 1)) {
-        // Foward of the s-th stage must happen after forward of (s-1)-th stage.
+        // Forward of the s-th stage must happen after forward of (s-1)-th stage.
         // Note that forward_time[s] is the time slot of the s-th stage.
         continue;
       }
diff --git a/orttraining/orttraining/python/training/ortmodule/_execution_agent.py b/orttraining/orttraining/python/training/ortmodule/_execution_agent.py
index 878b073..3cf5a6e 100644
--- a/orttraining/orttraining/python/training/ortmodule/_execution_agent.py
+++ b/orttraining/orttraining/python/training/ortmodule/_execution_agent.py
@@ -82,7 +82,7 @@ class TrainingAgent(object):
                  providers=None, provider_options=None):
         """
         :param path_or_bytes: filename or serialized ONNX or ORT format model in a byte string
-        :param fw_feed_names: Feed names for foward pass.
+        :param fw_feed_names: Feed names for forward pass.
         :param fw_outputs_device_info: Device info for fetches in forward pass.
         :param bw_fetches_names: Fetch names for backward pass.
         :param bw_outputs_device_info: Device info for fetches in backward pass.
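
If anyone wants to apply this locally in the meantime, here is a minimal sketch, assuming the diff above is saved to a file named fix-spelling.patch (the filename is illustrative):

# from the root of an onnxruntime checkout
git apply --check fix-spelling.patch   # dry run: confirm the patch applies cleanly
git apply fix-spelling.patch           # apply the changes to the working tree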
codemzs commented 3 years ago

Hi @m-roberts, thanks for fixing the spelling mistakes, but please open a PR according to these guidelines to submit the change.

m-roberts commented 3 years ago

As I said, I am unable to do so.

codemzs commented 3 years ago

No problem, feel free to open a PR per the guidelines whenever you are able to do so. We don't accept patches over GitHub issues, but thank you again for the patch.
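
For anyone picking this up later, a minimal sketch of turning the patch above into a PR once a fork is available, following the standard GitHub fork workflow (the fork URL, branch name, and patch filename are illustrative):

# clone the fork and create a topic branch
git clone https://github.com/<your-username>/onnxruntime.git
cd onnxruntime
git checkout -b fix-spelling-errors

# apply the patch saved from the comment above, then commit and push
git apply ../fix-spelling.patch
git commit -am "Fix spelling errors"
git push -u origin fix-spelling-errors

# finally, open a pull request against microsoft/onnxruntime on GitHub,
# following the project's contributing guidelines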