
Commit

fix typo
kiszk committed Apr 30, 2024
1 parent e0d2c24 commit 99b70e1
Showing 9 changed files with 11 additions and 11 deletions.
4 changes: 2 additions & 2 deletions torch/_inductor/codecache.py
@@ -389,7 +389,7 @@ def write(
specified_dir: str = "",
) -> Tuple[str, str]:
# use striped content to compute hash so we don't end up with different
-# hashes just because the content begins/ends with differnet number of
+# hashes just because the content begins/ends with different number of
# spaces.
key: str = get_hash(content.strip(), extra, hash_type)
basename, subdir, path = get_path(key, extension, specified_dir)
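
For context, the hunk above explains why write() hashes the stripped content: two sources that differ only in surrounding whitespace should land in the same cache entry. A minimal sketch of that idea, with hashlib standing in for the real get_hash helper:

import hashlib

def content_key(content: str, extra: str = "") -> str:
    # Strip first so leading/trailing whitespace does not change the hash,
    # and therefore does not change the cache path derived from it.
    return hashlib.sha256((content.strip() + extra).encode()).hexdigest()

assert content_key("x = 1\n") == content_key("\n\n  x = 1  \n")
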
@@ -1289,7 +1289,7 @@ def pick_vec_isa() -> VecISA:
if not _valid_vec_isa_list:
return invalid_vec_isa

-# If the simdlen is None, it indicates determin the vectorization length automatically
+# If the simdlen is None, it indicates determine the vectorization length automatically
if config.cpp.simdlen is None:
assert _valid_vec_isa_list
return _valid_vec_isa_list[0]
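
The second hunk touches pick_vec_isa's auto-selection branch: when config.cpp.simdlen is None, the first entry of the valid-ISA list is used. A hedged sketch of that selection, with made-up ISA descriptors rather than the real VecISA objects:

from collections import namedtuple

ISA = namedtuple("ISA", ["name", "bit_width"])

def pick_isa(valid_isas, simdlen=None):
    if not valid_isas:
        return None  # nothing usable on this machine
    if simdlen is None:
        # Auto mode: determine the vectorization length automatically by
        # taking the first (preferred) valid ISA.
        return valid_isas[0]
    # Explicit simdlen: pick the ISA whose width matches, if any.
    return next((isa for isa in valid_isas if isa.bit_width == simdlen), None)

print(pick_isa([ISA("avx512", 512), ISA("avx2", 256)]))        # avx512
print(pick_isa([ISA("avx512", 512), ISA("avx2", 256)], 256))   # avx2
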
2 changes: 1 addition & 1 deletion torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
@@ -15,7 +15,7 @@

def _arg_str(a):
if isinstance(a, sympy.Expr):
-# If this return value containting the _MAGIC_SYMPY_ERROR_STRING
+# If this return value containing the _MAGIC_SYMPY_ERROR_STRING
# is used as part of the final generated C++ code,
# a CUTLASSEVTOpNotImplementedError is raised to indicate that
# the op could not be converted to a valid EVT expression.
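
The comment fixed here describes a sentinel-string pattern: _arg_str returns a placeholder containing _MAGIC_SYMPY_ERROR_STRING for sympy expressions it cannot render, and only if that marker survives into the final generated C++ does the caller raise CUTLASSEVTOpNotImplementedError. A rough sketch of the pattern (everything except the two names quoted in the diff is illustrative):

_MAGIC_SYMPY_ERROR_STRING = "!!sympy-expr-not-supported!!"

class CUTLASSEVTOpNotImplementedError(NotImplementedError):
    pass

def arg_str(a):
    # Symbolic expressions cannot be turned into a C++ literal here, so
    # return a marker instead of failing immediately.
    if not isinstance(a, (int, float, str)):
        return f"{_MAGIC_SYMPY_ERROR_STRING}({a})"
    return str(a)

def finalize(generated_cpp: str) -> str:
    # Fail only if the marker actually ended up in the emitted code.
    if _MAGIC_SYMPY_ERROR_STRING in generated_cpp:
        raise CUTLASSEVTOpNotImplementedError("op could not be converted to a valid EVT expression")
    return generated_cpp
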
4 changes: 2 additions & 2 deletions torch/_inductor/codegen/multi_kernel.py
@@ -54,7 +54,7 @@ def get_all_call_args(call_args_list):
It will fail if any kernel has the same argument passed in multiple times.
Check test_pass_same_arg_multi_times in test_multi_kernel.py
-Instead, we pick the longest call args and assert that otehr call args are
+Instead, we pick the longest call args and assert that other call args are
a subset of it.
"""
return _get_all_args(call_args_list)
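
The docstring in this hunk explains the merge strategy for per-subkernel call arguments: deduplicating breaks when a kernel passes the same argument twice, so instead the longest argument list is taken and every other list is asserted to be a subset of it. A small sketch of that merge over plain lists of argument names:

def merge_call_args(call_args_list):
    # Pick the longest call args as the canonical list ...
    longest = max(call_args_list, key=len)
    # ... and require every other kernel's args to be a subset of it.
    for args in call_args_list:
        assert set(args) <= set(longest), f"{args} not covered by {longest}"
    return longest

print(merge_call_args([["in_ptr0", "out_ptr0"], ["in_ptr0", "out_ptr0", "xnumel"]]))
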
@@ -128,7 +128,7 @@ def call{idx}(need_clone_args=False):
)

# add subkernel src code hashes to the multi-kernel source code so changing a
-# subkernel implementation will result in a differnt py file for
+# subkernel implementation will result in a different py file for
# multi-kernel. This makes cache implementation straightforward since
# we can decide cache file name based on multi-kernel py file name
# directly.
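
The comment in this hunk motivates embedding each subkernel's source hash into the generated multi-kernel wrapper: the wrapper's cache file name is derived from the wrapper source itself, so changing any subkernel changes the wrapper file as well. A hedged sketch of that scheme (the wrapper template is invented):

import hashlib

def short_hash(src: str) -> str:
    return hashlib.sha256(src.encode()).hexdigest()[:16]

def multi_kernel_source(subkernel_sources) -> str:
    # Any edit to a subkernel changes one of these hashes, which changes the
    # wrapper source and therefore the cache file name derived from it.
    header = "# subkernel hashes: " + ", ".join(short_hash(s) for s in subkernel_sources)
    return header + "\ndef multi_kernel_call(*args):\n    ...\n"
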
2 changes: 1 addition & 1 deletion torch/_inductor/codegen/triton.py
@@ -1360,7 +1360,7 @@ def should_use_persistent_reduction(self) -> bool:
}.get(self.reduction_hint, 64)

# If multi_kernel is enabled, we do more aggressive persistent reduction.
-# This may result in some persisent reductions slower than the
+# This may result in some persistent reductions slower than the
# corresponding non-persistent reductions. MultiKernel will do benchmarking
# to pick the faster one.
if config.triton.multi_kernel:
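
The comment in this hunk describes the persistent-reduction heuristic: a per-hint threshold on the reduction length decides whether to emit a persistent kernel, and with multi_kernel enabled the threshold is relaxed because MultiKernel benchmarks both variants and keeps the faster one. A schematic of that decision (threshold values and the scaling factor are illustrative, not the exact ones used by Inductor):

def should_use_persistent_reduction(rnumel_hint, reduction_hint, multi_kernel=False):
    # Per-reduction-hint cap on how long a persistent reduction may be.
    threshold = {"INNER": 1024, "OUTER": 128}.get(reduction_hint, 64)
    if multi_kernel:
        # Be more aggressive: the slower variant is filtered out later by
        # MultiKernel's runtime benchmarking.
        threshold *= 16
    return rnumel_hint <= threshold
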
2 changes: 1 addition & 1 deletion torch/_inductor/decomposition.py
@@ -103,7 +103,7 @@ def register_decomposition(ops):


# TODO: for now, inductor doesn't handle asserts
-# because the condition is symbool -> tensor in the graph.
+# because the condition is symbol -> tensor in the graph.
@register_decomposition([aten._assert_async.msg])
def assert_async_msg_decomp(tensor, msg):
return
2 changes: 1 addition & 1 deletion torch/_inductor/fx_passes/group_batch_fusion.py
@@ -753,7 +753,7 @@ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):

class BatchPointwiseOpsPreGradFusion(BatchPointwiseOpsFusionFactory):
"""
-Batch poinwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
+Batch pointwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
We fuse it in random place, and the introduced stack node may be merged in split cat.
"""

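
The docstring in this hunk is about batching independent pointwise ops in the pre-grad graph: several sigmoid/relu/tanh nodes over same-shaped inputs become one op over a stacked tensor, and the introduced stack may later be merged by the split-cat pass. A tensor-level illustration of why the rewrite is equivalent (plain PyTorch, not the FX pass itself):

import torch

xs = [torch.randn(4, 8) for _ in range(3)]

separate = [torch.sigmoid(x) for x in xs]            # one op per tensor
batched = torch.sigmoid(torch.stack(xs)).unbind(0)   # stack, one op, unbind

assert all(torch.allclose(a, b) for a, b in zip(separate, batched))
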
2 changes: 1 addition & 1 deletion torch/_inductor/ir.py
@@ -1766,7 +1766,7 @@ def wrapper_fn(idx, reduction_idx):
)


-# This signifies a scan op that should go through TritonSplitScanKernel codgen on CUDA.
+# This signifies a scan op that should go through TritonSplitScanKernel codegen on CUDA.
@dataclasses.dataclass
class SplitScan(Scan):
pass
2 changes: 1 addition & 1 deletion torch/_inductor/lowering.py
@@ -548,7 +548,7 @@ def _convert_element_type(x: TensorBox, dtype: torch.dtype):
if dtype.is_complex or x.get_dtype().is_complex:
if x.get_size():
# Decompose since aa aten fallback is more friendly for c++ codegen.
-# This decompostion doesn't work for empty tensor, which needs more investigation.
+# This decomposition doesn't work for empty tensor, which needs more investigation.
dst = empty_like(x, dtype=dtype)
ir.InplaceCopyFallback.create(dst, x)
return dst
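
The hunk above concerns lowering _convert_element_type for complex dtypes by decomposing it into an allocation plus an in-place copy (with the noted caveat that the decomposition does not handle empty tensors). In plain PyTorch terms the decomposition is roughly:

import torch

def convert_element_type_decomposed(x: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # Allocate the destination in the target dtype, then let copy_ do the
    # element-type conversion; mirrors empty_like + InplaceCopyFallback.
    dst = torch.empty_like(x, dtype=dtype)
    dst.copy_(x)
    return dst

print(convert_element_type_decomposed(torch.randn(3), torch.complex64).dtype)  # torch.complex64
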
2 changes: 1 addition & 1 deletion torch/_inductor/runtime/triton_heuristics.py
@@ -721,7 +721,7 @@ def coordinate_descent_tuning(self, launcher, *args, **kwargs):
E.g., assuming regular autotune only get one config C1; while max-autotune get 4 configs C1, C2, C3, C4
and max-autotune figure out C3 is the best.
-Then if coordinate descnt tuning is run with max-autotune disabled, it will start from C1;
+Then if coordinate descent tuning is run with max-autotune disabled, it will start from C1;
while if coordinate descent tuning is run with max-autotune enabled, it will start from C3.
"""
if (
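
The docstring in this hunk spells out the only behavioral difference between running coordinate descent tuning with and without max-autotune: the starting config. A schematic of that choice (the config objects and benchmark callable are stand-ins):

def coordinate_descent_start(configs, max_autotune_enabled, benchmark):
    if max_autotune_enabled:
        # Max-autotune explored several configs (C1..C4 in the example) and
        # coordinate descent starts from the benchmarked best (C3).
        return min(configs, key=benchmark)
    # Regular autotune only produced one config (C1), so start from it.
    return configs[0]
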
