Add docstring and typehint
Signed-off-by: Chang Liu (Enterprise Products) <9713593+chang-l@users.noreply.github.com>
chang-l committed Nov 8, 2025
commit f3dbbb8c07730bafbe79d395eaae4a0093f3cb07
tensorrt_llm/_torch/attention_backend/sparse/dsa.py (3 changes: 2 additions & 1 deletion)
@@ -574,7 +574,8 @@ def update_for_spec_dec(self):
 
 
 @maybe_compile(dynamic=True)
-def _scale(weights, q_scale, s):
+def _scale(weights: torch.Tensor, q_scale: torch.Tensor,
+           s: float) -> torch.Tensor:
     return weights * q_scale.squeeze(-1) * s
 
 
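For context, a hypothetical call showing the contract the new annotations encode. The shapes below are illustrative assumptions, not taken from the PR; the key point is that q_scale carries a trailing singleton dimension that squeeze(-1) removes before broadcasting against weights:

import torch

weights = torch.randn(4, 128, 64)    # hypothetical attention-weight shape
q_scale = torch.rand(4, 128, 64, 1)  # per-element scale with a trailing dim of 1
s = 0.125                            # a plain Python float, e.g. a softmax scale

out = _scale(weights, q_scale, s)    # _scale as defined in dsa.py above
assert out.shape == weights.shape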
tensorrt_llm/_torch/utils.py (9 changes: 9 additions & 0 deletions)
@@ -328,6 +328,15 @@ def get_device_uuid(device_idx: int) -> str:
 
 
 def maybe_compile(func=None, **compile_kwargs):
+    """
+    Conditionally compile a function with torch.compile.
+    The function is compiled only when is_piecewise_running() returns True.
+    Args:
+        func: The function to decorate (optional, for direct decoration).
+        **compile_kwargs: Keyword arguments forwarded to torch.compile.
+    Returns:
+        The conditionally compiled function.
+    """
 
     def decorator(f):
 
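The rest of maybe_compile is not shown in this hunk. Here is a minimal sketch of how a decorator matching this signature and docstring could be structured; the is_piecewise_running stub and the per-call dispatch are assumptions, not the actual implementation in tensorrt_llm/_torch/utils.py:

import functools
import torch

def is_piecewise_running() -> bool:
    # Stand-in for the real runtime check named in the docstring.
    return False

def maybe_compile(func=None, **compile_kwargs):
    def decorator(f):
        # torch.compile wraps lazily: creating the compiled callable here is
        # cheap, and actual compilation is deferred to its first invocation.
        compiled = torch.compile(f, **compile_kwargs)

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Check the flag per call so it can change at runtime.
            if is_piecewise_running():
                return compiled(*args, **kwargs)
            return f(*args, **kwargs)

        return wrapper

    # Support both bare @maybe_compile and parameterized @maybe_compile(...).
    return decorator(func) if func is not None else decorator

The func=None default is what allows both usages; in the dsa.py hunk above it is applied in the parameterized form, @maybe_compile(dynamic=True), with dynamic forwarded to torch.compile.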