
Commit 1270c78

oulgen authored and pytorchmergebot committed
Add logging for num_triton_bundles (pytorch#139807)
Summary: Adding logs for the number of Inductor cache Triton bundles.

Test Plan: Ran ad hoc code and looked at dynamo_compile/sandbox: https://fburl.com/scuba/dynamo_compile/sandbox/nhktfy19

Differential Revision: D65490826

Pull Request resolved: pytorch#139807
Approved by: https://github.com/masnesral
1 parent 9018326 commit 1270c78
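
The mechanism is a process-wide collections.Counter in torch/_dynamo/utils.py: the Inductor code cache bumps it whenever a cache hit carries a bundle of Triton kernels, and Dynamo reads it back when it logs compilation metrics. A minimal, self-contained sketch of that pattern (the counter and key names mirror the diff below; everything else is illustrative, not the real PyTorch code):

import collections
from typing import Optional

# Module-level counter shared between the code cache and the compile
# logger, as torch/_dynamo/utils.py does with `codecache_metrics`.
codecache_metrics: collections.Counter = collections.Counter()

def fake_compile(bundles_loaded: int) -> Optional[int]:
    # Reset per compilation so counts never leak across frames,
    # mirroring the clear() call added to convert_frame.py.
    codecache_metrics.clear()
    for _ in range(bundles_loaded):
        # The code cache bumps this on each Triton kernel bundle it loads.
        codecache_metrics["num_triton_bundles"] += 1
    # Read back for logging; .get with a default keeps the metric None
    # when the key was never created during this compilation.
    return codecache_metrics.get("num_triton_bundles", None)

print(fake_compile(0))  # None: the key was never created
print(fake_compile(2))  # 2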

3 files changed: +14 −0


torch/_dynamo/convert_frame.py (+5)

@@ -105,6 +105,7 @@
 from .trace_rules import is_numpy
 from .utils import (
     CleanupManager,
+    codecache_metrics,
     CompilationMetrics,
     counters,
     dynamo_timed,
@@ -973,6 +974,7 @@ def format_guard_failures() -> str:
     fail_user_frame_lineno: Optional[int] = None
     torch._dynamo.utils.ReinplaceCounters.clear()
     guarded_code = None
+    codecache_metrics.clear()
     try:
         guarded_code = compile_inner(code, one_graph, hooks, transform)
         return guarded_code
@@ -1058,6 +1060,7 @@ def format_guard_failures() -> str:
             remote_fx_graph_cache_put_time = frame_phase_timing[frame_key].get(
                 "remote_fx_graph_cache_put", None
             )
+            num_triton_bundles = codecache_metrics.get("num_triton_bundles", None)
             torch._dynamo.utils.ReinplaceCounters.log()
 
         else:
@@ -1078,6 +1081,7 @@ def format_guard_failures() -> str:
             remote_cache_time_saved = None
             remote_fx_graph_cache_get_time = None
             remote_fx_graph_cache_put_time = None
+            num_triton_bundles = None
 
         structured_logging_overhead_s = (
             torch._logging.get_structured_logging_overhead()
@@ -1146,6 +1150,7 @@ def clean_for_json(d: Dict[str, Any]) -> Dict[str, Any]:
             config.specialize_float,
             json.dumps(config_dict),
             True,  # is_forward
+            num_triton_bundles,
             to_int_ms(remote_fx_graph_cache_get_time),
             to_int_ms(remote_fx_graph_cache_put_time),
             start_time_us=start_time_ns // 1000,
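
A detail worth noting in the last hunk: the surrounding metrics are passed positionally, so num_triton_bundles has to occupy exactly the slot that matches the new field's position in CompilationMetrics (right after is_forward; see the utils.py diff below). A toy example of that ordering constraint, where TinyMetrics is a made-up stand-in for the real dataclass:

from dataclasses import dataclass
from typing import Optional

@dataclass
class TinyMetrics:
    is_forward: Optional[bool] = None
    num_triton_bundles: Optional[int] = None  # new field, declared after is_forward
    remote_fx_graph_cache_get_time_ms: Optional[int] = None

# Positional arguments bind in declaration order, so the new value must
# be threaded into the matching slot at every positional call site:
m = TinyMetrics(True, 3, 120)
print(m.num_triton_bundles)  # 3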

torch/_dynamo/utils.py (+6)

@@ -144,6 +144,8 @@
     lambda: collections.defaultdict(float)
 )
 
+codecache_metrics: Counter[str] = collections.Counter()
+
 timer_counter = itertools.count()
 
 
@@ -419,6 +421,9 @@ def dynamo_timed(
             remote_cache_time_saved_s=remote_cache_time_saved,
             structured_logging_overhead_s=structured_logging_overhead_s,
             is_forward=False,  # is_forward
+            num_triton_bundles=codecache_metrics.get(
+                "num_triton_bundles", None
+            ),
             remote_fx_graph_cache_get_time_ms=to_int_ms(
                 remote_fx_graph_cache_get_time
             ),
@@ -899,6 +904,7 @@ class CompilationMetrics:
     specialize_float: Optional[bool] = None
     dynamo_config: Optional[str] = None
     is_forward: Optional[bool] = None
+    num_triton_bundles: Optional[int] = None
     remote_fx_graph_cache_get_time_ms: Optional[int] = None
     remote_fx_graph_cache_put_time_ms: Optional[int] = None
     start_time_us: Optional[int] = None
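
Both read sites use .get("num_triton_bundles", None) rather than indexing. That choice matters for a Counter: indexing a missing key yields 0 (without inserting it), while .get with a None default preserves the distinction between "never recorded" and a recorded count. A quick demonstration of that Counter behavior:

import collections

c: collections.Counter = collections.Counter()
print(c["num_triton_bundles"])            # 0: missing keys read as zero
print(c.get("num_triton_bundles", None))  # None: the key still does not exist
c["num_triton_bundles"] += 1
print(c.get("num_triton_bundles", None))  # 1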

torch/_inductor/codecache.py (+3)

@@ -55,6 +55,7 @@
 from torch import SymInt, Tensor
 from torch._dynamo.utils import (
     add_remote_cache_time_saved,
+    codecache_metrics,
     counters,
     dynamo_timed,
     get_chromium_event_logger,
@@ -1150,6 +1151,8 @@ def iterate_over_candidates() -> Generator[CompiledFxGraph, None, None]:
                 logger.add_event_data(
                     "inductor_compile", cached_kernel_names=meta.cached_kernel_names
                 )
+                if len(meta.cached_kernel_names) > 0:
+                    codecache_metrics["num_triton_bundles"] += 1
 
             inductor_meta = autotune_cache.inductor_meta_from_config()
             AutotuneCacheBundler.begin_compile(inductor_meta, code=code)
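
The increment fires only when a cache hit actually carried cached Triton kernel names, so hits without bundled kernels are not counted. A hedged sketch of that condition, with BundleMeta as a hypothetical stand-in for the real metadata object:

import collections
from dataclasses import dataclass, field
from typing import List

codecache_metrics: collections.Counter = collections.Counter()

@dataclass
class BundleMeta:
    cached_kernel_names: List[str] = field(default_factory=list)

def on_cache_hit(meta: BundleMeta) -> None:
    # Count a bundle only if the hit shipped at least one cached kernel.
    if len(meta.cached_kernel_names) > 0:
        codecache_metrics["num_triton_bundles"] += 1

on_cache_hit(BundleMeta([]))                      # no kernels: not counted
on_cache_hit(BundleMeta(["triton_poi_fused_0"]))  # counted
print(codecache_metrics["num_triton_bundles"])    # 1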
