Skip to content

Commit

Permalink
[ez] Use noquant to make dashboard logic easier (pytorch#1430)
Browse files Browse the repository at this point in the history
Summary:
A small fix to the output JSON results for the llama, sam, and sam2 benchmarks, to make it
easier to calculate the geomean speedup of autoquant vs. noquant

Test Plan:
local test

Reviewers:

Subscribers:

Tasks:

Tags:
  • Loading branch information
jerryzh168 authored Dec 18, 2024
1 parent a5a53a2 commit a03ca99
Show file tree
Hide file tree
Showing 4 changed files with 3 additions and 5 deletions.
2 changes: 1 addition & 1 deletion examples/sam2_amg_server/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -658,7 +658,7 @@ def main(checkpoint_path,
headers = ["name", "dtype", "device", "arch", "metric", "actual", "target"]
name = "sam2-" + model_type
arch = get_arch_name()
dtype = "autoquant" if use_autoquant else ("compile" if fast else "base")
dtype = "autoquant" if use_autoquant else "noquant"
avg_time_per_run, max_memory_allocated_bytes, max_memory_allocated_percentage = result
memory_result = [name, dtype, device, arch, "memory(MiB)", max_memory_allocated_bytes, None]
memory_percent_result = [name, dtype, device, arch, "memory(%)", max_memory_allocated_percentage, None]
Expand Down
2 changes: 1 addition & 1 deletion torchao/_models/llama/generate.py
Original file line number Diff line number Diff line change
Expand Up @@ -940,7 +940,7 @@ def callback(x):
headers = ["name", "dtype", "device", "arch", "metric", "actual", "target"]
name = checkpoint_path.parent.name
arch = get_arch_name()
dtype = quantization or str(precision)
dtype = quantization or "noquant"
memory_result = [name, dtype, device, arch, "mem/s", bandwidth, None]
performance_result = [name, dtype, device, arch, "tok/s", tokpersec, None]
write_json_result = write_json_result_local if output_json_local else write_json_result_ossci
Expand Down
2 changes: 1 addition & 1 deletion torchao/_models/sam/eval_combo.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,7 @@ def mlp_only(mod, name):
headers = ["name", "dtype", "device", "arch", "metric", "actual", "target"]
name = sam_model_type
arch = get_arch_name()
dtype = compress or str(use_half) or "torch.float32"
dtype = compress or "noquant"
memory_result = [name, dtype, device, arch, "memory(MiB)", max_memory_allocated_bytes, None]
performance_result = [name, dtype, device, arch, "img_s(avg)", img_s, None]
write_json_result = write_json_result_local if output_json_local else write_json_result_ossci
Expand Down
2 changes: 0 additions & 2 deletions torchao/_models/sam2/build_sam.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,6 @@ def build_sam2(
"++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
]
# Read config and init model
import os
print("cur path:", os.getcwd())
cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
OmegaConf.resolve(cfg)
model = instantiate(cfg.model, _recursive_=True)
Expand Down

0 comments on commit a03ca99

Please sign in to comment.