From 0e3f7a96eb7962028322507e35cbc590d3bebe65 Mon Sep 17 00:00:00 2001
From: Utkarsh Singh
Date: Tue, 27 Jan 2026 11:12:26 +0530
Subject: [PATCH] Simplify training progress logging to avoid reliance on global batch_size.

---
 beginner_source/basics/optimization_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/basics/optimization_tutorial.py b/beginner_source/basics/optimization_tutorial.py
index 82bfaa8f07c..73cace5b332 100644
--- a/beginner_source/basics/optimization_tutorial.py
+++ b/beginner_source/basics/optimization_tutorial.py
@@ -163,7 +163,7 @@ def train_loop(dataloader, model, loss_fn, optimizer):
         optimizer.zero_grad()
 
         if batch % 100 == 0:
-            loss, current = loss.item(), batch * batch_size + len(X)
+            loss, current = loss.item(), batch * len(X)
             print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
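
For context, the following is a minimal sketch (not part of the patch) of what the patched progress expression reports inside the logging branch. The synthetic dataset, its length of 200, and batch_size=64 are illustrative assumptions for the sketch, not values taken from the diff above.

# Minimal sketch: what `current = batch * len(X)` evaluates to during iteration.
# The dataset size (200) and batch_size (64) below are assumptions for illustration.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(200, 4), torch.zeros(200))
dataloader = DataLoader(dataset, batch_size=64)
size = len(dataset)

for batch, (X, y) in enumerate(dataloader):
    # With the patched expression, `current` is derived from the current
    # batch's length rather than a module-level batch_size variable.
    current = batch * len(X)
    print(f"[{current:>5d}/{size:>5d}]")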