moved output_graphs into main script and restructured

training log is separate from normal output and is compressed; slightly adjusted lr; made final test stage test 4x TRAIN_BATCHSZ samples
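With this change the per-step metrics no longer go to the plain-text log; they are written xz-compressed to ./files/training.log.xz. Besides the script's new graphs mode, the file can be inspected with nothing but the standard library; a minimal sketch:

import lzma

# print the last few metric lines from the compressed training log
with lzma.open("./files/training.log.xz", mode="rt") as f:
    for line in f.readlines()[-5:]:
        print(line, end="")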
@@ -2,11 +2,16 @@
 # pairwise_compare.py
 import logging
 import random
+import lzma
 
 import torch
 from torch import nn
 from torch.nn import functional as F
 
+import re
+import pandas as pd
+import matplotlib.pyplot as plt
+
 import pairwise_comp_nn as comp_nn
 
 # early pytorch device setup
@@ -20,13 +25,60 @@ TRAIN_PROGRESS = 10
 BATCH_LOWER = -100.0
 BATCH_UPPER = 100.0
 DO_VERBOSE_EARLY_TRAIN = False
 
 # Files
 MODEL_PATH = "./files/pwcomp.model"
 LOGGING_PATH = "./files/output.log"
 EMBED_CHART_PATH = "./files/embedding_chart.png"
-EMBEDDINGS_DATA = "./files/embedding_data.csv"
+EMBEDDINGS_DATA_PATH = "./files/embedding_data.csv"
+TRAINING_LOG_PATH = "./files/training.log.xz"
 
-def plt_embeddings(model: comp_nn.PairwiseComparator):
-    import matplotlib.pyplot as plt
+def parse_training_log(file_path: str) -> pd.DataFrame:
+    text: str = ""
+    with lzma.open(file_path, mode='rt') as f:
+        text = f.read()
+
+
+    pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
+    rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
+    df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
+
+    # Avoid log(0) issues for loss plot by clamping at a tiny positive value
+    eps = 1e-10
+    df["loss_clamped"] = df["loss"].clip(lower=eps)
+
+    return df
+
+def plt_loss_tstep(df: pd.DataFrame) -> None:
+    # Plot 1: Loss
+    plt.figure(figsize=(8, 4))
+    plt.plot(df["step"], df["loss_clamped"])
+    plt.yscale("log")
+    plt.xlabel("Step")
+    plt.ylabel("Loss (log scale)")
+    plt.title("Training Loss vs Step")
+    plt.tight_layout()
+    plt.savefig('./files/training_loss_v_step.png')
+    plt.close()
+
+    return None
+
+def plt_acc_tstep(df: pd.DataFrame, eps=1e-10) -> None:
+    # Plot 2: Accuracy
+    df["err"] = (1.0 - df["acc"]).clip(lower=eps)
+    plt.figure(figsize=(8, 4))
+    plt.plot(df["step"], df["err"])
+    plt.yscale("log")
+    plt.xlabel("Step")
+    plt.ylabel("Error rate (1 - accuracy) (log scale)")
+    plt.title("Training Error Rate vs Step")
+    plt.tight_layout()
+    plt.savefig('./files/training_error_v_step.png')
+    plt.close()
+
+    return None
+
+def plt_embeddings(model: comp_nn.PairwiseComparator) -> None:
+    import csv
+
     log.info("Starting embeddings sweep...")
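The pattern above is written to match the metric lines that tlog.write() emits later in this diff (f"step={step:5d} loss=... acc=..."). A quick standalone check of that assumption:

import re

pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
sample = "step=  100 loss=0.0012345 acc=0.9876543\n"
# findall returns the captured groups as strings
print(pattern.findall(sample))  # [('100', '0.0012345', '0.9876543')]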
@@ -35,7 +87,7 @@ def plt_embeddings(model: comp_nn.PairwiseComparator):
     xs = torch.arange(
         BATCH_LOWER,
         BATCH_UPPER + 1.0,
-        1.0,
+        0.1,
     ).unsqueeze(1).to(DEVICE)  # shape: (N, 1)
 
     embeddings = model.embed(xs)  # shape: (N, d)
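The step change from 1.0 to 0.1 makes the embeddings sweep ten times denser; torch.arange is half-open, so the + 1.0 on the upper bound keeps 100.0 in range. A quick shape check:

import torch

xs = torch.arange(-100.0, 101.0, 0.1).unsqueeze(1)
print(xs.shape)  # torch.Size([2010, 1]); the old 1.0 step gave (201, 1)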
@@ -49,13 +101,17 @@ def plt_embeddings(model: comp_nn.PairwiseComparator):
 
     plt.legend()
     plt.savefig(EMBED_CHART_PATH)
     #plt.show()
     plt.close()
 
     # save all our embeddings data to csv
     csv_data = list(zip(xs.squeeze().tolist(), embeddings.tolist()))
-    with open(file=EMBEDDINGS_DATA, mode="w", newline='') as f:
+    with open(file=EMBEDDINGS_DATA_PATH, mode="w", newline='') as f:
         csv_file = csv.writer(f)
         csv_file.writerows(csv_data)
 
     return None
 
-def get_torch_info():
+def get_torch_info() -> None:
     log.info("PyTorch Version: %s", torch.__version__)
     log.info("HIP Version: %s", torch.version.hip)
     log.info("CUDA support: %s", torch.cuda.is_available())
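One quirk of the csv block above: each row handed to writerows() is a (float, list) tuple, so the whole embedding vector lands in a single quoted column as the list's string repr. A tiny demonstration:

import csv
import io

buf = io.StringIO()
csv.writer(buf).writerows([(0.5, [0.1, 0.2])])
print(buf.getvalue())  # 0.5,"[0.1, 0.2]"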
@@ -65,14 +121,14 @@ def get_torch_info():
 
     log.info("Using %s compute mode", DEVICE)
 
-def set_seed(seed: int):
+def set_seed(seed: int) -> None:
     random.seed(seed)
     torch.manual_seed(seed)
     if torch.cuda.is_available():
         torch.cuda.manual_seed_all(seed)
 
-# 1) Data: pairs (a, b) with label y = 1 if a > b else 0
-def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER):
+# Data: pairs (a, b) with label y = 1 if a > b else 0 -> (a,b,y)
+def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER, epsi=1e-4) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     a = (high - low) * torch.rand(batch_size, 1) + low
     b = (high - low) * torch.rand(batch_size, 1) + low
 
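The rest of sample_batch falls outside this hunk. Based on the comment above it, the labels are presumably formed along these lines (a hypothetical sketch, not the repo's code; how the new epsi parameter is used, perhaps to filter near-ties, is not visible here):

import torch

def sample_batch_sketch(batch_size: int, low: float = -100.0, high: float = 100.0):
    # uniform scalars in [low, high), shape (batch_size, 1)
    a = (high - low) * torch.rand(batch_size, 1) + low
    b = (high - low) * torch.rand(batch_size, 1) + low
    y = (a > b).float()  # y = 1.0 where a > b, else 0.0
    return a, b, y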
@@ -88,34 +144,39 @@ def training_entry():
     set_seed(0)
 
     model = comp_nn.PairwiseComparator(d=DIMENSIONS).to(DEVICE)
-    opt = torch.optim.AdamW(model.parameters(), lr=9e-4, weight_decay=1e-3)
+    opt = torch.optim.AdamW(model.parameters(), lr=8e-4, weight_decay=1e-3)
 
-    # 4) Train
-    for step in range(TRAIN_STEPS):
-        a, b, y = sample_batch(TRAIN_BATCHSZ)
-        a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
+    log.info(f"Using {TRAINING_LOG_PATH} as the logging destination for training...")
+    with lzma.open(TRAINING_LOG_PATH, mode='wt') as tlog:
+        # training loop
+        training_start_time = datetime.datetime.now()
+        for step in range(TRAIN_STEPS):
+            a, b, y = sample_batch(TRAIN_BATCHSZ)
+            a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
 
-        logits = model(a, b)
-        loss_fn = F.binary_cross_entropy_with_logits(logits, y)
+            logits = model(a, b)
+            loss_fn = F.binary_cross_entropy_with_logits(logits, y)
 
-        opt.zero_grad()
-        loss_fn.backward()
-        opt.step()
+            opt.zero_grad()
+            loss_fn.backward()
+            opt.step()
 
-        if step <= TRAIN_PROGRESS and DO_VERBOSE_EARLY_TRAIN is True:
-            with torch.no_grad():
-                pred = (torch.sigmoid(logits) > 0.5).float()
-                acc = (pred == y).float().mean().item()
-                log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
-        elif step % TRAIN_PROGRESS == 0:
-            with torch.no_grad():
-                pred = (torch.sigmoid(logits) > 0.5).float()
-                acc = (pred == y).float().mean().item()
-                log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
+            if step % TRAIN_PROGRESS == 0:
+                with torch.no_grad():
+                    pred = (torch.sigmoid(logits) > 0.5).float()
+                    acc = (pred == y).float().mean().item()
+                    tlog.write(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}\n")
 
-    # 5) Quick test: evaluate final model accuracy on fresh pairs
+            # also print to normal text log occasionally to show some activity.
+            if step % 2500 == 0:
+                log.info(f"still training... step={step} of {TRAIN_STEPS}")
+
+    training_end_time = datetime.datetime.now()
+    log.info(f"Training steps complete. Start time: {training_start_time} End time: {training_end_time}")
+
+    # evaluate final model accuracy on fresh pairs
     with torch.no_grad():
-        a, b, y = sample_batch(TRAIN_BATCHSZ)
+        a, b, y = sample_batch(TRAIN_BATCHSZ*4)
         a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
         logits = model(a, b)
         pred = (torch.sigmoid(logits) > 0.5).float()
@@ -174,12 +235,17 @@ if __name__ == '__main__':
     # alt call pattern
     # python3 pairwise_compare.py train
    # python3 pairwise_compare.py infer
+    # python3 pairwise_compare.py graphs
     if len(sys.argv) > 1:
         mode = sys.argv[1].strip().lower()
         if mode == "train":
             training_entry()
         elif mode == "infer":
             infer_entry()
+        elif mode == "graphs":
+            data = parse_training_log(TRAINING_LOG_PATH)
+            plt_loss_tstep(data)
+            plt_acc_tstep(data)
         else:
             log.error(f"Unknown operation: {mode}")
             log.error("Invalid call syntax, call script as \"train.py\" or \"infer.py\" or as pairwise_compare.py <mode> where mode is \"train\" or \"infer\"")
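pairwise_comp_nn is not part of this diff. For orientation only, here is a minimal hypothetical comparator consistent with how it is called above (comp_nn.PairwiseComparator(d=DIMENSIONS), model.embed(xs) returning (N, d), and model(a, b) returning one logit per pair); the real module's architecture may well differ:

import torch
from torch import nn

# hypothetical stand-in for pairwise_comp_nn.PairwiseComparator
class PairwiseComparator(nn.Module):
    def __init__(self, d: int):
        super().__init__()
        # embed a scalar input into d dimensions
        self.embedder = nn.Sequential(nn.Linear(1, d), nn.ReLU(), nn.Linear(d, d))
        # one logit per (a, b) pair from the concatenated embeddings
        self.head = nn.Linear(2 * d, 1)

    def embed(self, x: torch.Tensor) -> torch.Tensor:
        return self.embedder(x)  # (N, 1) -> (N, d)

    def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return self.head(torch.cat((self.embed(a), self.embed(b)), dim=1))  # (N, 1)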