Compare commits

...

2 Commits

Author SHA1 Message Date
cfef24921d moved output_graphs into main script and restructured
training log is seperate from normal output & is compressed
slightly adjusted lr
made final test stage test 4xTRAIN_BATCHSZ number of samples
2025-12-23 10:18:02 -05:00
9ea8ef3458 default k=0.5 2025-12-23 10:14:08 -05:00
3 changed files with 99 additions and 70 deletions
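
Note on the "training log is seperate from normal output & is compressed" item above: the diff below routes the periodic step/loss/acc lines through lzma into ./files/training.log.xz instead of the plain-text logger. A minimal, self-contained sketch of that .xz text-mode round trip, using a throwaway path and made-up values rather than the repository's files:

import lzma

# Hypothetical path and values, only to show the lzma.open("wt"/"rt") pattern
# that training_entry() and parse_training_log() rely on in the diff below.
demo_path = "demo_training.log.xz"

with lzma.open(demo_path, mode="wt") as tlog:   # write compressed text
    tlog.write("step=    0 loss=0.6931472 acc=0.5000000\n")
    tlog.write("step=   10 loss=0.4512340 acc=0.8125000\n")

with lzma.open(demo_path, mode="rt") as f:      # read it back as text
    print(f.read())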

View File

@@ -1,37 +0,0 @@
-import re
-import pandas as pd
-import matplotlib.pyplot as plt
-
-text = r"""
-"""
-
-pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
-rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
-df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
-
-# Avoid log(0) issues for loss plot by clamping at a tiny positive value
-eps = 1e-10
-df["loss_clamped"] = df["loss"].clip(lower=eps)
-
-# Plot 1: Loss
-plt.figure(figsize=(9, 4.8))
-plt.plot(df["step"], df["loss_clamped"])
-plt.yscale("log")
-plt.xlabel("Step")
-plt.ylabel("Loss (log scale)")
-plt.title("Training Loss vs Step")
-plt.tight_layout()
-plt.savefig('./files/training_loss_v_step.png')
-plt.show()
-
-# Plot 2: Accuracy
-df["err"] = (1.0 - df["acc"]).clip(lower=eps)
-plt.figure(figsize=(9, 4.8))
-plt.plot(df["step"], df["err"])
-plt.yscale("log")
-plt.xlabel("Step")
-plt.ylabel("Error rate (1 - accuracy) (log scale)")
-plt.title("Training Error Rate vs Step")
-plt.tight_layout()
-plt.savefig('./files/training_error_v_step.png')
-plt.show()
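
The removed script above (now folded into pairwise_compare.py as parse_training_log and the two plotting helpers) keys everything off one regex. A small sketch with invented log lines, showing what the regex yields and how the sorted DataFrame comes out; the values are placeholders, not real training output:

import re
import pandas as pd

# Invented sample lines in the shape the training loop writes:
# f"step={step:5d} loss={loss:.7f} acc={acc:.7f}"
text = """step=   20 loss=0.3100000 acc=0.9000000
step=    0 loss=0.6931472 acc=0.5000000
step=   10 loss=0.4500000 acc=0.8500000
"""

pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
print(df)  # rows come back ordered 0, 10, 20 because of sort_values("step")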

View File

@@ -16,7 +16,7 @@ class NumberEmbedder(nn.Module):
 # 3) Comparator head: takes (ea, eb, e) -> logit for "a > b"
 class PairwiseComparator(nn.Module):
-    def __init__(self, d=4, hidden=16, k=1.0):
+    def __init__(self, d=4, hidden=16, k=0.5):
         super().__init__()
         self.log_k = nn.Parameter(torch.tensor([k]))
         self.embed = NumberEmbedder(d, hidden)
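
The only change in this hunk is the default k dropping from 1.0 to 0.5, matching the second commit message. Since it is wrapped in nn.Parameter, 0.5 is only the starting value; the parameter is exposed to the optimizer like the rest of the weights. A toy stand-in (not the repository's PairwiseComparator) showing that registration pattern:

import torch
from torch import nn

class ToyHead(nn.Module):
    # Hypothetical minimal module: a value passed through nn.Parameter
    # becomes a trainable tensor seeded at the constructor default (here 0.5).
    def __init__(self, k=0.5):
        super().__init__()
        self.log_k = nn.Parameter(torch.tensor([k]))

m = ToyHead()
for name, p in m.named_parameters():
    print(name, p.item(), p.requires_grad)  # log_k 0.5 True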

View File

@@ -2,11 +2,16 @@
 # pairwise_compare.py
 import logging
 import random
+import lzma
 import torch
 from torch import nn
 from torch.nn import functional as F
+import re
+import pandas as pd
+import matplotlib.pyplot as plt
 import pairwise_comp_nn as comp_nn

 # early pytorch device setup
@@ -20,13 +25,60 @@ TRAIN_PROGRESS = 10
 BATCH_LOWER = -100.0
 BATCH_UPPER = 100.0
 DO_VERBOSE_EARLY_TRAIN = False
+# Files
 MODEL_PATH = "./files/pwcomp.model"
 LOGGING_PATH = "./files/output.log"
 EMBED_CHART_PATH = "./files/embedding_chart.png"
-EMBEDDINGS_DATA = "./files/embedding_data.csv"
+EMBEDDINGS_DATA_PATH = "./files/embedding_data.csv"
+TRAINING_LOG_PATH = "./files/training.log.xz"

-def plt_embeddings(model: comp_nn.PairwiseComparator):
-    import matplotlib.pyplot as plt
+
+def parse_training_log(file_path: str) -> pd.DataFrame:
+    text: str = ""
+    with lzma.open(file_path, mode='rt') as f:
+        text = f.read()
+    pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
+    rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
+    df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
+    # Avoid log(0) issues for loss plot by clamping at a tiny positive value
+    eps = 1e-10
+    df["loss_clamped"] = df["loss"].clip(lower=eps)
+    return df
+
+
+def plt_loss_tstep(df: pd.DataFrame) -> None:
+    # Plot 1: Loss
+    plt.figure(figsize=(8, 4))
+    plt.plot(df["step"], df["loss_clamped"])
+    plt.yscale("log")
+    plt.xlabel("Step")
+    plt.ylabel("Loss (log scale)")
+    plt.title("Training Loss vs Step")
+    plt.tight_layout()
+    plt.savefig('./files/training_loss_v_step.png')
+    plt.close()
+    return None
+
+
+def plt_acc_tstep(df: pd.DataFrame, eps=1e-10) -> None:
+    # Plot 2: Accuracy
+    df["err"] = (1.0 - df["acc"]).clip(lower=eps)
+    plt.figure(figsize=(8, 4))
+    plt.plot(df["step"], df["err"])
+    plt.yscale("log")
+    plt.xlabel("Step")
+    plt.ylabel("Error rate (1 - accuracy) (log scale)")
+    plt.title("Training Error Rate vs Step")
+    plt.tight_layout()
+    plt.savefig('./files/training_error_v_step.png')
+    plt.close()
+    return None
+
+
+def plt_embeddings(model: comp_nn.PairwiseComparator) -> None:
     import csv

     log.info("Starting embeddings sweep...")
@@ -35,7 +87,7 @@ def plt_embeddings(model: comp_nn.PairwiseComparator):
     xs = torch.arange(
         BATCH_LOWER,
         BATCH_UPPER + 1.0,
-        1.0,
+        0.1,
     ).unsqueeze(1).to(DEVICE)  # shape: (N, 1)

     embeddings = model.embed(xs)  # shape: (N, d)
@@ -49,13 +101,17 @@ def plt_embeddings(model: comp_nn.PairwiseComparator):
     plt.legend()
     plt.savefig(EMBED_CHART_PATH)
-    #plt.show()
+    plt.close()

+    # save all our embeddings data to csv
     csv_data = list(zip(xs.squeeze().tolist(), embeddings.tolist()))
-    with open(file=EMBEDDINGS_DATA, mode="w", newline='') as f:
+    with open(file=EMBEDDINGS_DATA_PATH, mode="w", newline='') as f:
         csv_file = csv.writer(f)
         csv_file.writerows(csv_data)
+    return None


-def get_torch_info():
+def get_torch_info() -> None:
     log.info("PyTorch Version: %s", torch.__version__)
     log.info("HIP Version: %s", torch.version.hip)
     log.info("CUDA support: %s", torch.cuda.is_available())
@@ -65,14 +121,14 @@ def get_torch_info():
     log.info("Using %s compute mode", DEVICE)


-def set_seed(seed: int):
+def set_seed(seed: int) -> None:
     random.seed(seed)
     torch.manual_seed(seed)
     if torch.cuda.is_available():
         torch.cuda.manual_seed_all(seed)


-# 1) Data: pairs (a, b) with label y = 1 if a > b else 0
-def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER):
+# Data: pairs (a, b) with label y = 1 if a > b else 0 -> (a,b,y)
+def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER, epsi=1e-4) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     a = (high - low) * torch.rand(batch_size, 1) + low
     b = (high - low) * torch.rand(batch_size, 1) + low
@@ -88,34 +144,39 @@ def training_entry():
     set_seed(0)
     model = comp_nn.PairwiseComparator(d=DIMENSIONS).to(DEVICE)
-    opt = torch.optim.AdamW(model.parameters(), lr=9e-4, weight_decay=1e-3)
+    opt = torch.optim.AdamW(model.parameters(), lr=8e-4, weight_decay=1e-3)

-    # 4) Train
-    for step in range(TRAIN_STEPS):
-        a, b, y = sample_batch(TRAIN_BATCHSZ)
-        a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
+    log.info(f"Using {TRAINING_LOG_PATH} as the logging destination for training...")
+    with lzma.open(TRAINING_LOG_PATH, mode='wt') as tlog:
+        # training loop
+        training_start_time = datetime.datetime.now()
+        for step in range(TRAIN_STEPS):
+            a, b, y = sample_batch(TRAIN_BATCHSZ)
+            a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)

             logits = model(a, b)
             loss_fn = F.binary_cross_entropy_with_logits(logits, y)

             opt.zero_grad()
             loss_fn.backward()
             opt.step()

-        if step <= TRAIN_PROGRESS and DO_VERBOSE_EARLY_TRAIN is True:
-            with torch.no_grad():
-                pred = (torch.sigmoid(logits) > 0.5).float()
-                acc = (pred == y).float().mean().item()
-                log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
-        elif step % TRAIN_PROGRESS == 0:
-            with torch.no_grad():
-                pred = (torch.sigmoid(logits) > 0.5).float()
-                acc = (pred == y).float().mean().item()
-                log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
+            if step % TRAIN_PROGRESS == 0:
+                with torch.no_grad():
+                    pred = (torch.sigmoid(logits) > 0.5).float()
+                    acc = (pred == y).float().mean().item()
+                    tlog.write(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}\n")

+            # also print to normal text log occasionally to show some activity.
+            if step % 2500 == 0:
+                log.info(f"still training... step={step} of {TRAIN_STEPS}")
+
+    training_end_time = datetime.datetime.now()
+    log.info(f"Training steps complete. Start time: {training_start_time} End time: {training_end_time}")

-    # 5) Quick test: evaluate final model accuracy on fresh pairs
+    # evaluate final model accuracy on fresh pairs
     with torch.no_grad():
-        a, b, y = sample_batch(TRAIN_BATCHSZ)
+        a, b, y = sample_batch(TRAIN_BATCHSZ*4)
         a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
         logits = model(a, b)
         pred = (torch.sigmoid(logits) > 0.5).float()
@@ -174,12 +235,17 @@ if __name__ == '__main__':
     # alt call patern
     # python3 pairwise_compare.py train
     # python3 pairwise_compare.py infer
+    # python3 pairwise_compare.py graphs
     if len(sys.argv) > 1:
         mode = sys.argv[1].strip().lower()
         if mode == "train":
             training_entry()
         elif mode == "infer":
             infer_entry()
+        elif mode == "graphs":
+            data = parse_training_log(TRAINING_LOG_PATH)
+            plt_loss_tstep(data)
+            plt_acc_tstep(data)
         else:
             log.error(f"Unknown operation: {mode}")
             log.error("Invalid call syntax, call script as \"train.py\" or \"infer.py\" or as pairwise_compare.py <mode> where mode is \"train\" or \"infer\"")
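
With the "graphs" mode wired in above, the plots are normally regenerated via python3 pairwise_compare.py graphs after a training run has produced ./files/training.log.xz. Assuming the module imports cleanly, the same helpers can also be called from another script; a sketch under that assumption:

# Regenerate the training plots without going through the CLI dispatch.
# Assumes pairwise_compare.py is importable from the working directory and
# that a prior "train" run already wrote ./files/training.log.xz.
import pairwise_compare as pc

df = pc.parse_training_log(pc.TRAINING_LOG_PATH)  # DataFrame of step/loss/acc
pc.plt_loss_tstep(df)   # writes ./files/training_loss_v_step.png
pc.plt_acc_tstep(df)    # writes ./files/training_error_v_step.png
print(df.tail())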