Compare commits


9 Commits

SHA1 Message Date
9af36e7145 man python stinks... 2025-12-25 14:54:08 +00:00
23fddbe5b9 updated comment for PairwiseComparator 2025-12-25 14:53:35 +00:00
acbccebb2c renamed ADAMW_WiDECAY 2025-12-25 14:48:07 +00:00
edf8d46123 wut 2025-12-25 02:13:23 +00:00
4f500e8b4c write all data on training to training log 2025-12-25 02:07:49 +00:00
921e24b451 embeded # of hidden neurons in model save data
added more valves for options.
will update the console every 5 seconds when training now
2025-12-25 01:52:19 +00:00
d46712ff53 make the defaults more sane for the task
its still overkill
2025-12-25 01:23:54 +00:00
cd72cd7052 changes to figure sizes and add labels to embedding chart 2025-12-23 13:44:38 -05:00
755161c152 unused var 2025-12-23 13:29:52 -05:00
2 changed files with 32 additions and 23 deletions

View File

@@ -3,7 +3,7 @@ from torch import nn
 # 2) Number "embedding" network: R -> R^d
 class NumberEmbedder(nn.Module):
-    def __init__(self, d=4, hidden=16):
+    def __init__(self, d=2, hidden=4):
         super().__init__()
         self.net = nn.Sequential(
             nn.Linear(1, hidden),
@@ -14,9 +14,9 @@ class NumberEmbedder(nn.Module):
     def forward(self, x):
         return self.net(x)
 
-# 3) Comparator head: takes (ea, eb, e) -> logit for "a > b"
+# MLP Comparator head: takes (ea, eb, e) -> logit for "a > b"
 class PairwiseComparator(nn.Module):
-    def __init__(self, d=4, hidden=16, k=0.5):
+    def __init__(self, d=2, hidden=4, k=0.5):
         super().__init__()
         self.log_k = nn.Parameter(torch.tensor([k]))
         self.embed = NumberEmbedder(d, hidden)
@@ -29,7 +29,7 @@ class PairwiseComparator(nn.Module):
         )
 
     def forward(self, a, b):
-        # trying to force antisym here: h(a,b)=h(b,a)
+        # trying to force antisym here: h(a,b)=-h(b,a)
         phi = self.head(self.embed(a-b))
         phi_neg = self.head(self.embed(b-a))
         logit = phi - phi_neg
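
Note on the construction in this forward pass: because the logit is phi(a-b) - phi(b-a), swapping a and b flips its sign by design, so the model cannot prefer both "a > b" and "b > a". A minimal self-contained sketch of that property, using stand-in modules rather than the repo's exact classes:

```python
import torch
from torch import nn

# Stand-in for the comparator (a sketch, not the repo code).
class TinyComparator(nn.Module):
    def __init__(self, d=2, hidden=4):
        super().__init__()
        self.embed = nn.Sequential(nn.Linear(1, hidden), nn.Tanh(), nn.Linear(hidden, d))
        self.head = nn.Sequential(nn.Linear(d, hidden), nn.Tanh(), nn.Linear(hidden, 1))

    def forward(self, a, b):
        # phi(a-b) - phi(b-a) changes sign when a and b swap, so the logit is antisymmetric.
        phi = self.head(self.embed(a - b))
        phi_neg = self.head(self.embed(b - a))
        return phi - phi_neg

model = TinyComparator()
a, b = torch.randn(5, 1), torch.randn(5, 1)
# Exact negatives of each other (up to float error), even with random weights.
print(torch.allclose(model(a, b), -model(b, a), atol=1e-6))  # True
```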

View File

@@ -18,12 +18,14 @@ DEVICE = torch.accelerator.current_accelerator() if torch.accelerator.is_availab
 # Valves
 DIMENSIONS = 2
-TRAIN_STEPS = 5000
+HIDDEN_NEURONS = 4
+ADAMW_LR = 5e-3
+ADAMW_DECAY = 5e-4
+TRAIN_STEPS = 2000
 TRAIN_BATCHSZ = 8192
 TRAIN_PROGRESS = 10
 BATCH_LOWER = -100.0
 BATCH_UPPER = 100.0
-DO_VERBOSE_EARLY_TRAIN = False
 
 # Files
 MODEL_PATH = "./files/pwcomp.model"
@@ -40,7 +42,6 @@ def parse_training_log(file_path: str) -> pd.DataFrame:
     with lzma.open(file_path, mode='rt') as f:
         text = f.read()
 
     pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
     rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
     df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
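
For reference, the regex in this parser expects log lines shaped like the tlog.write() format in training_entry, e.g. `step=   10 loss=0.5120000 acc=0.7430000`. A small sketch against hypothetical sample text:

```python
import re
import pandas as pd

# Hypothetical sample in the format tlog.write() produces.
text = (
    "step=    0 loss=0.6931472 acc=0.5012000\n"
    "step=   10 loss=0.5120000 acc=0.7430000\n"
)
pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
print(df)  # two rows: step 0 and step 10, with float loss/acc columns
```
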
@@ -54,7 +55,7 @@ def parse_training_log(file_path: str) -> pd.DataFrame:
 # TODO: Move plotting into its own file
 def plt_loss_tstep(df: pd.DataFrame) -> None:
     # Plot 1: Loss
-    plt.figure(figsize=(8, 4))
+    plt.figure(figsize=(10, 6))
     plt.plot(df["step"], df["loss_clamped"])
     plt.yscale("log")
     plt.xlabel("Step")
@@ -70,7 +71,7 @@ def plt_loss_tstep(df: pd.DataFrame) -> None:
 def plt_acc_tstep(df: pd.DataFrame, eps=1e-10) -> None:
     # Plot 2: Accuracy
     df["err"] = (1.0 - df["acc"]).clip(lower=eps)
-    plt.figure(figsize=(8, 4))
+    plt.figure(figsize=(10, 6))
     plt.plot(df["step"], df["err"])
     plt.yscale("log")
     plt.xlabel("Step")
@@ -100,10 +101,14 @@ def plt_embeddings(model: comp_nn.PairwiseComparator) -> None:
     # move data back to CPU for plotting
     embeddings = embeddings.cpu()
     xs = xs.cpu()
 
+    # Plot 3: x vs h(x)
+    plt.figure(figsize=(10, 6))
     for i in range(embeddings.shape[1]):
         plt.plot(xs.squeeze(), embeddings[:, i], label=f"dim {i}")
+    plt.title("x vs h(x)")
+    plt.xlabel("x [input]")
+    plt.ylabel("h(x) [embedding]")
     plt.legend()
     plt.savefig(EMBED_CHART_PATH)
     plt.close()
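
The chart written here traces each embedding dimension h(x)_i across a sweep of inputs x. A rough standalone sketch of the same idea, with a hypothetical untrained embedder and output path standing in for the loaded model and EMBED_CHART_PATH:

```python
import torch
from torch import nn
import matplotlib.pyplot as plt

# Hypothetical untrained embedder; shapes match NumberEmbedder (R -> R^d).
embed = nn.Sequential(nn.Linear(1, 4), nn.Tanh(), nn.Linear(4, 2))

with torch.no_grad():
    xs = torch.linspace(-100.0, 100.0, steps=401).unsqueeze(1)  # (401, 1) inputs
    embeddings = embed(xs)                                      # (401, 2) embedding values

plt.figure(figsize=(10, 6))
for i in range(embeddings.shape[1]):
    plt.plot(xs.squeeze(), embeddings[:, i], label=f"dim {i}")
plt.title("x vs h(x)")
plt.xlabel("x [input]")
plt.ylabel("h(x) [embedding]")
plt.legend()
plt.savefig("embed_chart_demo.png")  # hypothetical output path
plt.close()
```
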
@@ -151,13 +156,15 @@ def training_entry():
     # the seed should initialized normally otherwise
     set_seed(0)
-    model = comp_nn.PairwiseComparator(d=DIMENSIONS).to(DEVICE)
-    opt = torch.optim.AdamW(model.parameters(), lr=8e-4, weight_decay=1e-3)
+    model = comp_nn.PairwiseComparator(d=DIMENSIONS, hidden=HIDDEN_NEURONS).to(DEVICE)
+    opt = torch.optim.AdamW(model.parameters(), lr=ADAMW_LR, weight_decay=ADAMW_DECAY)
 
     log.info(f"Using {TRAINING_LOG_PATH} as the logging destination for training...")
     with lzma.open(TRAINING_LOG_PATH, mode='wt') as tlog:
         # training loop
         training_start_time = datetime.datetime.now()
+        last_ack = datetime.datetime.now()
         for step in range(TRAIN_STEPS):
             a, b, y = sample_batch(TRAIN_BATCHSZ)
             a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
@@ -169,15 +176,17 @@ def training_entry():
             loss_fn.backward()
             opt.step()
-            if step % TRAIN_PROGRESS == 0:
-                with torch.no_grad():
-                    pred = (torch.sigmoid(logits) > 0.5).float()
-                    acc = (pred == y).float().mean().item()
-                    tlog.write(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}\n")
+            with torch.no_grad():
+                pred = (torch.sigmoid(logits) > 0.5).float()
+                acc = (pred == y).float().mean().item()
+                tlog.write(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}\n")
             # also print to normal text log occasionally to show some activity.
-            if step % 2500 == 0:
-                log.info(f"still training... step={step} of {TRAIN_STEPS}")
+            # every 10 steps check if its been longer than 5 seconds since we've updated the user
+            if step % 10 == 0:
+                if (datetime.datetime.now() - last_ack).total_seconds() > 5:
+                    log.info(f"still training... step={step} of {TRAIN_STEPS}")
+                    last_ack = datetime.datetime.now()
 
     training_end_time = datetime.datetime.now()
     log.info(f"Training steps complete. Start time: {training_start_time} End time: {training_end_time}")
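
The new progress logic trades the fixed `step % 2500` print for a wall-clock check: every 10 steps it looks at the time since the last acknowledgement and only logs if more than 5 seconds have passed. A minimal standalone sketch of the pattern, with hypothetical names and a sleep standing in for the optimizer step:

```python
import datetime
import logging
import time

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("train")

TRAIN_STEPS = 2000
last_ack = datetime.datetime.now()

for step in range(TRAIN_STEPS):
    time.sleep(0.01)  # stand-in for sample_batch / forward / backward / opt.step()
    # Cheap modulo check every 10 steps; only log once 5+ seconds have elapsed.
    if step % 10 == 0:
        if (datetime.datetime.now() - last_ack).total_seconds() > 5:
            log.info(f"still training... step={step} of {TRAIN_STEPS}")
            last_ack = datetime.datetime.now()
```
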
@@ -193,14 +202,14 @@ def training_entry():
     log.info(f"Final test acc: {acc} errors: {errors}")
 
     # embed model dimensions into the model serialization
-    torch.save({"state_dict": model.state_dict(), "d": DIMENSIONS}, MODEL_PATH)
+    torch.save({"state_dict": model.state_dict(), "d": DIMENSIONS, "h": HIDDEN_NEURONS}, MODEL_PATH)
     log.info(f"Saved PyTorch Model State to {MODEL_PATH}")
 
 
 def infer_entry():
     get_torch_info()
 
     model_ckpt = torch.load(MODEL_PATH, map_location=DEVICE)
-    model = comp_nn.PairwiseComparator(d=model_ckpt["d"]).to(DEVICE)
+    model = comp_nn.PairwiseComparator(d=model_ckpt["d"], hidden=model_ckpt["h"]).to(DEVICE)
     model.load_state_dict(model_ckpt["state_dict"])
     model.eval()
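
Embedding `d` and `h` in the checkpoint lets the inference and graphing entry points rebuild a matching architecture without relying on the module-level valves. A minimal round-trip sketch, assuming the comp_nn module from the first file and a hypothetical file name:

```python
import torch
import comp_nn  # module from the first file in this diff

# Save: keep the architecture hyperparameters next to the weights.
model = comp_nn.PairwiseComparator(d=2, hidden=4)
torch.save({"state_dict": model.state_dict(), "d": 2, "h": 4}, "pwcomp_demo.model")

# Load: rebuild the same shape from the checkpoint before restoring weights.
ckpt = torch.load("pwcomp_demo.model", map_location="cpu")
restored = comp_nn.PairwiseComparator(d=ckpt["d"], hidden=ckpt["h"])
restored.load_state_dict(ckpt["state_dict"])
restored.eval()
```
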
@@ -226,7 +235,7 @@ def graphs_entry():
     get_torch_info()
 
     model_ckpt = torch.load(MODEL_PATH, map_location=DEVICE)
-    model = comp_nn.PairwiseComparator(d=model_ckpt["d"]).to(DEVICE)
+    model = comp_nn.PairwiseComparator(d=model_ckpt["d"], hidden=model_ckpt["h"]).to(DEVICE)
     model.load_state_dict(model_ckpt["state_dict"])
     model.eval()