Compare commits

...

17 Commits

SHA1 Message Date
9af36e7145 man python stinks... 2025-12-25 14:54:08 +00:00
23fddbe5b9 updated comment for PairwiseComparator 2025-12-25 14:53:35 +00:00
acbccebb2c renamed ADAMW_WiDECAY 2025-12-25 14:48:07 +00:00
edf8d46123 wut 2025-12-25 02:13:23 +00:00
4f500e8b4c write all data on training to training log 2025-12-25 02:07:49 +00:00
921e24b451 embedded # of hidden neurons in model save data
added more valves for options.
will update the console every 5 seconds when training now
2025-12-25 01:52:19 +00:00
d46712ff53 make the defaults more sane for the task
it's still overkill
2025-12-25 01:23:54 +00:00
cd72cd7052 changes to figure sizes and add labels to embedding chart 2025-12-23 13:44:38 -05:00
755161c152 unused var 2025-12-23 13:29:52 -05:00
1d70935b64 general clean up & added help text
removed symbolic link calling path
2025-12-23 13:06:07 -05:00
0e2098ceec remove linked files 2025-12-23 12:30:18 -05:00
cfef24921d moved output_graphs into main script and restructured
training log is separate from normal output & is compressed
slightly adjusted lr
made final test stage evaluate 4x TRAIN_BATCHSZ samples
2025-12-23 10:18:02 -05:00
9ea8ef3458 default k=0.5 2025-12-23 10:14:08 -05:00
6e31865a84 added epsilon for equality check
major layout changes in the network
2025-12-22 21:47:16 -05:00
997303028e output plots and csv data for e(x) over the trained range. 2025-12-22 12:23:59 -05:00
c3fbc44a34 add cpu pytorch instructions 2025-12-20 01:59:50 +00:00
5e5ad1bc20 fix log entry for model state saving
adjust default batch size and passes to be a bit more CPU pytorch friendly
2025-12-20 01:54:42 +00:00
6 changed files with 249 additions and 367 deletions

View File

@@ -25,11 +25,11 @@ source .venv/bin/activate
For instructions on installing PyTorch, refer to the [PyTorch Home page]
```bash
pip3 install numpy
# use the nvidia CUDA or CPU only packages if required.
# I use the ROCm packages, so the repo uses the ROCm packages.
pip3 install numpy pandas matplotlib
# I use the ROCm packages
pip3 install torch torchvision --index-url https://download.pytorch.org/whl/rocm6.4
pip3 install pandas matplotlib
# if you need the CPU only package
# pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
```
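
To confirm which build actually got installed (a quick check, not part of the repo; ROCm builds report availability through `torch.cuda` as well, and `torch.version.hip` is non-None only on ROCm), run:
```bash
# prints the torch version, the HIP (ROCm) version if any,
# and whether a GPU backend is usable
python3 -c "import torch; print(torch.__version__, torch.version.hip, torch.cuda.is_available())"
```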
## Running the code

View File

@@ -1 +0,0 @@
pairwise_compare.py

View File

@@ -1,286 +0,0 @@
import re
import pandas as pd
import matplotlib.pyplot as plt
text = r"""INFO:__main__:step= 0 loss=1.3149878 acc=0.9093018
INFO:__main__:step= 100 loss=0.0089776 acc=0.9993286
INFO:__main__:step= 200 loss=0.0088239 acc=0.9996948
INFO:__main__:step= 300 loss=0.0075553 acc=0.9996948
INFO:__main__:step= 400 loss=0.0065352 acc=0.9995728
INFO:__main__:step= 500 loss=0.0053752 acc=0.9997559
INFO:__main__:step= 600 loss=0.0043060 acc=0.9998169
INFO:__main__:step= 700 loss=0.0045364 acc=0.9996338
INFO:__main__:step= 800 loss=0.0037988 acc=0.9996948
INFO:__main__:step= 900 loss=0.0037188 acc=0.9998779
INFO:__main__:step= 1000 loss=0.0034959 acc=0.9996338
INFO:__main__:step= 1100 loss=0.0032190 acc=0.9998169
INFO:__main__:step= 1200 loss=0.0033895 acc=1.0000000
INFO:__main__:step= 1300 loss=0.0031267 acc=0.9998779
INFO:__main__:step= 1400 loss=0.0028533 acc=0.9999390
INFO:__main__:step= 1500 loss=0.0024374 acc=0.9998779
INFO:__main__:step= 1600 loss=0.0025314 acc=0.9998779
INFO:__main__:step= 1700 loss=0.0024157 acc=1.0000000
INFO:__main__:step= 1800 loss=0.0019437 acc=1.0000000
INFO:__main__:step= 1900 loss=0.0019343 acc=0.9997559
INFO:__main__:step= 2000 loss=0.0020423 acc=0.9996948
INFO:__main__:step= 2100 loss=0.0025607 acc=0.9999390
INFO:__main__:step= 2200 loss=0.0020113 acc=0.9998779
INFO:__main__:step= 2300 loss=0.0017351 acc=0.9999390
INFO:__main__:step= 2400 loss=0.0017944 acc=0.9998169
INFO:__main__:step= 2500 loss=0.0016655 acc=0.9998169
INFO:__main__:step= 2600 loss=0.0016493 acc=0.9997559
INFO:__main__:step= 2700 loss=0.0016888 acc=1.0000000
INFO:__main__:step= 2800 loss=0.0017318 acc=0.9998779
INFO:__main__:step= 2900 loss=0.0016884 acc=0.9999390
INFO:__main__:step= 3000 loss=0.0012935 acc=0.9998779
INFO:__main__:step= 3100 loss=0.0011316 acc=1.0000000
INFO:__main__:step= 3200 loss=0.0015423 acc=0.9998169
INFO:__main__:step= 3300 loss=0.0008634 acc=0.9999390
INFO:__main__:step= 3400 loss=0.0005173 acc=1.0000000
INFO:__main__:step= 3500 loss=0.0005730 acc=0.9999390
INFO:__main__:step= 3600 loss=0.0007605 acc=1.0000000
INFO:__main__:step= 3700 loss=0.0006299 acc=0.9999390
INFO:__main__:step= 3800 loss=0.0006295 acc=0.9999390
INFO:__main__:step= 3900 loss=0.0003615 acc=1.0000000
INFO:__main__:step= 4000 loss=0.0004475 acc=1.0000000
INFO:__main__:step= 4100 loss=0.0005678 acc=0.9998779
INFO:__main__:step= 4200 loss=0.0003502 acc=1.0000000
INFO:__main__:step= 4300 loss=0.0005562 acc=0.9998779
INFO:__main__:step= 4400 loss=0.0005300 acc=0.9998779
INFO:__main__:step= 4500 loss=0.0004442 acc=0.9998779
INFO:__main__:step= 4600 loss=0.0002595 acc=1.0000000
INFO:__main__:step= 4700 loss=0.0003925 acc=1.0000000
INFO:__main__:step= 4800 loss=0.0003808 acc=1.0000000
INFO:__main__:step= 4900 loss=0.0003306 acc=0.9999390
INFO:__main__:step= 5000 loss=0.0003823 acc=0.9998779
INFO:__main__:step= 5100 loss=0.0002463 acc=1.0000000
INFO:__main__:step= 5200 loss=0.0003248 acc=0.9999390
INFO:__main__:step= 5300 loss=0.0002426 acc=1.0000000
INFO:__main__:step= 5400 loss=0.0002643 acc=1.0000000
INFO:__main__:step= 5500 loss=0.0003434 acc=1.0000000
INFO:__main__:step= 5600 loss=0.0003985 acc=0.9999390
INFO:__main__:step= 5700 loss=0.0004590 acc=0.9997559
INFO:__main__:step= 5800 loss=0.0002166 acc=1.0000000
INFO:__main__:step= 5900 loss=0.0002622 acc=0.9999390
INFO:__main__:step= 6000 loss=0.0003202 acc=1.0000000
INFO:__main__:step= 6100 loss=0.0003421 acc=1.0000000
INFO:__main__:step= 6200 loss=0.0004393 acc=0.9997559
INFO:__main__:step= 6300 loss=0.0002363 acc=1.0000000
INFO:__main__:step= 6400 loss=0.0000994 acc=1.0000000
INFO:__main__:step= 6500 loss=0.0001811 acc=1.0000000
INFO:__main__:step= 6600 loss=0.0003322 acc=0.9998779
INFO:__main__:step= 6700 loss=0.0002741 acc=0.9999390
INFO:__main__:step= 6800 loss=0.0002755 acc=0.9999390
INFO:__main__:step= 6900 loss=0.0001762 acc=0.9999390
INFO:__main__:step= 7000 loss=0.0002272 acc=0.9998779
INFO:__main__:step= 7100 loss=0.0001781 acc=1.0000000
INFO:__main__:step= 7200 loss=0.0002126 acc=1.0000000
INFO:__main__:step= 7300 loss=0.0002117 acc=0.9998779
INFO:__main__:step= 7400 loss=0.0001614 acc=1.0000000
INFO:__main__:step= 7500 loss=0.0002344 acc=1.0000000
INFO:__main__:step= 7600 loss=0.0003103 acc=0.9998169
INFO:__main__:step= 7700 loss=0.0001102 acc=1.0000000
INFO:__main__:step= 7800 loss=0.0001416 acc=0.9999390
INFO:__main__:step= 7900 loss=0.0001438 acc=0.9999390
INFO:__main__:step= 8000 loss=0.0002020 acc=0.9999390
INFO:__main__:step= 8100 loss=0.0001185 acc=1.0000000
INFO:__main__:step= 8200 loss=0.0001286 acc=0.9999390
INFO:__main__:step= 8300 loss=0.0001579 acc=0.9999390
INFO:__main__:step= 8400 loss=0.0002156 acc=0.9998779
INFO:__main__:step= 8500 loss=0.0001326 acc=1.0000000
INFO:__main__:step= 8600 loss=0.0001097 acc=0.9999390
INFO:__main__:step= 8700 loss=0.0000575 acc=1.0000000
INFO:__main__:step= 8800 loss=0.0001199 acc=1.0000000
INFO:__main__:step= 8900 loss=0.0001446 acc=0.9999390
INFO:__main__:step= 9000 loss=0.0002343 acc=0.9999390
INFO:__main__:step= 9100 loss=0.0000858 acc=1.0000000
INFO:__main__:step= 9200 loss=0.0001535 acc=1.0000000
INFO:__main__:step= 9300 loss=0.0001014 acc=1.0000000
INFO:__main__:step= 9400 loss=0.0000798 acc=1.0000000
INFO:__main__:step= 9500 loss=0.0001623 acc=1.0000000
INFO:__main__:step= 9600 loss=0.0000767 acc=1.0000000
INFO:__main__:step= 9700 loss=0.0002726 acc=0.9998779
INFO:__main__:step= 9800 loss=0.0001945 acc=0.9999390
INFO:__main__:step= 9900 loss=0.0002082 acc=0.9998779
INFO:__main__:step=10000 loss=0.0001320 acc=0.9999390
INFO:__main__:step=10100 loss=0.0002039 acc=0.9999390
INFO:__main__:step=10200 loss=0.0001236 acc=1.0000000
INFO:__main__:step=10300 loss=0.0001641 acc=0.9999390
INFO:__main__:step=10400 loss=0.0001063 acc=1.0000000
INFO:__main__:step=10500 loss=0.0001110 acc=1.0000000
INFO:__main__:step=10600 loss=0.0000836 acc=1.0000000
INFO:__main__:step=10700 loss=0.0001277 acc=0.9999390
INFO:__main__:step=10800 loss=0.0002018 acc=1.0000000
INFO:__main__:step=10900 loss=0.0001056 acc=1.0000000
INFO:__main__:step=11000 loss=0.0001680 acc=1.0000000
INFO:__main__:step=11100 loss=0.0001366 acc=0.9999390
INFO:__main__:step=11200 loss=0.0000372 acc=1.0000000
INFO:__main__:step=11300 loss=0.0001248 acc=0.9999390
INFO:__main__:step=11400 loss=0.0000712 acc=1.0000000
INFO:__main__:step=11500 loss=0.0001172 acc=1.0000000
INFO:__main__:step=11600 loss=0.0000921 acc=1.0000000
INFO:__main__:step=11700 loss=0.0000951 acc=1.0000000
INFO:__main__:step=11800 loss=0.0000610 acc=1.0000000
INFO:__main__:step=11900 loss=0.0000803 acc=1.0000000
INFO:__main__:step=12000 loss=0.0000788 acc=1.0000000
INFO:__main__:step=12100 loss=0.0001272 acc=1.0000000
INFO:__main__:step=12200 loss=0.0000690 acc=1.0000000
INFO:__main__:step=12300 loss=0.0001702 acc=1.0000000
INFO:__main__:step=12400 loss=0.0001313 acc=1.0000000
INFO:__main__:step=12500 loss=0.0000308 acc=1.0000000
INFO:__main__:step=12600 loss=0.0000845 acc=1.0000000
INFO:__main__:step=12700 loss=0.0000732 acc=1.0000000
INFO:__main__:step=12800 loss=0.0000183 acc=1.0000000
INFO:__main__:step=12900 loss=0.0000300 acc=1.0000000
INFO:__main__:step=13000 loss=0.0001123 acc=0.9999390
INFO:__main__:step=13100 loss=0.0000594 acc=1.0000000
INFO:__main__:step=13200 loss=0.0000668 acc=1.0000000
INFO:__main__:step=13300 loss=0.0000843 acc=0.9999390
INFO:__main__:step=13400 loss=0.0000407 acc=1.0000000
INFO:__main__:step=13500 loss=0.0000463 acc=1.0000000
INFO:__main__:step=13600 loss=0.0001134 acc=1.0000000
INFO:__main__:step=13700 loss=0.0000711 acc=1.0000000
INFO:__main__:step=13800 loss=0.0000646 acc=1.0000000
INFO:__main__:step=13900 loss=0.0000137 acc=1.0000000
INFO:__main__:step=14000 loss=0.0000803 acc=1.0000000
INFO:__main__:step=14100 loss=0.0001049 acc=1.0000000
INFO:__main__:step=14200 loss=0.0000583 acc=1.0000000
INFO:__main__:step=14300 loss=0.0000532 acc=1.0000000
INFO:__main__:step=14400 loss=0.0000281 acc=1.0000000
INFO:__main__:step=14500 loss=0.0000641 acc=1.0000000
INFO:__main__:step=14600 loss=0.0000408 acc=1.0000000
INFO:__main__:step=14700 loss=0.0000708 acc=1.0000000
INFO:__main__:step=14800 loss=0.0000410 acc=1.0000000
INFO:__main__:step=14900 loss=0.0000047 acc=1.0000000
INFO:__main__:step=15000 loss=0.0000676 acc=1.0000000
INFO:__main__:step=15100 loss=0.0001132 acc=0.9999390
INFO:__main__:step=15200 loss=0.0000244 acc=1.0000000
INFO:__main__:step=15300 loss=0.0000069 acc=1.0000000
INFO:__main__:step=15400 loss=0.0000572 acc=0.9999390
INFO:__main__:step=15500 loss=0.0001351 acc=1.0000000
INFO:__main__:step=15600 loss=0.0000896 acc=0.9999390
INFO:__main__:step=15700 loss=0.0000167 acc=1.0000000
INFO:__main__:step=15800 loss=0.0000382 acc=1.0000000
INFO:__main__:step=15900 loss=0.0000231 acc=1.0000000
INFO:__main__:step=16000 loss=0.0000428 acc=1.0000000
INFO:__main__:step=16100 loss=0.0000390 acc=1.0000000
INFO:__main__:step=16200 loss=0.0000236 acc=1.0000000
INFO:__main__:step=16300 loss=0.0001501 acc=0.9999390
INFO:__main__:step=16400 loss=0.0000269 acc=1.0000000
INFO:__main__:step=16500 loss=0.0000121 acc=1.0000000
INFO:__main__:step=16600 loss=0.0000089 acc=1.0000000
INFO:__main__:step=16700 loss=0.0000335 acc=1.0000000
INFO:__main__:step=16800 loss=0.0000302 acc=1.0000000
INFO:__main__:step=16900 loss=0.0000183 acc=1.0000000
INFO:__main__:step=17000 loss=0.0000311 acc=1.0000000
INFO:__main__:step=17100 loss=0.0000031 acc=1.0000000
INFO:__main__:step=17200 loss=0.0001091 acc=1.0000000
INFO:__main__:step=17300 loss=0.0000030 acc=1.0000000
INFO:__main__:step=17400 loss=0.0000742 acc=1.0000000
INFO:__main__:step=17500 loss=0.0000403 acc=1.0000000
INFO:__main__:step=17600 loss=0.0000163 acc=1.0000000
INFO:__main__:step=17700 loss=0.0000700 acc=1.0000000
INFO:__main__:step=17800 loss=0.0000477 acc=1.0000000
INFO:__main__:step=17900 loss=0.0000113 acc=1.0000000
INFO:__main__:step=18000 loss=0.0000013 acc=1.0000000
INFO:__main__:step=18100 loss=0.0000353 acc=1.0000000
INFO:__main__:step=18200 loss=0.0000010 acc=1.0000000
INFO:__main__:step=18300 loss=0.0000175 acc=1.0000000
INFO:__main__:step=18400 loss=0.0000156 acc=1.0000000
INFO:__main__:step=18500 loss=0.0000024 acc=1.0000000
INFO:__main__:step=18600 loss=0.0000125 acc=1.0000000
INFO:__main__:step=18700 loss=0.0001110 acc=0.9999390
INFO:__main__:step=18800 loss=0.0000066 acc=1.0000000
INFO:__main__:step=18900 loss=0.0000136 acc=1.0000000
INFO:__main__:step=19000 loss=0.0000629 acc=1.0000000
INFO:__main__:step=19100 loss=0.0000235 acc=1.0000000
INFO:__main__:step=19200 loss=0.0000301 acc=1.0000000
INFO:__main__:step=19300 loss=0.0000246 acc=1.0000000
INFO:__main__:step=19400 loss=0.0000824 acc=0.9999390
INFO:__main__:step=19500 loss=0.0000525 acc=1.0000000
INFO:__main__:step=19600 loss=0.0000315 acc=1.0000000
INFO:__main__:step=19700 loss=0.0000004 acc=1.0000000
INFO:__main__:step=19800 loss=0.0000337 acc=1.0000000
INFO:__main__:step=19900 loss=0.0000544 acc=1.0000000
INFO:__main__:step=20000 loss=0.0000134 acc=1.0000000
INFO:__main__:step=20100 loss=0.0000454 acc=1.0000000
INFO:__main__:step=20200 loss=0.0000668 acc=1.0000000
INFO:__main__:step=20300 loss=0.0000662 acc=1.0000000
INFO:__main__:step=20400 loss=0.0000337 acc=1.0000000
INFO:__main__:step=20500 loss=0.0000238 acc=1.0000000
INFO:__main__:step=20600 loss=0.0000206 acc=1.0000000
INFO:__main__:step=20700 loss=0.0000003 acc=1.0000000
INFO:__main__:step=20800 loss=0.0000557 acc=1.0000000
INFO:__main__:step=20900 loss=0.0000227 acc=1.0000000
INFO:__main__:step=21000 loss=0.0000002 acc=1.0000000
INFO:__main__:step=21100 loss=0.0000290 acc=1.0000000
INFO:__main__:step=21200 loss=0.0000373 acc=1.0000000
INFO:__main__:step=21300 loss=0.0000019 acc=1.0000000
INFO:__main__:step=21400 loss=0.0000635 acc=1.0000000
INFO:__main__:step=21500 loss=0.0000073 acc=1.0000000
INFO:__main__:step=21600 loss=0.0000388 acc=1.0000000
INFO:__main__:step=21700 loss=0.0000002 acc=1.0000000
INFO:__main__:step=21800 loss=0.0000169 acc=1.0000000
INFO:__main__:step=21900 loss=0.0000031 acc=1.0000000
INFO:__main__:step=22000 loss=0.0000075 acc=1.0000000
INFO:__main__:step=22100 loss=0.0000001 acc=1.0000000
INFO:__main__:step=22200 loss=0.0000096 acc=1.0000000
INFO:__main__:step=22300 loss=0.0000068 acc=1.0000000
INFO:__main__:step=22400 loss=0.0000303 acc=1.0000000
INFO:__main__:step=22500 loss=0.0000005 acc=1.0000000
INFO:__main__:step=22600 loss=0.0000111 acc=1.0000000
INFO:__main__:step=22700 loss=0.0000023 acc=1.0000000
INFO:__main__:step=22800 loss=0.0000003 acc=1.0000000
INFO:__main__:step=22900 loss=0.0000424 acc=1.0000000
INFO:__main__:step=23000 loss=0.0000186 acc=1.0000000
INFO:__main__:step=23100 loss=0.0000004 acc=1.0000000
INFO:__main__:step=23200 loss=0.0000085 acc=1.0000000
INFO:__main__:step=23300 loss=0.0000350 acc=1.0000000
INFO:__main__:step=23400 loss=0.0000005 acc=1.0000000
INFO:__main__:step=23500 loss=0.0000538 acc=0.9999390
INFO:__main__:step=23600 loss=0.0000021 acc=1.0000000
INFO:__main__:step=23700 loss=0.0000365 acc=1.0000000
INFO:__main__:step=23800 loss=0.0000281 acc=1.0000000
INFO:__main__:step=23900 loss=0.0000091 acc=1.0000000
INFO:__main__:step=24000 loss=0.0000045 acc=1.0000000
INFO:__main__:step=24100 loss=0.0000023 acc=1.0000000
INFO:__main__:step=24200 loss=0.0000197 acc=1.0000000
INFO:__main__:step=24300 loss=0.0000013 acc=1.0000000
INFO:__main__:step=24400 loss=0.0000174 acc=1.0000000
INFO:__main__:step=24500 loss=0.0000380 acc=1.0000000
INFO:__main__:step=24600 loss=0.0000105 acc=1.0000000
INFO:__main__:step=24700 loss=0.0000001 acc=1.0000000
INFO:__main__:step=24800 loss=0.0000193 acc=1.0000000
INFO:__main__:step=24900 loss=0.0000280 acc=1.0000000
"""
pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
# Avoid log(0) issues for loss plot by clamping at a tiny positive value
eps = 1e-10
df["loss_clamped"] = df["loss"].clip(lower=eps)
# Plot 1: Loss
plt.figure(figsize=(9, 4.8))
plt.plot(df["step"], df["loss_clamped"])
plt.yscale("log")
plt.xlabel("Step")
plt.ylabel("Loss (log scale)")
plt.title("Training Loss vs Step")
plt.tight_layout()
plt.savefig('./files/training_loss_v_step.png')
plt.show()
# Plot 2: Accuracy
df["err"] = (1.0 - df["acc"]).clip(lower=eps)
plt.figure(figsize=(9, 4.8))
plt.plot(df["step"], df["err"])
plt.yscale("log")
plt.xlabel("Step")
plt.ylabel("Error rate (1 - accuracy) (log scale)")
plt.title("Training Error Rate vs Step")
plt.tight_layout()
plt.savefig('./files/training_error_v_step.png')
plt.show()

View File

@@ -3,32 +3,35 @@ from torch import nn
# 2) Number "embedding" network: R -> R^d
class NumberEmbedder(nn.Module):
def __init__(self, d=8):
def __init__(self, d=2, hidden=4):
super().__init__()
self.net = nn.Sequential(
nn.Linear(1, 16),
nn.Linear(1, hidden),
nn.ReLU(),
nn.Linear(16, d),
nn.Linear(hidden, d),
)
def forward(self, x):
return self.net(x)
# 3) Comparator head: takes (ea, eb) -> logit for "a > b"
# MLP Comparator head: maps e(a-b) and e(b-a) to a logit for "a > b"
class PairwiseComparator(nn.Module):
def __init__(self, d=8):
def __init__(self, d=2, hidden=4, k=0.5):
super().__init__()
self.embed = NumberEmbedder(d)
self.log_k = nn.Parameter(torch.tensor([k]))  # learnable output scale; squared in forward() so the factor stays non-negative
self.embed = NumberEmbedder(d, hidden)
self.head = nn.Sequential(
nn.Linear(2 * d + 1, 16),
nn.Linear(d, hidden),
nn.ReLU(),
nn.Linear(16, 1),
nn.Linear(hidden, hidden),
nn.ReLU(),
nn.Linear(hidden, 1),
)
def forward(self, a, b):
ea = self.embed(a)
eb = self.embed(b)
delta_ab = a - b
x = torch.cat([ea, eb, delta_ab], dim=-1)
# trying to force antisym here: h(a,b)=-h(b,a)
phi = self.head(self.embed(a-b))
phi_neg = self.head(self.embed(b-a))
logit = phi - phi_neg
return self.head(x) # logits
return (self.log_k ** 2) * logit
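
The rewritten forward pass builds antisymmetry in structurally: swapping the operands exactly negates the logit, so P(a > b) = 1 - P(b > a), and a tie produces a logit of 0 and probability 0.5 regardless of the weights. A minimal sanity check (a sketch, assuming `pairwise_comp_nn.py` is importable as in the main script):
```python
import torch
import pairwise_comp_nn as comp_nn

model = comp_nn.PairwiseComparator(d=2, hidden=4)
a, b = torch.randn(8, 1), torch.randn(8, 1)
with torch.no_grad():
    # h(a,b) = -h(b,a) holds by construction, not by training
    assert torch.allclose(model(a, b), -model(b, a))
    # comparing a value with itself yields logit 0, i.e. P = 0.5 exactly
    assert torch.allclose(torch.sigmoid(model(a, a)), torch.full((8, 1), 0.5))
print("antisymmetry holds")
```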

View File

@@ -2,27 +2,126 @@
# pairwise_compare.py
import logging
import random
import lzma
import torch
from torch import nn
from torch.nn import functional as F
import re
import pandas as pd
import matplotlib.pyplot as plt
import pairwise_comp_nn as comp_nn
# early pytorch device setup
DEVICE = torch.accelerator.current_accelerator() if torch.accelerator.is_available() else "cpu"
# Valves
DIMENSIONS = 1
TRAIN_STEPS = 25000
TRAIN_BATCHSZ = 16384
TRAIN_PROGRESS = 100
BATCH_LOWER = -512.0
BATCH_UPPER = 512.0
DO_VERBOSE_EARLY_TRAIN = False
DIMENSIONS = 2
HIDDEN_NEURONS = 4
ADAMW_LR = 5e-3
ADAMW_DECAY = 5e-4
TRAIN_STEPS = 2000
TRAIN_BATCHSZ = 8192
TRAIN_PROGRESS = 10
BATCH_LOWER = -100.0
BATCH_UPPER = 100.0
# Files
MODEL_PATH = "./files/pwcomp.model"
LOGGING_PATH = "./files/output.log"
EMBED_CHART_PATH = "./files/embedding_chart.png"
EMBEDDINGS_DATA_PATH = "./files/embedding_data.csv"
TRAINING_LOG_PATH = "./files/training.log.xz"
LOSS_CHART_PATH = "./files/training_loss_v_step.png"
ACC_CHART_PATH = "./files/training_error_v_step.png"
def get_torch_info():
# TODO: Move plotting into its own file
def parse_training_log(file_path: str) -> pd.DataFrame:
text: str = ""
with lzma.open(file_path, mode='rt') as f:
text = f.read()
pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
rows = [(int(s), float(l), float(a)) for s, l, a in pattern.findall(text)]
df = pd.DataFrame(rows, columns=["step", "loss", "acc"]).sort_values("step").reset_index(drop=True)
# Avoid log(0) issues for loss plot by clamping at a tiny positive value
eps = 1e-10
df["loss_clamped"] = df["loss"].clip(lower=eps)
return df
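
Each retained log line yields one (step, loss, acc) triple; run against a single line of the format shown earlier, the pattern behaves like this (a standalone sketch):
```python
import re

pattern = re.compile(r"step=\s*(\d+)\s+loss=([0-9.]+)\s+acc=([0-9.]+)")
line = "step=  100 loss=0.0089776 acc=0.9993286"
print(pattern.findall(line))  # [('100', '0.0089776', '0.9993286')]
```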
# TODO: Move plotting into its own file
def plt_loss_tstep(df: pd.DataFrame) -> None:
# Plot 1: Loss
plt.figure(figsize=(10, 6))
plt.plot(df["step"], df["loss_clamped"])
plt.yscale("log")
plt.xlabel("Step")
plt.ylabel("Loss (log scale)")
plt.title("Training Loss vs Step")
plt.tight_layout()
plt.savefig(LOSS_CHART_PATH)
plt.close()
return None
# TODO: Move plotting into its own file
def plt_acc_tstep(df: pd.DataFrame, eps=1e-10) -> None:
# Plot 2: Accuracy
df["err"] = (1.0 - df["acc"]).clip(lower=eps)
plt.figure(figsize=(10, 6))
plt.plot(df["step"], df["err"])
plt.yscale("log")
plt.xlabel("Step")
plt.ylabel("Error rate (1 - accuracy) (log scale)")
plt.title("Training Error Rate vs Step")
plt.tight_layout()
plt.savefig(ACC_CHART_PATH)
plt.close()
return None
# TODO: Move plotting into its own file
def plt_embeddings(model: comp_nn.PairwiseComparator) -> None:
import csv
log.info("Starting embeddings sweep...")
# samples for embedding mapping
with torch.no_grad():
xs = torch.arange(
BATCH_LOWER,
BATCH_UPPER + 1.0,
0.1,
).unsqueeze(1).to(DEVICE) # shape: (N, 1)
embeddings = model.embed(xs) # shape: (N, d)
# move data back to CPU for plotting
embeddings = embeddings.cpu()
xs = xs.cpu()
# Plot 3: x vs h(x)
plt.figure(figsize=(10, 6))
for i in range(embeddings.shape[1]):
plt.plot(xs.squeeze(), embeddings[:, i], label=f"dim {i}")
plt.title("x vs h(x)")
plt.xlabel("x [input]")
plt.ylabel("h(x) [embedding]")
plt.legend()
plt.savefig(EMBED_CHART_PATH)
plt.close()
# save all our embeddings data to csv
# flatten each embedding vector so the CSV gets one column per dimension
csv_data = [(x, *e) for x, e in zip(xs.squeeze().tolist(), embeddings.tolist())]
with open(file=EMBEDDINGS_DATA_PATH, mode="w", newline='') as f:
csv_file = csv.writer(f)
csv_file.writerows(csv_data)
return None
def get_torch_info() -> None:
log.info("PyTorch Version: %s", torch.__version__)
log.info("HIP Version: %s", torch.version.hip)
log.info("CUDA support: %s", torch.cuda.is_available())
@@ -32,60 +131,69 @@ def get_torch_info():
log.info("Using %s compute mode", DEVICE)
def set_seed(seed: int):
def set_seed(seed: int) -> None:
random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# 1) Data: pairs (a, b) with label y = 1 if a > b else 0
def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER):
# pairs (a, b) with label y = 1 if a > b else 0 -> (a,b,y)
# uses epsi to set the window within which a and b are treated as equal for equality training
def sample_batch(batch_size: int, low=BATCH_LOWER, high=BATCH_UPPER, epsi=1e-4) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
a = (high - low) * torch.rand(batch_size, 1) + low
b = (high - low) * torch.rand(batch_size, 1) + low
# train for if a > b
y = (a > b).float()
epsi = 1e-4
y = torch.where(a > b + epsi, 1.0,
torch.where(a < b - epsi, 0.0, 0.5))
# removed but left for my notes; it seems training for equality hurts classifying results that are ~equal
# when trained only on "if a > b => y", the model produces more accurate results when classifying if things are equal (~.5 prob).
# eq = (a == b).float()
# y = gt + 0.5 * eq
return a, b, y
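
The soft 0.5 label pairs naturally with the BCE-with-logits loss used in the training loop below: for a target of 0.5 the loss is minimized at logit 0, i.e. at P(a > b) = 0.5, which is exactly the behavior wanted on near-equal pairs. A toy illustration of the labeling rule and that minimum (a sketch, not repo code):
```python
import torch
from torch.nn import functional as F

a = torch.tensor([[3.0], [1.0], [2.0]])
b = torch.tensor([[1.0], [2.0], [2.0]])
epsi = 1e-4
y = torch.where(a > b + epsi, 1.0, torch.where(a < b - epsi, 0.0, 0.5))
print(y.squeeze())  # tensor([1.0000, 0.0000, 0.5000])

# for a 0.5 target, BCE-with-logits bottoms out at logit 0 with loss ln(2)
print(F.binary_cross_entropy_with_logits(torch.tensor([0.0]), torch.tensor([0.5])))  # ~0.6931
```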
def training_entry():
get_torch_info()
# all PRNG seeds to 0 for deterministic outputs during testing
# the seed should be initialized normally otherwise
set_seed(0)
model = comp_nn.PairwiseComparator(d=DIMENSIONS).to(DEVICE)
opt = torch.optim.AdamW(model.parameters(), lr=9e-4, weight_decay=1e-3)
model = comp_nn.PairwiseComparator(d=DIMENSIONS, hidden=HIDDEN_NEURONS).to(DEVICE)
opt = torch.optim.AdamW(model.parameters(), lr=ADAMW_LR, weight_decay=ADAMW_DECAY)
# 4) Train
for step in range(TRAIN_STEPS):
a, b, y = sample_batch(TRAIN_BATCHSZ)
a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
log.info(f"Using {TRAINING_LOG_PATH} as the logging destination for training...")
with lzma.open(TRAINING_LOG_PATH, mode='wt') as tlog:
# training loop
training_start_time = datetime.datetime.now()
last_ack = datetime.datetime.now()
logits = model(a, b)
loss_fn = F.binary_cross_entropy_with_logits(logits, y)
for step in range(TRAIN_STEPS):
a, b, y = sample_batch(TRAIN_BATCHSZ)
a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
opt.zero_grad()
loss_fn.backward()
opt.step()
logits = model(a, b)
loss_fn = F.binary_cross_entropy_with_logits(logits, y)
opt.zero_grad()
loss_fn.backward()
opt.step()
if step <= TRAIN_PROGRESS and DO_VERBOSE_EARLY_TRAIN is True:
with torch.no_grad():
pred = (torch.sigmoid(logits) > 0.5).float()
acc = (pred == y).float().mean().item()
log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
elif step % TRAIN_PROGRESS == 0:
with torch.no_grad():
pred = (torch.sigmoid(logits) > 0.5).float()
acc = (pred == y).float().mean().item()
log.info(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}")
tlog.write(f"step={step:5d} loss={loss_fn.item():.7f} acc={acc:.7f}\n")
# 5) Quick test: evaluate final model accuracy on fresh pairs
# also print to normal text log occasionally to show some activity.
# every 10 steps check if it's been longer than 5 seconds since we've updated the user
if step % 10 == 0:
if (datetime.datetime.now() - last_ack).total_seconds() > 5:
log.info(f"still training... step={step} of {TRAIN_STEPS}")
last_ack = datetime.datetime.now()
training_end_time = datetime.datetime.now()
log.info(f"Training steps complete. Start time: {training_start_time} End time: {training_end_time}")
# evaluate final model accuracy on fresh pairs
with torch.no_grad():
a, b, y = sample_batch(TRAIN_BATCHSZ)
a, b, y = sample_batch(TRAIN_BATCHSZ*4)
a, b, y = a.to(DEVICE), b.to(DEVICE), y.to(DEVICE)
logits = model(a, b)
pred = (torch.sigmoid(logits) > 0.5).float()
@@ -94,12 +202,14 @@ def training_entry():
log.info(f"Final test acc: {acc} errors: {errors}")
# embed model dimensions into the model serialization
torch.save({"state_dict": model.state_dict(), "d": DIMENSIONS}, MODEL_PATH)
log.info("Saved PyTorch Model State to model.pth")
torch.save({"state_dict": model.state_dict(), "d": DIMENSIONS, "h": HIDDEN_NEURONS}, MODEL_PATH)
log.info(f"Saved PyTorch Model State to {MODEL_PATH}")
def infer_entry():
get_torch_info()
model_ckpt = torch.load(MODEL_PATH, map_location=DEVICE)
model = comp_nn.PairwiseComparator(d=model_ckpt["d"]).to(DEVICE)
model = comp_nn.PairwiseComparator(d=model_ckpt["d"], hidden=model_ckpt["h"]).to(DEVICE)
model.load_state_dict(model_ckpt["state_dict"])
model.eval()
@@ -115,42 +225,99 @@ def infer_entry():
with torch.no_grad():
probs = torch.sigmoid(model(a, b))
log.info(f"Output probabilities for {pairs.__len__()} pairs")
for (x, y), p in zip(pairs, probs):
log.info(f"P({x} > {y}) = {p.item():.3f}")
def graphs_entry():
get_torch_info()
model_ckpt = torch.load(MODEL_PATH, map_location=DEVICE)
model = comp_nn.PairwiseComparator(d=model_ckpt["d"], hidden=model_ckpt["h"]).to(DEVICE)
model.load_state_dict(model_ckpt["state_dict"])
model.eval()
plt_embeddings(model)
data = parse_training_log(TRAINING_LOG_PATH)
plt_loss_tstep(data)
plt_acc_tstep(data)
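
Taken together, the three entry points form a small pipeline; with the defaults above, a typical session looks like:
```bash
python3 pairwise_compare.py train   # writes ./files/pwcomp.model and ./files/training.log.xz
python3 pairwise_compare.py graphs  # renders the embedding/loss/error charts from that log
python3 pairwise_compare.py infer   # prints P(a > b) for the built-in test pairs
```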
help_text = r"""
pairwise_compare.py — tiny pairwise "a > b?" neural comparator
USAGE
python3 pairwise_compare.py train
Train a PairwiseComparator on synthetic (a,b) pairs sampled uniformly from
[BATCH_LOWER, BATCH_UPPER]. Labels are:
1.0 if a > b + epsi
0.0 if a < b - epsi
0.5 otherwise (near-equality window)
Writes training metrics to:
./files/training.log.xz
Saves the trained model checkpoint to:
./files/pwcomp.model
python3 pairwise_compare.py infer
Load ./files/pwcomp.model and run inference on a built-in list of test pairs.
Prints probabilities as:
P(a > b) = sigmoid(model(a,b))
python3 pairwise_compare.py graphs
Load ./files/pwcomp.model and generate plots + exports:
./files/embedding_chart.png (embed(x) vs x for each embedding dimension)
./files/embedding_data.csv (x and embedding vectors)
./files/training_loss_v_step.png
./files/training_error_v_step.png (1 - acc, log scale)
Requires that ./files/training.log.xz exists (i.e., you ran "train" first).
FILES
./files/output.log General runtime log (info/errors)
./files/pwcomp.model Torch checkpoint: {"state_dict": ..., "d": DIMENSIONS, "h": HIDDEN_NEURONS}
./files/training.log.xz step/loss/acc trace used for plots
NOTES
- DEVICE is chosen via torch.accelerator if available, else CPU.
- Hyperparameters are controlled by the "Valves" constants near the top.
"""
if __name__ == '__main__':
import sys
import os
import datetime
# TODO: tidy up the file paths and the directory-existence check
if not os.path.exists("./files/"):
os.mkdir("./files")
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler(LOGGING_PATH),
logging.StreamHandler(stream=sys.stdout)
])
log = logging.getLogger(__name__)
logging.basicConfig(filename=LOGGING_PATH, level=logging.INFO)
log.info(f"Log file {LOGGING_PATH} opened {datetime.datetime.now()}")
log.info(f"Log opened {datetime.datetime.now()}")
get_torch_info()
name = os.path.basename(sys.argv[0])
if name == 'train.py':
training_entry()
elif name == 'infer.py':
infer_entry()
else:
# alt call pattern
# python3 pairwise_compare.py train
# python3 pairwise_compare.py infer
if len(sys.argv) > 1:
mode = sys.argv[1].strip().lower()
if mode == "train":
# python3 pairwise_compare.py train
# python3 pairwise_compare.py infer
# python3 pairwise_compare.py graphs
if len(sys.argv) > 1:
match sys.argv[1].strip().lower():
case "train":
training_entry()
elif mode == "infer":
case "infer":
infer_entry()
else:
case "graphs":
graphs_entry()
case "help":
log.info(help_text)
case mode:
log.error(f"Unknown operation: {mode}")
log.error("Invalid call syntax, call script as \"train.py\" or \"infer.py\" or as pairwise_compare.py <mode> where mode is \"train\" or \"infer\"")
else:
log.error("Not enough arguments passed to script; call as train.py or infer.py or as pairwise_compare.py <mode> where mode is \"train\" or \"infer\"")
log.error("valid options are one of [\"train\", \"infer\", \"graphs\", \"help\"]")
log.info(help_text)
log.info(f"Log closed {datetime.datetime.now()}")

View File

@@ -1 +0,0 @@
pairwise_compare.py