make the defaults more sane for the task
It's still overkill.
@@ -3,7 +3,7 @@ from torch import nn
 
 # 2) Number "embedding" network: R -> R^d
 class NumberEmbedder(nn.Module):
-    def __init__(self, d=4, hidden=16):
+    def __init__(self, d=2, hidden=4):
         super().__init__()
         self.net = nn.Sequential(
             nn.Linear(1, hidden),
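
For context, here is a minimal runnable sketch of the full embedder that the hunk above implies, using the new defaults (d=2, hidden=4). Only nn.Linear(1, hidden) is visible inside the nn.Sequential, so the ReLU, the hidden -> d output projection, and the forward reshape are assumptions about the unshown lines, not the repository's exact code.

import torch
from torch import nn

# Minimal sketch of the embedder implied by the diff context; the layers
# after nn.Linear(1, hidden) are assumed, since the hunk ends there.
class NumberEmbedder(nn.Module):
    def __init__(self, d=2, hidden=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(1, hidden),
            nn.ReLU(),             # assumed activation
            nn.Linear(hidden, d),  # assumed: maps hidden features to R^d
        )

    def forward(self, x):
        # x: (batch,) raw scalars -> (batch, 1) so nn.Linear sees a feature dim
        return self.net(x.unsqueeze(-1))

Under these assumptions the embedder has just 18 parameters (8 in the first layer, 10 in the projection), which fits the commit's point that even the reduced defaults are overkill for embedding a single scalar.
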
@@ -16,7 +16,7 @@ class NumberEmbedder(nn.Module):
 
 # 3) Comparator head: takes (ea, eb, e) -> logit for "a > b"
 class PairwiseComparator(nn.Module):
-    def __init__(self, d=4, hidden=16, k=0.5):
+    def __init__(self, d=2, hidden=4, k=0.5):
         super().__init__()
         self.log_k = nn.Parameter(torch.tensor([k]))
         self.embed = NumberEmbedder(d, hidden)
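
The comparator's forward pass is not part of this diff, so the sketch below is one plausible completion: a hypothetical linear scoring head on each embedding, with exp(self.log_k) used as a temperature on the score difference. Note that the parameter is named log_k but __init__ seeds it with k itself rather than log(k), so the effective temperature at initialization would be exp(0.5) under this reading.

# One plausible completion of the comparator; the score head and the use of
# exp(log_k) as a temperature are assumptions, not the repository's code.
class PairwiseComparator(nn.Module):
    def __init__(self, d=2, hidden=4, k=0.5):
        super().__init__()
        self.log_k = nn.Parameter(torch.tensor([k]))
        self.embed = NumberEmbedder(d, hidden)
        self.score = nn.Linear(d, 1)  # hypothetical scoring head

    def forward(self, a, b):
        ea, eb = self.embed(a), self.embed(b)
        # logit for "a > b": temperature-scaled difference of scalar scores
        return (torch.exp(self.log_k) * (self.score(ea) - self.score(eb))).squeeze(-1)

Paired with binary cross-entropy on logits against labels (a > b).float(), this trains end to end; with d=2 and hidden=4 the whole model is roughly two dozen parameters.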