Commit 771bd3e9 authored and committed by Nicolas Mansard

Add an input shape argument to the network.

parent 972e2f21
@@ -6,15 +6,17 @@ import numpy as np
 class Model(nn.Module):
     def __init__(self,
-                 hidden_layers_params:OrderedDict = OrderedDict([
-                     ('hidden layer 1', nn.Linear(in_features= 3 ,out_features=256)),
-                     ('hidden layer 1 activation', nn.Tanh()),
-                     ('hidden layer 2:', nn.Linear(in_features=256,out_features=256)),
-                     ('hidden layer 2 activation:', nn.Tanh()),
-                     ('hidden layer 3:', nn.Linear(in_features=256,out_features=1)),
-                 ])):
-        super(Model, self).__init__()
+                 hidden_layers_params:OrderedDict = None,ninput = 3,nhidden = 256):
+        if hidden_layers_params is None:
+            hidden_layers_params = OrderedDict([
+                ('hidden layer 1', nn.Linear(in_features= ninput ,out_features=nhidden)),
+                ('hidden layer 1 activation', nn.Tanh()),
+                ('hidden layer 2:', nn.Linear(in_features=nhidden,out_features=nhidden)),
+                ('hidden layer 2 activation:', nn.Tanh()),
+                ('hidden layer 3:', nn.Linear(in_features=nhidden,out_features=1)),
+            ])
+        super(Model, self).__init__()
         self.hidden_layers = nn.Sequential(hidden_layers_params)
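
Note on the hunk above: the change replaces the hard-coded input width 3 with a ninput argument (and the hidden width 256 with nhidden), while still allowing a fully custom OrderedDict of layers to be passed in. Below is a minimal, self-contained sketch of the updated class; the forward method and the surrounding imports are not shown in the diff and are assumed here to simply apply the sequential stack.

    from collections import OrderedDict

    import torch
    import torch.nn as nn

    class Model(nn.Module):
        def __init__(self, hidden_layers_params: OrderedDict = None, ninput=3, nhidden=256):
            if hidden_layers_params is None:
                # Default architecture: ninput -> nhidden -> nhidden -> 1, with Tanh activations.
                hidden_layers_params = OrderedDict([
                    ('hidden layer 1', nn.Linear(in_features=ninput, out_features=nhidden)),
                    ('hidden layer 1 activation', nn.Tanh()),
                    ('hidden layer 2', nn.Linear(in_features=nhidden, out_features=nhidden)),
                    ('hidden layer 2 activation', nn.Tanh()),
                    ('hidden layer 3', nn.Linear(in_features=nhidden, out_features=1)),
                ])
            super(Model, self).__init__()
            self.hidden_layers = nn.Sequential(hidden_layers_params)

        def forward(self, x):
            # Assumed forward pass (not part of the diff): apply the stack directly.
            return self.hidden_layers(x)

    # Usage: choose the input dimension at construction time instead of editing the class.
    model = Model(ninput=2, nhidden=128)
    y = model(torch.rand(10, 2))  # -> shape (10, 1)
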
@@ -26,11 +28,12 @@ class Model(nn.Module):
 if __name__=='__main__':
     import torch.autograd.functional as F
-    x = torch.rand(100,3)
-    model = Model()
+    nx = 6 # 100
+    x = torch.rand(nx,2)
+    model = Model(ninput=2)
     dy_hat = torch.vstack( [ F.jacobian(model, state).squeeze() for state in x ] )
     d2y_hat = torch.stack( [ F.hessian(model, state).squeeze() for state in x ] )
-    xx = torch.rand(100,3,3)
+    xx = torch.rand(nx,2,2)
     print(dy_hat.shape,d2y_hat.shape)
     mse = torch.nn.functional.mse_loss(d2y_hat,xx)
-    print(mse)
\ No newline at end of file
+    print(mse)
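
Note on the test block above: for this scalar-output network, F.jacobian on a state of shape (ninput,) returns a (1, ninput) tensor that squeezes to (ninput,), and F.hessian returns (ninput, ninput), so the stacked results have shapes (nx, ninput) and (nx, ninput, ninput). That is why the random Hessian target xx is built as torch.rand(nx, 2, 2) once ninput is 2. A small shape check, assuming the standalone sketch of Model given earlier:

    import torch
    import torch.autograd.functional as F

    nx, ninput = 6, 2
    model = Model(ninput=ninput)   # Model as sketched above
    x = torch.rand(nx, ninput)

    dy_hat = torch.vstack([F.jacobian(model, state).squeeze() for state in x])
    d2y_hat = torch.stack([F.hessian(model, state).squeeze() for state in x])

    assert dy_hat.shape == (nx, ninput)            # per-sample gradients
    assert d2y_hat.shape == (nx, ninput, ninput)   # per-sample Hessians

    # The Hessian regression target must therefore match (nx, ninput, ninput).
    xx = torch.rand(nx, ninput, ninput)
    mse = torch.nn.functional.mse_loss(d2y_hat, xx)
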