import torch
import torch.nn as nn
from collections import OrderedDict
class Model(nn.Module):
    """A small fully connected network: ninput -> nhidden -> nhidden -> 1."""

    def __init__(self, hidden_layers_params: OrderedDict = None,
                 ninput: int = 3, nhidden: int = 256):
        super().__init__()
        # Default architecture: two Tanh hidden layers and a scalar output.
        if hidden_layers_params is None:
            hidden_layers_params = OrderedDict([
                ('hidden_layer_1', nn.Linear(in_features=ninput, out_features=nhidden)),
                ('activation_1', nn.Tanh()),
                ('hidden_layer_2', nn.Linear(in_features=nhidden, out_features=nhidden)),
                ('activation_2', nn.Tanh()),
                ('output_layer', nn.Linear(in_features=nhidden, out_features=1)),
            ])
        # nn.Sequential accepts an OrderedDict of named submodules.
        self.hidden_layers = nn.Sequential(hidden_layers_params)

    def forward(self, inputs):
        return self.hidden_layers(inputs)
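
# A hypothetical usage note (not in the original): since the constructor takes
# an OrderedDict, a caller can swap in a different stack entirely, e.g. a
# smaller ReLU network (names and sizes here are illustrative assumptions):
#
#   custom_layers = OrderedDict([
#       ('hidden_layer_1', nn.Linear(2, 64)),
#       ('activation_1', nn.ReLU()),
#       ('output_layer', nn.Linear(64, 1)),
#   ])
#   custom_model = Model(hidden_layers_params=custom_layers)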
if __name__ == '__main__':
    import torch.autograd.functional as AF  # avoid clashing with the usual F = torch.nn.functional

    nx = 6  # 100
    x = torch.rand(nx, 2)
    model = Model(ninput=2)

    # Per-sample first and second derivatives of the scalar output w.r.t. the
    # 2-D input: dy_hat has shape (nx, 2), d2y_hat has shape (nx, 2, 2).
    dy_hat = torch.vstack([AF.jacobian(model, state).squeeze() for state in x])
    d2y_hat = torch.stack([AF.hessian(model, state).squeeze() for state in x])
    print(dy_hat.shape, d2y_hat.shape)

    # The original compared against an undefined name `xx`; a zero tensor
    # stands in here as a placeholder target so the script runs end to end.
    target = torch.zeros_like(d2y_hat)
    mse = torch.nn.functional.mse_loss(d2y_hat, target)
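
    # A minimal sketch under an assumed target (not in the original): if the
    # network were meant to learn f(x) = x[0]**2 + x[1]**2, its Hessian would
    # be 2*I at every point, giving the second-derivative loss a concrete
    # per-sample target of shape (nx, 2, 2).
    target_hessian = 2.0 * torch.eye(2).expand(nx, 2, 2)
    hessian_loss = torch.nn.functional.mse_loss(d2y_hat, target_hessian)
    print(hessian_loss.item())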