# neural_network.py
import torch
import torch.nn as nn
from collections import OrderedDict
from typing import Optional


class Model(nn.Module):
    """Small fully connected network; the architecture can be overridden
    by passing an OrderedDict of named layers."""

    def __init__(self,
                 hidden_layers_params: Optional[OrderedDict] = None,
                 ninput: int = 3,
                 nhidden: int = 256):
        super().__init__()

        # Default architecture: two Tanh hidden layers and a scalar output.
        if hidden_layers_params is None:
            hidden_layers_params = OrderedDict([
                ('hidden_layer_1', nn.Linear(in_features=ninput, out_features=nhidden)),
                ('hidden_layer_1_activation', nn.Tanh()),
                ('hidden_layer_2', nn.Linear(in_features=nhidden, out_features=nhidden)),
                ('hidden_layer_2_activation', nn.Tanh()),
                ('output_layer', nn.Linear(in_features=nhidden, out_features=1)),
            ])

        self.hidden_layers = nn.Sequential(hidden_layers_params)

    def forward(self, inputs):
        return self.hidden_layers(inputs)



if __name__ == '__main__':
    import torch.autograd.functional as F

    nx = 6   # number of sample inputs (try 100 for a larger batch)
    x = torch.rand(nx, 2)
    model = Model(ninput=2)
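    # A minimal usage sketch (not part of the original demo): a custom
    # architecture can be supplied through hidden_layers_params instead
    # of the default one.
    custom_layers = OrderedDict([
        ('hidden_layer_1', nn.Linear(2, 64)),
        ('hidden_layer_1_activation', nn.Tanh()),
        ('output_layer', nn.Linear(64, 1)),
    ])
    custom_model = Model(hidden_layers_params=custom_layers)
    custom_model(x)   # forward pass works the same way, returning (nx, 1)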
    # Per-sample first derivatives: F.jacobian returns (1, 2) for each input,
    # squeezed and stacked into a (nx, 2) batch of gradients.
    dy_hat = torch.vstack([F.jacobian(model, state).squeeze() for state in x])
    # Per-sample second derivatives: F.hessian returns (2, 2) for each input,
    # stacked into (nx, 2, 2).
    d2y_hat = torch.stack([F.hessian(model, state).squeeze() for state in x])
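    # Note (an assumption about intended use, not stated in the original):
    # if these derivatives feed a loss that must be backpropagated, pass
    # create_graph=True to F.jacobian / F.hessian; otherwise the resulting
    # tensors are detached from the autograd graph.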
    # Random (nx, 2, 2) target, used only to exercise a Hessian-matching loss.
    xx = torch.rand(nx, 2, 2)
    print(dy_hat.shape, d2y_hat.shape)
    mse = torch.nn.functional.mse_loss(d2y_hat, xx)
    print(mse.item())
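    # A batched alternative (a sketch assuming PyTorch >= 2.0, where torch.func
    # is available): vmap the functional transforms over the batch dimension
    # instead of looping per sample.
    from torch.func import vmap, jacrev, hessian
    dy_hat_v = vmap(jacrev(model))(x).squeeze(1)    # (nx, 2), matches dy_hat
    d2y_hat_v = vmap(hessian(model))(x).squeeze(1)  # (nx, 2, 2), matches d2y_hat
    print(dy_hat_v.shape, d2y_hat_v.shape)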