diff --git a/neural_network.py b/neural_network.py
index 0ae014810d96baf6482a8a8d1648cd048832c043..27573ae2766b7c2e51d8b7c5a5291ec6078e32fa 100644
--- a/neural_network.py
+++ b/neural_network.py
@@ -6,15 +6,18 @@ import numpy as np
 
 class Model(nn.Module):
     def __init__(self,
-        hidden_layers_params:OrderedDict = OrderedDict([
-        ('hidden layer 1', nn.Linear(in_features= 3 ,out_features=256)),
-        ('hidden layer 1 activation', nn.Tanh()),
-        ('hidden layer 2:', nn.Linear(in_features=256,out_features=256)),
-        ('hidden layer 2 activation:', nn.Tanh()),
-        ('hidden layer 3:', nn.Linear(in_features=256,out_features=1)),
-        ])):
-        super(Model, self).__init__()
+                 hidden_layers_params: OrderedDict = None, ninput: int = 3, nhidden: int = 256):
 
+        # Build the default stack lazily (not as a mutable default argument): ninput -> nhidden -> nhidden -> 1 with Tanh activations.
+        if hidden_layers_params is None:
+            hidden_layers_params = OrderedDict([
+                ('hidden layer 1', nn.Linear(in_features=ninput, out_features=nhidden)),
+                ('hidden layer 1 activation', nn.Tanh()),
+                ('hidden layer 2', nn.Linear(in_features=nhidden, out_features=nhidden)),
+                ('hidden layer 2 activation', nn.Tanh()),
+                ('hidden layer 3', nn.Linear(in_features=nhidden, out_features=1)),
+            ])
+        super(Model, self).__init__()
         
         self.hidden_layers = nn.Sequential(hidden_layers_params)
 
@@ -26,11 +29,13 @@ class Model(nn.Module):
 
 if __name__=='__main__':
     import torch.autograd.functional as F
-    x = torch.rand(100,3)
-    model = Model()
+    nx = 6  # number of sample points (reduced from 100)
+    x = torch.rand(nx, 2)
+    model = Model(ninput=2)
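+    # Per-sample Jacobian and Hessian of the network's scalar output with respect to each 2-D input point.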
     dy_hat = torch.vstack( [ F.jacobian(model, state).squeeze() for state in x ] )
     d2y_hat = torch.stack( [ F.hessian(model, state).squeeze() for state in x ] )
-    xx = torch.rand(100,3,3)
+    xx = torch.rand(nx, 2, 2)  # random reference tensor matching the shape of d2y_hat
     print(dy_hat.shape,d2y_hat.shape)
     mse = torch.nn.functional.mse_loss(d2y_hat,xx)
-    print(mse)
\ No newline at end of file
+    print(mse)