diff --git a/sobolev_grad.py b/sobolev_grad.py
index e34560b7885d332beef5698f54bfff3600d3142a..3dabcac36e90528b493f76b6364d348738f1212e 100644
--- a/sobolev_grad.py
+++ b/sobolev_grad.py
@@ -2,7 +2,8 @@
 
 import numpy as np
 import torch
-from neural_network import Model
+#from neural_network import Model
+from derivative_network import TanhDerivNet
 from datagen import dataGenerator
 import torch.autograd.functional as F
 import matplotlib.pyplot as plt
@@ -10,7 +11,8 @@ import matplotlib.pyplot as plt
 
 
 # ..............................................................................
-
+torch.manual_seed(0)                          # Fix RNG seeds so runs are reproducible
+np.random.seed(0)
 
 
 EPOCHS                = 50000                        # Number of Epochs
@@ -36,7 +38,8 @@ dataloader            = torch.utils.data.DataLoader(dataset, batch_size = number
                         shuffle=True, num_workers=4)
 
 
-network   = Model(ninput=X.shape[1])
+#network   = Model(ninput=X.shape[1])
+network = TanhDerivNet(ninput=X.shape[1])     # Returns (value, input gradient) in a single forward pass
 optimizer = torch.optim.Adam(params = network.parameters(), lr = lr)
 
 
@@ -47,62 +50,58 @@ epoch_loss_in_der1  = []
 epoch_loss_in_der2  = []
 
 
+floss = torch.nn.functional.mse_loss          # Shorthand: MSE is used for every loss term
 
 for epoch in range(EPOCHS):
     network.train()
     batch_loss_in_value = 0
     batch_loss_in_der1  = 0
-    batch_loss_in_der2  = 0
     for idx,(data) in enumerate(dataloader):
 
         x,y,dy,d2y = data
         
-        y_hat  = network(x)
-        
-        dy_hat  = torch.vstack( [ F.jacobian(network, state).squeeze() for state in x ] )   # Gradient of net
+        y_hat, dy_hat = network(x)              # Forward pass returns the prediction and its input gradient
+        dy_hat = dy_hat.squeeze()
+        #dy_hat  = torch.vstack( [ F.jacobian(network, state).squeeze() for state in x ] )   # Gradient of net
         #d2y_hat = torch.stack( [ F.hessian(network, state).squeeze() for state in x ] )     # Hessian of net
         
         
-        loss1   = torch.nn.functional.mse_loss(y_hat,y)
-        loss2   = torch.nn.functional.mse_loss(dy_hat, dy)
-        loss3   = 0#torch.nn.functional.mse_loss(d2y_hat, d2y) 
+        loss1   = floss(y_hat,y)
+        loss2   = floss(dy_hat, dy)
 
-        loss    = loss1 + 10*loss2 + loss3                            # Can add a sobolev factor to give weight to each loss term.
-                                                                   # But it does not really change anything     
+        loss    = loss1 + loss2                        # A Sobolev weight could be added to balance the two terms
+        #loss = loss2
+        
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
 
         batch_loss_in_value += loss1.item()
         batch_loss_in_der1  += loss2.item()
-        #batch_loss_in_der2  += loss3.item()
 
     epoch_loss_in_value.append( batch_loss_in_value / number_of_batches )
     epoch_loss_in_der1.append( batch_loss_in_der1 / number_of_batches )
-    #epoch_loss_in_der2.append( batch_loss_in_der2 / number_of_batches )
 
     
     if epoch % 10 == 0:
             print(f"EPOCH : {epoch}")
             print(f"Loss Values:  {loss1.item()}, Loss Grad : {loss2.item()}") #, Loss Hessian : {loss3.item()}")
-
+            #print(dy_hat-dy)
+            
 plt.ion()
 
-fig, (ax1, ax2, ax3) = plt.subplots(1,3)
+fig, (ax1, ax2) = plt.subplots(1,2)
 fig.suptitle(function_name.upper())
 
 ax1.semilogy(range(len(epoch_loss_in_value)), epoch_loss_in_value, c = "red")
 #ax2.semilogy(range(len(epoch_loss_in_der1)), epoch_loss_in_der1, c = "green")
-#ax3.semilogy(range(len(epoch_loss_in_der2)), epoch_loss_in_der2, c = "orange")
 
 ax1.set(title='Loss in Value')
 ax2.set(title='Loss in Gradient')
-ax3.set(title='Loss in Hessian')
 
 ax1.set_ylabel('Loss')
 ax1.set_xlabel('Epochs')
 ax2.set_xlabel('Epochs')
-ax3.set_xlabel('Epochs')
 
 
 
@@ -118,13 +117,16 @@ xplt = torch.tensor(LOAD['x'])
 yplt = torch.tensor(LOAD['y'])
 dyplt = torch.tensor(LOAD['dy'])
 
-ypred = network(xplt)
+ypred, dypred = network(xplt)                 # dypred is unused here; only the value prediction is plotted
 
 plt.figure()
 plt.subplot(131)
 plt.scatter(xplt[:,0],xplt[:,1],c=yplt[:,0])
+plt.scatter(x[:,0],x[:,1],c=y[:,0].detach(),lw=1,s=200,edgecolor='k')
 plt.subplot(132)
 plt.scatter(xplt[:,0],xplt[:,1],c=ypred[:,0].detach())
+plt.scatter(x[:,0],x[:,1],c=y[:,0].detach(),lw=1,s=200,edgecolor='k')
 plt.subplot(133)
-plt.scatter(xplt[:,0],xplt[:,1],c=(ypred-yplt)[:,0].detach())
-plt.colorbar()
+plt.scatter(xplt[:,0],xplt[:,1],c=abs(ypred-yplt)[:,0].detach())
+plt.scatter(x[:,0],x[:,1],color= 'none',lw=2,s=200,edgecolor='w')
+#plt.colorbar()
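
Note: derivative_network.TanhDerivNet is not included in this patch. The training loop only
relies on its forward pass returning a (value, gradient) pair, so a minimal sketch of such a
network is given below, assuming a single hidden tanh layer; the hidden width (nhidden) and
the class internals are assumptions, not the actual implementation.

import torch

class TanhDerivNet(torch.nn.Module):
    """Tanh MLP that returns its output together with the analytic input gradient."""

    def __init__(self, ninput, nhidden=64, noutput=1):
        super().__init__()
        self.fc1 = torch.nn.Linear(ninput, nhidden)
        self.fc2 = torch.nn.Linear(nhidden, noutput)

    def forward(self, x):
        h = self.fc1(x)           # pre-activations, shape (batch, nhidden)
        a = torch.tanh(h)         # hidden activations
        y = self.fc2(a)           # predicted value, shape (batch, noutput)

        # Chain rule with tanh'(h) = 1 - tanh(h)^2:
        # dy/dx = W2 @ diag(1 - a^2) @ W1, evaluated per sample.
        da   = 1.0 - a ** 2                                  # (batch, nhidden)
        dydx = torch.einsum('oh,bh,hi->boi',
                            self.fc2.weight, da, self.fc1.weight)
        return y, dydx            # dydx has shape (batch, noutput, ninput)

With noutput=1 this matches the call sites above: y_hat, dy_hat = network(x) followed by
dy_hat.squeeze() gives a (batch, ninput) gradient that can be compared against dy with MSE,
which is exactly the Sobolev term loss2.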