Creating a PyTorch model to fit a curve

import torch
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from torch import nn
import os
import numpy as np

# Load the data from a file
def load_data_from_file(file_path):
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"File {file_path} does not exist.")
    data = np.loadtxt(file_path)
    x = torch.tensor(data[:, 0], dtype=torch.float32)   # first column
    Ps = torch.tensor(data[:, 1], dtype=torch.float32)  # second column
    y = torch.tensor(data[:, 2], dtype=torch.float32)   # third column
    return x, Ps, y

# Load the data
file_path = 'D:/2-test/data.txt'  # change to the actual path
x, Ps, y = load_data_from_file(file_path)

# Define the model, inheriting from nn.Module
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        # Use nn.Parameter to define the parameters to be optimized
        self.N0 = nn.Parameter(torch.tensor(1.0, dtype=torch.float32), requires_grad=True)
        self.Pn0 = nn.Parameter(torch.tensor(1.0, dtype=torch.float32), requires_grad=True)
        self.P1 = nn.Parameter(torch.tensor(1.0, dtype=torch.float32), requires_grad=True)
        self.Pg0 = nn.Parameter(torch.tensor(1.0, dtype=torch.float32), requires_grad=True)
        self.eta = nn.Parameter(torch.tensor(1.0, dtype=torch.float32), requires_grad=True)
        self.Rn = torch.tensor(3.1e-10, dtype=torch.float32)  # fixed value
        self.Vv0 = nn.Parameter(torch.tensor(0.1, dtype=torch.float32), requires_grad=True)  # initial void volume

    def forward(self, Ps):
        dt = 1e-12
        # Nucleation rate
        N_dot = torch.where(Ps > self.Pn0, self.N0 * torch.exp((Ps - self.Pn0) / self.P1), torch.tensor(0.0, dtype=torch.float32))
        # Void nucleation volume
        Delta_Vn = 8 * torch.pi * N_dot * dt * self.Rn ** 3
        # Void growth volume
        Vg = self.Vv0 * 3 / 4 * (Ps - self.Pg0) / self.eta * dt
        # Total volume
        Vv = Vg + Delta_Vn
        return Vv

# Create the model
model = NeuralNetwork()

# Define the loss function and optimizer
loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Training loop
batches = 5000
plt.figure("regression")

def calculate_rmse(predictions, targets):
    return torch.sqrt(loss_fn(predictions, targets))

for i in range(batches):
    Vv_pred = model(Ps)
    loss = loss_fn(Vv_pred, y)
    rmse = calculate_rmse(Vv_pred, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if i % 100 == 0:
        loss_value = loss.item()
        rmse_value = rmse.item()
        print(f'loss: {loss_value} | RMSE: {rmse_value} | Batch: {i}')
        plt.cla()
        plt.plot(x.numpy(), y.numpy(), label='Actual')
        plt.plot(x.numpy(), Vv_pred.detach().numpy(), label='Predicted')
        plt.legend()
        plt.draw()
        plt.pause(0.01)

# Turn off interactive mode and plot the final result
plt.ioff()
plt.cla()
plt.plot(x.numpy(), y.numpy(), label='Actual')
plt.plot(x.numpy(), Vv_pred.detach().numpy(), label='Predicted')
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Final Regression Result')
plt.savefig("final_plot.png")
plt.show()

# Print the fitted parameters
print("N0:", model.N0.item())
print("Pn0:", model.Pn0.item())
print("P1:", model.P1.item())
print("Pg0:", model.Pg0.item())
print("eta:", model.eta.item())
But this code doesn't optimize the parameters. Here is the output:
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2300
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2400
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2500
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2600
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2700
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2800
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 2900
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3000
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3100
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3200
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3300
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3400
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3500
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3600
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3700
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3800
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 3900
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4000
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4100
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4200
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4300
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4400
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4500
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4600
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4700
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4800
loss: 0.10101878643035889 | RMSE: 0.3178345263004303 | Batch: 4900
N0: 1.0
Pn0: 1.0
P1: 1.0
Pg0: 1.0
eta: 1.0

Process finished with exit code 0

I would like to know how I can modify the code so that these parameters are actually optimized.

Check whether any of the trainable parameters have a valid gradient after the backward pass, as it seems your training is stuck.
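For example, a minimal sketch of such a check, inserted into the training loop above (the print interval and formatting here are just illustrative, not part of your original code), could look like this:

optimizer.zero_grad()
loss.backward()
# Inspect each registered parameter and its gradient right after backward()
if i % 100 == 0:
    for name, p in model.named_parameters():
        grad = p.grad.item() if p.grad is not None else None
        print(f'{name}: {p.item()} | grad: {grad}')
optimizer.step()

If the gradients are None, the parameters are not part of the computation graph; if they are extremely small, the updates will be too tiny for Adam with lr=1e-4 to make visible progress.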

Below is a partial gradient log:

Batch 9000
N0: 1.0 | grad: -4.91339694994675e-34
Pn0: 1000000.0 | grad: 4.9133966275359996e-39
P1: 100000.0 | grad: 2.1853009929108123e-38
Pg0: 1000000.0 | grad: 1.487217987650169e-10
eta: 0.06056000292301178 | grad: -0.0023958131205290556
Vv0: -0.00014509086031466722 | grad: -0.9999971985816956
loss: 1.1067631244659424 | RMSE: 1.0520280599594116 | Batch: 9000
Batch 9100
N0: 1.0 | grad: -4.923352376907757e-34
Pn0: 1000000.0 | grad: 4.9233528531250274e-39
P1: 100000.0 | grad: 2.189718165930057e-38
Pg0: 1000000.0 | grad: 1.4765616507261825e-10
eta: 0.060560375452041626 | grad: -0.0023792905267328024
Vv0: -0.00014409114373847842 | grad: -0.9999971389770508
loss: 1.087465524673462 | RMSE: 1.042816162109375 | Batch: 9100
Batch 9200
N0: 1.0 | grad: -4.933487801441234e-34
Pn0: 1000000.0 | grad: 4.9334870436190245e-39
P1: 100000.0 | grad: 2.1942153530916146e-38
Pg0: 1000000.0 | grad: 1.4659039260234152e-10
eta: 0.06056074798107147 | grad: -0.0023627683985978365
Vv0: -0.00014309142716228962 | grad: -0.9999972581863403
loss: 1.0683401823043823 | RMSE: 1.0336054563522339 | Batch: 9200
Batch 9300
N0: 1.0 | grad: -4.9438073561445075e-34
Pn0: 1000000.0 | grad: 4.9438062055103125e-39
P1: 100000.0 | grad: 2.1987939556939495e-38
Pg0: 1000000.0 | grad: 1.455244397208233e-10
eta: 0.06056112051010132 | grad: -0.0023462465032935143
Vv0: -0.00014209171058610082 | grad: -0.9999972581863403
loss: 1.0493865013122559 | RMSE: 1.0243957042694092 | Batch: 9300
Batch 9400
N0: 1.0 | grad: -4.9543137960824625e-34
Pn0: 1000000.0 | grad: 4.954314542694284e-39
P1: 100000.0 | grad: 2.203456355944451e-38
Pg0: 1000000.0 | grad: 1.4445832030585137e-10
eta: 0.060561493039131165 | grad: -0.0023297248408198357
Vv0: -0.00014109199400991201 | grad: -0.9999972581863403
loss: 1.030604600906372 | RMSE: 1.0151870250701904 | Batch: 9400
Batch 9500
N0: 1.0 | grad: -4.965017223159676e-34
Pn0: 1000000.0 | grad: 4.965016259066333e-39
P1: 100000.0 | grad: 2.208205076180355e-38
Pg0: 1000000.0 | grad: 1.4339203435742576e-10
eta: 0.06056186556816101 | grad: -0.0023132034111768007
Vv0: -0.0001400922774337232 | grad: -0.9999973177909851
loss: 1.0119946002960205 | RMSE: 1.0059794187545776 | Batch: 9500
Batch 9600
N0: 1.0 | grad: -4.975917178198668e-34
Pn0: 1000000.0 | grad: 4.97591836111878e-39
P1: 100000.0 | grad: 2.2130417979598185e-38
Pg0: 1000000.0 | grad: 1.4232554024218302e-10
eta: 0.06056223809719086 | grad: -0.002296681981533766
Vv0: -0.0001390925608575344 | grad: -0.9999973177909851
loss: 0.9935567378997803 | RMSE: 0.9967731833457947 | Batch: 9600
Batch 9700
N0: 1.0 | grad: -4.987025140636458e-34
Pn0: 1000000.0 | grad: 4.9870250527470185e-39
P1: 100000.0 | grad: 2.2179703047886953e-38
Pg0: 1000000.0 | grad: 1.412588657156988e-10
eta: 0.0605626106262207 | grad: -0.002280161017552018
Vv0: -0.0001380928442813456 | grad: -0.9999974370002747
loss: 0.9752906560897827 | RMSE: 0.9875680804252625 | Batch: 9700
Batch 9800
N0: 1.0 | grad: -4.998342028828007e-34
Pn0: 1000000.0 | grad: 4.99834334044337e-39
P1: 100000.0 | grad: 2.2229918578356033e-38
Pg0: 1000000.0 | grad: 1.4019198302239744e-10
eta: 0.06056298315525055 | grad: -0.002263639820739627
Vv0: -0.0001370931277051568 | grad: -0.9999974966049194
loss: 0.9571964144706726 | RMSE: 0.9783641695976257 | Batch: 9800
Batch 9900
N0: 1.0 | grad: -5.009877485500413e-34
Pn0: 1000000.0 | grad: 5.009878829401692e-39
P1: 100000.0 | grad: 2.2281099603467033e-38
Pg0: 1000000.0 | grad: 1.3912490604006678e-10
eta: 0.060563355684280396 | grad: -0.0022471188567578793
Vv0: -0.000136093411128968 | grad: -0.9999974370002747
loss: 0.9392740726470947 | RMSE: 0.9691615104675293 | Batch: 9900