Introduction to the Ritz method
The Poisson equation
−Δu = f,  x ∈ Ω = (0,1)²
u = g,  x ∈ ∂Ω
The Ritz principle
The original problem is equivalent to the constrained optimization problem:
loss = 0.5 ∫_Ω (∇u · ∇u − 2fu) dxdy
s.t.  u = g,  x ∈ ∂Ω
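To see the equivalence, perturb a minimizer u by εv with v = 0 on ∂Ω and set the first variation to zero:
d/dε loss(u + εv)|_{ε=0} = ∫_Ω (∇u · ∇v − fv) dxdy = ∫_Ω (−Δu − f)v dxdy = 0
for every such v (integration by parts, using v = 0 on ∂Ω), which is exactly −Δu = f. So minimizing the energy above, subject to the boundary condition, solves the Poisson problem.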
The conversion principle
−∇u = τ,  x ∈ Ω = (0,1)²
∇ · τ = f,  x ∈ Ω = (0,1)²
u = g,  x ∈ ∂Ω
Substituting the first equation into the second recovers ∇ · (−∇u) = −Δu = f, so this first-order system is equivalent to the original problem, and a loss built on it only needs first derivatives of the networks.
The approximate solution
u = netf.forward(x) * lenth.forward(x) + netg.forward(x)
Here netg fits the boundary data g and lenth is a length factor that vanishes on ∂Ω, so the boundary condition holds by construction and netf is free to fit the interior.
Clearly this construction is equivalent to the original Poisson equation. The code is shown below; to make switching between the two methods easy, I used a small trick in the code design.
When using the Ritz method, the loss function is defined as follows:
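This is the Lossf that appears in the full listing further down; inset.G/inset.Gx are the precomputed values and gradients of netg on the interior points, inset.L/inset.Lx those of the length factor, and inset.right stores f:
def Lossf(netf,inset):# Ritz energy loss
    inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
                                   grad_outputs=torch.ones(inset.size,1))
    u = inset.G + inset.L*insetF
    ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx# chain rule for u = netg + lenth*netf
    out = 0.5*(ux**2).sum(1,keepdim=True) - u*inset.right
    return out.mean()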
When using the conversion principle, the loss function is defined as follows:
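This one uses the extra head netf.backward, which approximates τ; it is the commented-out alternative in the listing below:
def Lossf(netf,inset):# conversion-principle loss for the first-order system
    inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
                                   grad_outputs=torch.ones(inset.size,1))
    ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx# gradient of u
    tau = netf.backward(inset.X)# second head approximates tau = -grad u
    taux, = torch.autograd.grad(tau[:,0:1], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))
    tauy, = torch.autograd.grad(tau[:,1:2], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))
    out = (tau + ux)**2 + (taux[:,0:1] + tauy[:,1:2] - inset.right)**2
    return out.mean()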
With this setup, both methods live in one codebase: switching between them only requires changing the definition of the loss function, which is very convenient.
Basic DRM
import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
def UU(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
if order[0]==0 and order[1]==0:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==0:
return (3*X[:,0]*X[:,0]-1) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==1:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==2 and order[1]==0:
return (6*X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==1:
return (3*X[:,0]*X[:,0]-1) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==2:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if prob==3:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==4:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
# function definitions finished
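# Optional sanity check (added; not part of the original script): compare the
# hand-coded first x-derivative in UU against autograd on the prob==1 solution.
Xc = torch.rand(5, 2, requires_grad=True)
gc, = torch.autograd.grad(UU(Xc, [0,0], 1).sum(), Xc)
print((gc[:,0] - UU(Xc, [1,0], 1)).abs().max())  # should print a value near 0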
class INSET():
def __init__(self,bound,nx,prob):
self.dim = 2
self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.size = nx[0]*nx[1]
self.X = torch.zeros(self.size,self.dim)# store the interior points
for j in range(nx[1]):
for i in range(nx[0]):
self.X[j*nx[0] + i,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[j*nx[0] + i,1] = bound[1,0] + (j + 0.5)*self.hx[1]
self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the interior points
self.right = FF(self.X,prob).view(-1,1)# - nabla A \nabla u = -c
class BDSET():# boundary points
def __init__(self,bound,nx,prob):
self.dim = 2
self.DS = 2*(nx[0] + nx[1])
self.Dlenth = 2*(bound[0,1] - bound[0,0]) + 2*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.X = torch.zeros(self.DS,self.dim)# store the boundary points
m = 0
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,0]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,1]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,1]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,0]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
self.Dright = UU(self.X,[0,0],prob).view(-1,1)
class TESET():
def __init__(self,bound,nx,prob):
self.dim = 2
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
M,N = nx[0] + 1,nx[1] + 1
self.size = M*N
self.X = torch.zeros(self.size,self.dim)# store all grid points of the domain, boundary included
for j in range(N):
for i in range(M):
self.X[j*M + i,0] = bound[0,0] + i*self.hx[0]
self.X[j*M + i,1] = bound[1,0] + j*self.hx[1]
self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the grid points
# data sets finished
np.random.seed(1234)
torch.manual_seed(1234)
class SIN(nn.Module):# activation function used by the network
def __init__(self,order):
super(SIN,self).__init__()
self.e = order
def forward(self,x):
return torch.sin(x)**self.e
class Res(nn.Module):
def __init__(self,input_size,output_size):
super(Res,self).__init__()
self.model = nn.Sequential(
nn.Linear(input_size,output_size),
SIN(1)
)
self.input = input_size
self.output = output_size
def forward(self,x):
x = self.model(x) + x@torch.eye(x.size(1),self.output)# residual-style skip connection
return x
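# Note on the skip connection above: x @ torch.eye(x.size(1), self.output)
# zero-pads (or truncates) x to the layer's output width, so the shortcut works
# even when input and output dimensions differ; e.g.
# torch.ones(1,2) @ torch.eye(2,10) gives [1,1,0,0,0,0,0,0,0,0].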
class NETF(nn.Module):# netf approximates u directly; here the boundary condition is enforced by a penalty term in the loss
def __init__(self):
super(NETF,self).__init__()
self.model = nn.Sequential(
Res(2,10),
Res(10,10),
Res(10,10)
)
self.fc = torch.nn.Linear(10,1)
def forward(self,x):
out = self.model(x)
return self.fc(out)
def pred(netf,X):
return netf.forward(X)
def error(u_pred, u_acc):
#return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def Lossf(netf,inset,bdset):
inset.X.requires_grad = True
insetF = netf.forward(inset.X)
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
u_in = insetF# the network output is used directly as u (no netg/length factor in this penalty version)
ux = insetFx# and its autograd gradient as grad u
out_in = (ux[:,0:1]**2 + ux[:,1:2]**2 - 2*inset.right*u_in).mean()
ub = netf.forward(bdset.X)
out_b = bdset.Dlenth * ((ub - bdset.Dright)**2).mean()
beta = 5e2
return out_in + beta*out_b
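# The loss is the Ritz energy over the interior points plus a Dirichlet boundary
# penalty weighted by beta: larger beta enforces u = g more strongly, at the
# price of a stiffer optimization problem.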
# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset,bdset)
lossoptimal = lossf
trainerror = error(netf.forward(inset.X), inset.u_acc)
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
'''
for j in range(cycle):
    optimf.zero_grad()
    lossf = Lossf(netf,inset)
    lossf.backward()
    optimf.step()
'''
def closure():
optimf.zero_grad()
lossf = Lossf(netf,inset,bdset)
lossf.backward()
return lossf
optimf.step(closure)
lossf = Lossf(netf,inset,bdset)
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(netf.forward(inset.X), inset.u_acc)
ERROR.append(trainerror.item())
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),trainerror,ela))
return ERROR,BUZHOU
def main():
# Configurations
prob = 1
bounds = torch.Tensor([0.0,1.0,0.0,1.0]).reshape(2,2)
nx_tr = [40,40]# training-set resolution
nx_te = [100,100]# test-set resolution
epochf = 15
lr = 1e-2
tests_num = 1
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
for it in range(tests_num):
inset = INSET(bounds, nx_tr, prob)
bdset = BDSET(bounds, nx_tr, prob)
teset = TESET(bounds, nx_te, prob)
netf = NETF()
#optimg = torch.optim.Adam(netg.parameters(), lr=lr)
#optimf = torch.optim.Adam(netf.parameters(), lr=lr)
optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
start_time = time.time()
ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
#print(ERROR,BUZHOU)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netf,teset.X)
testerror[it] = error(te_U, teset.u_acc)
print('testerror = %.3e\n' %(testerror[it].item()))
x_train = np.linspace(bounds[0,0],bounds[0,1],nx_te[0] + 1)
y_train = np.linspace(bounds[1,0],bounds[1,1],nx_te[1] + 1)
x,y = np.meshgrid(x_train,y_train)
plt.contourf(x,y,(te_U - teset.u_acc).detach().numpy().reshape(nx_te[0] + 1,nx_te[1] + 1).T,40,cmap = 'rainbow')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('the PINN error at grid = %d * %d'%(nx_te[1] ,nx_te[0]))
plt.savefig('pinn.png')
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
%(testerror_mean.item(),testerror_std.item()))
if __name__ == '__main__':
main()
Code and experimental results
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import torch.nn as nn
def UU(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==3:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
class INSET():# interior points
def __init__(self,bound,nx,prob):
self.dim = 2
#self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.size = nx[0]*nx[1]
self.X = torch.zeros(self.size,self.dim)# store the interior points
self.nx = nx
for i in range(nx[0]):
for j in range(nx[1]):
self.X[i*nx[1] + j,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[i*nx[1] + j,1] = bound[1,0] + (j + 0.5)*self.hx[1]
self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
self.right = FF(self.X,prob).reshape(-1,1)
#plt.scatter(self.X[:,0],self.X[:,1])
class BDSET():# boundary points
def __init__(self,bound,nx,prob):
self.dim = 2
#self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.size = 2*(nx[0] + nx[1])
self.X = torch.zeros(self.size,self.dim)# store the boundary points
m = 0
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,0]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,1]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,1]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,0]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
#plt.scatter(self.X[:,0],self.X[:,1])
self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the boundary points
class TESET():
def __init__(self, bound, nx,prob):
self.bound = bound
self.nx = nx
self.hx = [(self.bound[0,1]-self.bound[0,0])/self.nx[0],
(self.bound[1,1]-self.bound[1,0])/self.nx[1]]
self.prob = prob
self.size = (self.nx[0] + 1)*(self.nx[1] + 1)
self.X = torch.zeros(self.size,2)
m = 0
for i in range(self.nx[0] + 1):
for j in range(self.nx[1] + 1):
self.X[m,0] = self.bound[0,0] + i*self.hx[0]
self.X[m,1] = self.bound[1,0] + j*self.hx[1]
m = m + 1
#plt.scatter(self.X[:,0],self.X[:,1])
self.u_acc = UU(self.X,[0,0],prob).reshape(self.size,1)
class LEN():
def __init__(self,bound,mu):
self.mu = mu
self.bound = bound
def forward(self,X):
L = 1.0
if X.ndim == 2:# points stored as [m,2]
for i in range(2):
L = L*(1 - (1 - (X[:,i] - self.bound[i,0]))**self.mu)
L = L*(1 - (1 - (self.bound[i,1] - X[:,i]))**self.mu)
return L.view(-1,1)
elif X.ndim == 3:# points stored as [m,4,2], m being the number of interior grid cells
for i in range(2):
L = L*(1 - (1 - (X[:,:,i] - self.bound[i,0]))**self.mu)
L = L*(1 - (1 - (self.bound[i,1] - X[:,:,i]))**self.mu)
return L.view(X.shape[0],X.shape[1],1)
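# Quick check (added; not part of the original script): the length factor
# vanishes on the boundary and is positive inside, which is what lets netg alone
# carry the boundary values in u = netg + netf*lenth.
lenth_check = LEN(torch.tensor([[0,1],[0,1]]).float(), 3)
print(lenth_check.forward(torch.tensor([[0.0,0.5],[0.5,0.5]])))  # first entry 0 (boundary point), second > 0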
bound = torch.tensor([[0,1],[0,1]]).float()
nx = [10,10]
nx_te = [40,40]
prob = 2
mu = 3
lr = 1e-3
inset = INSET(bound,nx,prob)
bdset = BDSET(bound,nx,prob)
teset = TESET(bound,nx_te,prob)
print(inset.u_acc.shape)
np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):# u = netg + netf*lenth; netg fits the boundary data
def __init__(self):
super(NETG,self).__init__()
self.fc1 = torch.nn.Linear(2,10)
self.fc2 = torch.nn.Linear(10,10)
self.fc3 = torch.nn.Linear(10,1)
def forward(self,x):
out = torch.sin(self.fc1(x)) + x@torch.eye(x.size(-1),10)
out = torch.sin(self.fc2(out)) + out@torch.eye(out.size(-1),10)
#note x.size(-1): for BDSET or TESET samples the input is [m,2], so this picks the trailing 2; for INSET samples shaped [m,4,2] it likewise picks the trailing 2
return self.fc3(out)
class SIN(nn.Module):# activation function used by the networks
def __init__(self,order):
super(SIN,self).__init__()
self.e = order
def forward(self,x):
return torch.sin(x)**self.e
class Res(nn.Module):
def __init__(self,input_size,output_size):
super(Res,self).__init__()
self.model = nn.Sequential(
nn.Linear(input_size,output_size),
SIN(1),
nn.Linear(output_size,output_size),
SIN(1)
)
self.input = input_size
self.output = output_size
def forward(self,x):
out = self.model(x) + x@torch.eye(x.size(-1),self.output)# residual-style skip connection
#note x.size(-1): for BDSET or TESET samples the input is [m,2], so this picks the trailing 2; for INSET samples shaped [m,4,2] it likewise picks the trailing 2
return out
class NETF(nn.Module):# netf approximates the interior part of u = netg + netf*lenth
def __init__(self):
super(NETF,self).__init__()
self.model = nn.Sequential(
Res(2,10),
Res(10,20),
Res(20,10)
)
self.mod = nn.Sequential(
Res(2,10),
Res(10,10),
Res(10,20),
)
self.fc = torch.nn.Linear(10,1)
self.fcm = torch.nn.Linear(20,2)
def forward(self,x):
out = self.model(x)
return self.fc(out)
def backward(self,x):
out = self.mod(x)
return self.fcm(out)
def pred(netg,netf,lenth,X):
return netg.forward(X) + netf.forward(X)*lenth.forward(X)
def error(u_pred, u_acc):
return max(abs(u_pred.reshape(-1,1) - u_acc.reshape(-1,1)))# maximum (L-infinity) error
#------------------------
def Lossg(netg,bdset):# fit the Dirichlet boundary data; a plain boundary loss
ub = netg.forward(bdset.X)
return ((ub - bdset.u_acc)**2).mean()
def Traing(netg, bdset, optimg, epochg):
print('train neural network g')
lossg = Lossg(netg,bdset)
lossbest = lossg
print('epoch:%d,lossf:%.3e'%(0,lossg.item()))
torch.save(netg.state_dict(),'best_netg.pkl')
cycle = 100
for i in range(epochg):
st = time.time()
for j in range(cycle):
optimg.zero_grad()
lossg = Lossg(netg,bdset)
lossg.backward()
optimg.step()
if lossg < lossbest:
lossbest = lossg
torch.save(netg.state_dict(),'best_netg.pkl')
ela = time.time() - st
print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))
#----------------------
def Lossf(netf,inset):
inset.X.requires_grad = True
insetF = netf.forward(inset.X)#(81,4,1)
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))#(81,4,2)
u = inset.G + inset.L*insetF
ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx# chain rule for u = netg + lenth*netf, reuses the precomputed netg quantities
out = 0.5*(ux**2).sum(1,keepdim=True) - u*inset.right# keepdim avoids an unintended (m,m) broadcast against u
return out.mean()
'''
def Lossf(netf,inset):# conversion-principle loss; swap this in for the Lossf above to switch methods
    inset.X.requires_grad = True
    insetF = netf.forward(inset.X)#(81,4,1)
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
                                   grad_outputs=torch.ones(inset.size,1))#(81,4,2)
    ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx# chain rule for u = netg + lenth*netf
    tau = netf.backward(inset.X)
    taux, = torch.autograd.grad(tau[:,0:1], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))#(81,4,2)
    tauy, = torch.autograd.grad(tau[:,1:2], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))#(81,4,2)
    out = (tau + ux)**2 + (taux[:,0:1] + tauy[:,1:2] - inset.right)**2
    return out.mean()
'''
def Trainf(netf, inset,optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset)
lossoptimal = lossf
trainerror = error(pred(netg,netf,lenth,inset.X),inset.u_acc)#---------------------
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
for j in range(cycle):
optimf.zero_grad()
lossf = Lossf(netf,inset)
lossf.backward()
optimf.step()
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(pred(netg,netf,lenth,inset.X),inset.u_acc)
ERROR.append(trainerror)
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),trainerror,ela))
return ERROR,BUZHOU
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
# Train neural network g
Traing(netg, bdset, optimg, epochg)
netg.load_state_dict(torch.load('best_netg.pkl'))
# Calculate the length factor
inset.X.requires_grad = True
inset.L = lenth.forward(inset.X)
inset.Lx, = torch.autograd.grad(inset.L, inset.X,# gradient of the length factor w.r.t. the interior points
create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
#inset.L = [m,4,1],inset.Lx = [m,4,2]
inset.L = inset.L.data; inset.Lx = inset.Lx.data
print(inset.L.shape,inset.Lx.shape)
inset.G = netg.forward(inset.X)
inset.Gx, = torch.autograd.grad(inset.G, inset.X,
create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
#inset.G = [m,4,1],inset.Gx = [m,4,2]
inset.G = inset.G.data; inset.Gx = inset.Gx.data
print(inset.G.shape,inset.Gx.shape)
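# (added note) .data detaches L, Lx, G, Gx from the autograd graph: netg and the
# length factor are frozen from here on, so Lossf can reuse these tensors every
# iteration without rebuilding their graphs; this is the speed-up the comments mention.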
# Train neural network f
ERROR,BUZHOU = Trainf(netf, inset, optimf, epochf)
return ERROR,BUZHOU
err = [];step = []
PROB = [1,2,3]
for prob in PROB:
bound = torch.tensor([[0,1],[0,1]]).float()
nx = [10,10]
nx_te = [40,40]
mu = 3
lr = 1e-3
inset = INSET(bound,nx,prob)
bdset = BDSET(bound,nx,prob)
teset = TESET(bound,nx_te,prob)
lenth = LEN(bound,mu)
netg = NETG()
netf = NETF()
optimg = torch.optim.Adam(netg.parameters(), lr=lr)
optimf = torch.optim.Adam(netf.parameters(), lr=lr)
epochg = 10
epochf = 10
start_time = time.time()
ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
err.append(ERROR)
step.append(BUZHOU)
#print(ERROR,BUZHOU)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
netg.load_state_dict(torch.load('best_netg.pkl'))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netg, netf, lenth, teset.X)
testerror = error(te_U, teset.u_acc)
print('testerror = %.3e\n' %(testerror.item()))
plt.plot(step[0],err[0],'r*',label = 'prob = 1')
plt.plot(step[0],err[0],'r')
plt.plot(step[1],err[1],'ro',label = 'prob = 2')
plt.plot(step[1],err[1],'r')
plt.plot(step[2],err[2],'r.',label = 'prob = 3')
plt.plot(step[2],err[2],'r')
plt.legend(loc = 'upper right')
plt.savefig('D3M.jpg')
[Figure: Ritz]
[Figure: conversion principle]
Further improving the conversion principle
Above, two heads, forward and backward, were used to approximate u and τ separately. We can instead change the network's output nodes and the corresponding loss function, giving a new way to build the network:
Three places need matching changes (the original post showed them as images; a sketch follows):
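The images are not reproduced here, so the following is only a plausible sketch of the three changes, under the assumption that u and τ share one network with a 3-dimensional output; the names NETF3, pred3 and Lossf3 are illustrative, not from the original:
class NETF3(nn.Module):
    def __init__(self):
        super(NETF3,self).__init__()
        self.model = nn.Sequential(Res(2,16),Res(16,16),Res(16,16))
        self.fc = nn.Linear(16,3)# change 1: one head outputs [u, tau_x, tau_y]
    def forward(self,x):
        return self.fc(self.model(x))
def pred3(netf,X):
    return netf.forward(X)[:,0:1]# change 2: u is the first output column
def Lossf3(netf,inset,bdset):# change 3: first-order-system loss on the shared output
    inset.X.requires_grad = True
    out = netf.forward(inset.X)
    u,tau = out[:,0:1],out[:,1:3]
    ux, = torch.autograd.grad(u, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,1))
    taux, = torch.autograd.grad(tau[:,0:1], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))
    tauy, = torch.autograd.grad(tau[:,1:2], inset.X, create_graph=True, retain_graph=True,
                                grad_outputs=torch.ones(inset.size,1))
    res = ((tau + ux)**2).sum(1,keepdim=True) + (taux[:,0:1] + tauy[:,1:2] - inset.right)**2
    ub = netf.forward(bdset.X)[:,0:1]
    return res.mean() + 5e2*((ub - bdset.Dright)**2).mean()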
The rest of the code stays the same; the numerical results are as follows:
import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
def UU(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
if order[0]==0 and order[1]==0:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==0:
return (3*X[:,0]*X[:,0]-1) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==1:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==2 and order[1]==0:
return (6*X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==1:
return (3*X[:,0]*X[:,0]-1) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==2:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if prob==3:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==4:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
# function definitions finished
class INSET():
def __init__(self,bound,nx,prob):
self.dim = 2
self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.size = nx[0]*nx[1]
self.X = torch.zeros(self.size,self.dim)# store the interior points
for j in range(nx[1]):
for i in range(nx[0]):
self.X[j*nx[0] + i,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[j*nx[0] + i,1] = bound[1,0] + (j + 0.5)*self.hx[1]
self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the interior points
self.right = FF(self.X,prob).view(-1,1)# - nabla A \nabla u = -c
class BDSET():# boundary points
def __init__(self,bound,nx,prob):
self.dim = 2
self.DS = 2*(nx[0] + nx[1])
self.Dlenth = 2*(bound[0,1] - bound[0,0]) + 2*(bound[1,1] - bound[1,0])
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
self.X = torch.zeros(self.DS,self.dim)# store the boundary points
m = 0
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,0]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,1]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
for i in range(nx[0]):
self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
self.X[m,1] = bound[1,1]
m = m + 1
for j in range(nx[1]):
self.X[m,0] = bound[0,0]
self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
m = m + 1
self.Dright = UU(self.X,[0,0],prob).view(-1,1)
class TESET():
def __init__(self,bound,nx,prob):
self.dim = 2
self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
M,N = nx[0] + 1,nx[1] + 1
self.size = M*N
self.X = torch.zeros(self.size,self.dim)# store all grid points of the domain, boundary included
for j in range(N):
for i in range(M):
self.X[j*M + i,0] = bound[0,0] + i*self.hx[0]
self.X[j*M + i,1] = bound[1,0] + j*self.hx[1]
self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the grid points
# data sets finished
np.random.seed(1234)
torch.manual_seed(1234)
class SIN(nn.Module):# activation function used by the network (note: sin(x)+x here, unlike the basic DRM version)
def __init__(self,order):
super(SIN,self).__init__()
self.e = order
def forward(self,x):
return (torch.sin(x) + x)**self.e
class Res(nn.Module):
def __init__(self,input_size,output_size):
super(Res,self).__init__()
self.model = nn.Sequential(
nn.Linear(input_size,output_size),
SIN(1)
)
self.input = input_size
self.output = output_size
def forward(self,x):
x = self.model(x) + x@torch.eye(x.size(1),self.output)# residual-style skip connection
return x
class NETF(nn.Module):# netf approximates u directly; here the boundary condition is enforced by a penalty term in the loss
def __init__(self):
super(NETF,self).__init__()
self.model = nn.Sequential(
Res(2,16),
Res(16,16),
Res(16,16)
)
self.fc = torch.nn.Linear(16,1)
def forward(self,x):
out = self.model(x)
return self.fc(out)
def pred(netf,X):
return netf.forward(X)
def error(u_pred, u_acc):
#return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def Lossf(netf,inset,bdset):
inset.X.requires_grad = True
insetF = netf.forward(inset.X)
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
u_in = insetF# the network output is used directly as u (no netg/length factor in this penalty version)
ux = insetFx# and its autograd gradient as grad u
out_in = 0.5*inset.area * ((ux*ux).sum(1)).mean() - inset.area * (inset.right*u_in).mean()
ub = netf.forward(bdset.X)
out_b = bdset.Dlenth * ((ub - bdset.Dright)**2).mean()
beta = 5e2
return out_in + beta*out_b
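# Unlike the first Lossf, out_in here is scaled by inset.area, so the means over
# the interior points approximate the actual domain integrals in the Ritz energy
# (midpoint-rule quadrature on the uniform grid).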
# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset,bdset)
lossoptimal = lossf
trainerror = error(netf.forward(inset.X), inset.u_acc)
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
'''
for j in range(cycle):
    optimf.zero_grad()
    lossf = Lossf(netf,inset)
    lossf.backward()
    optimf.step()
'''
def closure():
optimf.zero_grad()
lossf = Lossf(netf,inset,bdset)
lossf.backward()
return lossf
optimf.step(closure)
lossf = Lossf(netf,inset,bdset)
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(netf.forward(inset.X), inset.u_acc)
ERROR.append(trainerror.item())
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),trainerror,ela))
return ERROR,BUZHOU
def main():
# Configurations
prob = 2
bounds = torch.Tensor([0.0,1.0,0.0,1.0]).reshape(2,2)
nx_tr = [41,41]# training-set resolution
nx_te = [101,101]# test-set resolution
epochf = 10
lr = 1e0
tests_num = 1
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
for it in range(tests_num):
inset = INSET(bounds, nx_tr, prob)
bdset = BDSET(bounds, nx_tr, prob)
teset = TESET(bounds, nx_te, prob)
netf = NETF()
#optimg = torch.optim.Adam(netg.parameters(), lr=lr)
#optimf = torch.optim.Adam(netf.parameters(), lr=lr)
optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
start_time = time.time()
ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
#print(ERROR,BUZHOU)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netf,teset.X)
testerror[it] = error(te_U, teset.u_acc)
print('testerror = %.3e\n' %(testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
%(testerror_mean.item(),testerror_std.item()))
if __name__ == '__main__':
main()
The results do not seem particularly good; how to improve them further is left for the reader to think about.