import numpy as np
import torch
import torch.nn as nn

from joblib import load as joblib_load
|
|
|
|
# Load the NN model once at import time.
|
|
class NuNet(nn.Module):
    """Fully connected feed-forward network with tanh hidden activations.

    Architecture: one input layer (inputs -> neurons), `layers` hidden
    layers (neurons -> neurons), one linear output layer (neurons ->
    outputs).  The output layer has no activation.

    The attribute name ``self.layers`` must stay unchanged: checkpoints
    saved with ``torch.load(..., weights_only=False)`` unpickle against
    this class definition.

    Args:
        inputs:  number of input features.
        outputs: number of output values.
        layers:  number of hidden (neurons -> neurons) layers.
        neurons: width of every hidden layer.
    """

    def __init__(self, inputs, outputs, layers, neurons):
        super().__init__()
        layer_list = [nn.Linear(inputs, neurons)]           # input layer
        layer_list.extend(
            nn.Linear(neurons, neurons) for _ in range(layers)  # hidden layers
        )
        layer_list.append(nn.Linear(neurons, outputs))      # output layer
        self.layers = nn.ModuleList(layer_list)

    def forward(self, x):
        # tanh on all but the last layer; torch.tanh replaces the
        # deprecated nn.functional.tanh (same values, no warning).
        for layer in self.layers[:-1]:
            x = torch.tanh(layer(x))
        return self.layers[-1](x)
|
|
# Set Re to match model file trained in NN_SR.py
NN_Re = 10000
# Master switch for the NN damping functions in calceps().
NN_bool = True # False -> fall back to keps AKN

if NN_bool:
    # Load the trained network once at import time.
    # NOTE(review): weights_only=False unpickles arbitrary Python objects;
    # only safe because the .pth file is produced locally by NN_SR.py --
    # never point this at an untrusted file.
    _NN_model = torch.load(f'nn/model-f_2-f_mu-Re{NN_Re}.pth',
                           weights_only=False)
    _NN_model.eval()  # inference mode

    # scalers: input0 = yplus, input1 = ystar -> order from NN_SR.py
    _scaler_yp = joblib_load(f'nn/scaler-input0-f_2-f_mu-Re{NN_Re}.bin')
    _scaler_ys = joblib_load(f'nn/scaler-input1-f_2-f_mu-Re{NN_Re}.bin')

    # Training-data bounds, in the order written by NN_SR.py:
    # [yplus_min, yplus_max, ystar_min, ystar_max,
    #  f2_min, f2_max, fmu_min, fmu_max].
    # Used in calceps() to clip NN inputs and outputs to the trained range.
    _mm = np.loadtxt(f'nn/min-max-f_2-f_mu-Re{NN_Re}.txt')
    _yplus_min, _yplus_max = _mm[0], _mm[1]
    _ystar_min, _ystar_max = _mm[2], _mm[3]
    _f2_min, _f2_max = _mm[4], _mm[5]
    _fmu_min, _fmu_max = _mm[6], _mm[7]
|
|
|
|
|
|
def calceps(su2d, sp2d, eps2d, gen):
    """Assemble the eps-equation sources and diagonal coefficient.

    Adds the production and dissipation sources of the eps equation to
    ``su2d``/``sp2d`` (new arrays are created; the inputs are not mutated),
    applies the case-specific hook ``modify_eps`` and builds the
    under-relaxed diagonal ``ap2d``.  The damping functions f2 and fmu come
    either from the trained NN (``NN_bool``) or from the analytic AKN
    expressions.

    Relies on module/global state defined elsewhere: viscos, dist, k2d,
    u2d, yp2d, ni, nj, vol, c_eps_1, c_eps_2, cmu, urf_eps, aw2d/ae2d/
    as2d/an2d, modify_eps and the iteration counter ``iter``.

    Returns:
        (su2d, sp2d, ap2d, fmu2d)
    """
    # NOTE(review): assumes a module-level iteration counter named ``iter``
    # exists; if it is the builtin, this banner never prints -- confirm.
    if iter == 0:
        print(f'calceps called (NN_bool={NN_bool}, Re={NN_Re})')

    # Kolmogorov-type velocity scale and the derived wall / turbulence
    # parameters used by both branches.
    u_eps = (eps2d * viscos) ** 0.25
    ystar = u_eps * dist / viscos
    rt = k2d ** 2 / (eps2d * viscos)

    if NN_bool:
        # Friction-velocity estimate from the first interior node and the
        # resulting y+ field.
        ustar = (viscos * u2d[:, 0] / yp2d[:, 0]) ** 0.5
        yplus = yp2d * ustar[:, None] / viscos

        # Keep both NN inputs inside the range seen during training.
        yplus = np.clip(yplus, _yplus_min, _yplus_max)
        ystar_in = np.clip(ystar, _ystar_min, _ystar_max)

        # Scale each feature with its fitted scaler, then stack columns
        # in the order the network was trained on (yplus, ystar).
        feat_yp = _scaler_yp.transform(yplus.reshape(-1, 1))
        feat_ys = _scaler_ys.transform(ystar_in.reshape(-1, 1))
        X = np.hstack((feat_yp, feat_ys))

        with torch.no_grad():
            out = _NN_model(torch.tensor(X, dtype=torch.float32)).numpy()

        # Clip predictions to the training range and restore the 2-D grid.
        f2 = np.clip(out[:, 0].reshape(ni, nj), _f2_min, _f2_max)
        fmu2d = np.clip(out[:, 1].reshape(ni, nj), _fmu_min, _fmu_max)
    else:
        # Standard analytic AKN damping functions.
        wall_damp_2 = (1 - np.exp(-ystar / 3.1)) ** 2
        f2 = wall_damp_2 * (1. - 0.3 * np.exp(-(rt / 6.5) ** 2))
        wall_damp_mu = (1 - np.exp(-ystar / 14)) ** 2
        fmu2d = wall_damp_mu * (1 + 5 / rt ** 0.75 * np.exp(-(rt / 200) ** 2))

    # Both branches cap fmu at one.
    fmu2d = np.minimum(fmu2d, 1.0)

    # Explicit (su) and implicit (sp) eps sources.  ``gen`` is presumably a
    # normalized production term from the caller -- confirm against the
    # momentum/k routines.
    su2d = su2d + c_eps_1 * cmu * fmu2d * gen * k2d * vol
    sp2d = sp2d - c_eps_2 * f2 * eps2d * vol / k2d

    # Case-specific source modifications supplied elsewhere.
    su2d, sp2d = modify_eps(su2d, sp2d)

    # Diagonal from the neighbour coefficients, then under-relaxation:
    # ap /= urf and the deferred part goes into su.
    ap2d = (aw2d + ae2d + as2d + an2d - sp2d) / urf_eps
    su2d = su2d + (1 - urf_eps) * ap2d * eps2d

    return su2d, sp2d, ap2d, fmu2d