This page was generated from unit-10.2-tensorflow/TensorFlowNGSolve.ipynb.

10.2 NGSolve with TensorFlow

  M. Feischl + J. Schöberl

[1]:
from ngsolve import *
from ngsolve.webgui import Draw
import numpy as np
[2]:
mesh = Mesh(unit_square.GenerateMesh(maxh=0.2))

fes = H1(mesh, order=2, dirichlet=".*")
u,v = fes.TnT()
a = BilinearForm(grad(u)*grad(v)*dx)
f = LinearForm(v*dx)
gfu = GridFunction(fes)

deform = GridFunction(VectorH1(mesh, order=1))
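
The forms above discretize the Poisson model problem

$$-\Delta u = 1 \ \text{ in } \Omega, \qquad u = 0 \ \text{ on } \partial\Omega,$$

in weak form: find $u \in H^1_0(\Omega)$ such that $\int_\Omega \nabla u \cdot \nabla v \, dx = \int_\Omega v \, dx$ for all $v \in H^1_0(\Omega)$.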

Solve a parametric problem: the parameters (Ax, Ay) enter through a deformation of the mesh:

[3]:
def Solve(Ax, Ay):
    mesh.UnsetDeformation()
    deform.Interpolate ( (x*y*Ax, x*y*Ay) )
    mesh.SetDeformation(deform)
    a.Assemble()
    f.Assemble()
    gfu.vec.data = a.mat.Inverse(fes.FreeDofs()) * f.vec
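
The deformation field (x·y·Ax, x·y·Ay) vanishes on the edges x = 0 and y = 0 and grows towards the corner (1, 1), which is shifted by (Ax, Ay). A quick point evaluation illustrates this (an optional sketch, not part of the tutorial):

mesh.UnsetDeformation()
deform.Interpolate((x*y*0.8, x*y*0.5))
print(deform(mesh(0.5, 0.5)))   # x*y*(0.8, 0.5) at (0.5, 0.5): approx. (0.2, 0.125)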
[4]:
Solve(0.8, 0.5)
Draw (gfu);
[5]:
# np.asarray(gfu.vec)   # the solution vector can be viewed as a NumPy array
[6]:
n_data = 50   # number of datapoints
input_dim = 2   # dimension of each datapoint
data_in = np.random.uniform(0,1,size=(n_data,input_dim))  # artificial datapoints
# print (data_in)
[7]:
output_dim = fes.ndof
data_out = np.zeros((n_data, output_dim))

for i, (ax,ay) in enumerate(data_in):
    Solve (ax, ay)
    data_out[i,:] = gfu.vec
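
Assigning gfu.vec into a NumPy row works via the buffer protocol; inside the loop one could equivalently go through the FlatVector's NumPy view (shown here only as a sketch):

data_out[i,:] = gfu.vec.FV().NumPy()   # same effect as data_out[i,:] = gfu.vec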
[8]:
print (output_dim)
133

Start the training …

[9]:
import tensorflow as tf

# func = 'relu'   # activation function
func = 'swish'    # activation function
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(input_dim,)),
    tf.keras.layers.Dense(10, activation=func),
    tf.keras.layers.Dense(20, activation=func),
    tf.keras.layers.Dense(50, activation=func),
    tf.keras.layers.Dense(output_dim)
    ])

# Standard Adam optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

# Loss function (error functional): mean squared error (least squares)
@tf.function
def loss_fn(y_true, y_pred):
    return tf.math.reduce_mean(tf.math.square(y_true - y_pred))

# equivalently, the built-in Keras implementation (replaces the hand-written version above)
loss_fn = tf.keras.losses.MeanSquaredError()

# Training
epochs = 1000      # number of times the full dataset is used for training
batch_size = 1024  # up to 1024 datapoints per gradient step; with n_data = 50 this is full-batch training
model.compile(optimizer=optimizer,loss=loss_fn)
model.fit(data_in,data_out,epochs=epochs,batch_size=batch_size,verbose=0)
print(loss_fn(data_out,model(data_in)).numpy())
6.6396956e-07
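
To monitor how the loss decays, one can keep the History object that fit returns (an optional sketch; the name history is ours, and rerunning fit continues training from the current weights):

history = model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size, verbose=0)
print(history.history["loss"][::100])   # training loss sampled every 100 epochs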
[10]:
output = model(np.array([[0.1,0.2]]))
outputvec = output.numpy().flatten()

Solve (0.1, 0.2)
Draw (gfu)

gfumodel = GridFunction(fes)
gfumodel.vec.FV()[:] = outputvec

Draw (gfumodel)
Draw (gfu-gfumodel, mesh);
print ("err = ", Integrate((gfu-gfumodel)**2, mesh)**0.5)
err =  0.000527287321205847
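
The same comparison can be repeated for several unseen parameter pairs to get a rough generalization check (a sketch in the spirit of the cell above; the loop and variable names are ours):

for ax, ay in np.random.uniform(0, 1, size=(5, input_dim)):
    Solve(ax, ay)
    gfumodel.vec.FV().NumPy()[:] = model(np.array([[ax, ay]])).numpy().flatten()
    print("err =", Integrate((gfu-gfumodel)**2, mesh)**0.5)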