Day 17 S25¶

Galerkin method for an arbitrary differential equation using a hat function basis¶
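The setup, restating what the assembly code below implements: solve the two-point boundary value problem

$$-\left(r(x)\,y'(x)\right)' + s(x)\,y(x) = f(x), \qquad y(0)=y(1)=0,$$

by seeking $y(x) \approx \sum_j a_j \phi_j(x)$ with hat functions $\phi_j$ centered at the interior grid nodes. Multiplying by $\phi_i$, integrating over $[0,1]$, and integrating the $r$ term by parts (the boundary terms vanish since every $\phi_i$ is zero at $x=0$ and $x=1$) gives the linear system $A\mathbf{a}=\mathbf{b}$ with

$$A_{ij} = \int_0^1 \left(r\,\phi_i'\,\phi_j' + s\,\phi_i\,\phi_j\right)\,dx, \qquad b_i = \int_0^1 f\,\phi_i\,dx.$$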

In [1]:
import sympy as sp
sp.init_printing()
In [2]:
from matplotlib import rcdefaults
rcdefaults()  # restore default matplotlib rc parameters
%config InlineBackend.figure_format='retina'   
import seaborn as sns  # wrapper for matplotlib that provides prettier styles and more
import matplotlib.pyplot as plt  # use matplotlib functionality directly
%matplotlib inline   
sns.set()
In [3]:
import numpy as np

Cook up a differential equation with a specified solution¶

In [4]:
def f_for_chosen(r,s,y):
    # find the f that makes the chosen y the exact solution of -(r y')' + s y = f
    x = sp.symbols('x')
    fexpr = sp.expand(-sp.diff( r(x)*sp.diff(y(x),x), x ) + s(x)*y(x))
    return sp.lambdify(x, fexpr)

def r(x): return 1+x**2 
def s(x): return 2-x
def y(x): return x**2*(1-x)
f = f_for_chosen( r,s,y )
x = sp.symbols('x')
f(x)
Out[4]:
$\displaystyle x^{4} + 9 x^{3} - 4 x^{2} + 6 x - 2$
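As a quick sanity check (a sketch added for illustration, not part of the original run), substitute the chosen $y$ back into the differential equation and confirm symbolically that the residual against the computed $f$ vanishes:

In [ ]:
# residual of -(r y')' + s y - f for the manufactured solution; expect 0
sp.simplify( -sp.diff(r(x)*sp.diff(y(x),x), x) + s(x)*y(x) - f(x) )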
In [10]:
def make_phi(template,h,xi):
    # shift and scale the reference template so it is centered at node xi
    # with support [xi-h, xi+h]
    def phi(x):
        return template( (x-xi)/h )
    return phi

def pwlinear_spline(x):
    # reference hat: rises linearly from 0 at x=-1 to 1 at x=0, falls back to 0 at x=1
    f = np.zeros_like(x)
    subinterval = (x>=-1) & (x< 0); subx = x[subinterval]+1; f[subinterval] = subx
    subinterval = (x>= 0) & (x< 1); subx = 1-x[subinterval]; f[subinterval] = subx
    return f

def calculate_hat_Aij(r,s,im1,jm1,gridx):
    # A_ij = integral of r*phi_i'*phi_j' + s*phi_i*phi_j over [0,1];
    # hats overlap only their immediate neighbors, so A is tridiagonal
    i = im1 + 1   # im1,jm1 index the interior nodes; i,j index gridx
    j = jm1 + 1
    x = sp.symbols('x')
    h = gridx[1] - gridx[0]
    #print('h =',h)
    if j<i:
        assert(0)  # caller should use symmetry to fill the lower triangle
    elif i==j:
        rpart = sp.integrate(  r(x)/h**2                  , (x, gridx[i-1], gridx[i+1]) )
        spart = sp.integrate(  s(x)*(x-gridx[i-1])**2/h**2, (x, gridx[i-1], gridx[i  ]) ) + \
                sp.integrate(  s(x)*(x-gridx[i+1])**2/h**2, (x, gridx[i  ], gridx[i+1]) )
    elif j==i+1:
        rpart = sp.integrate( -r(x)/h**2, (x,gridx[i  ], gridx[i+1]) )
        spart = sp.integrate(  s(x)*(x-gridx[i])*(gridx[i+1]-x)/h**2, (x,gridx[i  ], gridx[i+1]) )
    else:
        rpart = 0
        spart = 0
    return rpart + spart

def calculate_hat_bi(f,im1,gridx):
    # b_i = integral of f*phi_i over the hat's support [x_{i-1}, x_{i+1}]
    i = im1+1
    h = gridx[1] - gridx[0]
    x = sp.symbols('x')
    #print(gridx[i-1], gridx[i  ], gridx[i+1])
    return sp.integrate(  f(x)*(x-gridx[i-1])/h, (x, gridx[i-1], gridx[i  ]) ) + \
           sp.integrate(  f(x)*(gridx[i+1]-x)/h, (x, gridx[i  ], gridx[i+1]) )

fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(15,10))
gs = axs[0,1].get_gridspec()
for ax in axs[:, -1]:   # merge the right-hand column into one tall axes
    ax.remove()
axbig = fig.add_subplot(gs[:, -1])

# Ackleh example: -((1+x)y')' = 100 with y(0)=y(1)=0
def r(x): return 1+x
def s(x): return 0
def f(x): return 100


# arbitrary diff eq: the manufactured-solution example from above
# (flip to True to use it instead of the Ackleh example)
if False:
    def f_for_chosen(r,s,y):  # find the f that makes y the exact solution
        x = sp.symbols('x')
        fexpr = sp.expand(-sp.diff( r(x)*sp.diff(y(x),x), x ) + s(x)*y(x))
        return sp.lambdify(x, fexpr)

    def r(x): return 1+x**2 
    def s(x): return 2-x
    def y(x): return x**2*(1-x)
    f = f_for_chosen( r,s,y )
    x = sp.symbols('x')
    f(x)

ns = []
errors = []
for p,M in enumerate([2**k-1 for k in range(1,6)]):
    np2 = M + 2   # M interior nodes plus the two boundary nodes

    # build the basis functions (pwlinear hat functions)
    gridx = np.linspace(0,1,np2)
    h = gridx[1] - gridx[0]
    phis = [make_phi(pwlinear_spline,h,xi) for xi in gridx[1:-1]]
    
    n = np2 - 2
    A = np.zeros((n,n))
    b = np.empty(n)
    for i in range(n):
        for j in range(i, min(i+2,n)):  # tridiagonal: only j=i and j=i+1 can be nonzero
            A[i,j] = calculate_hat_Aij(r,s,i,j,gridx)
            if j!=i: A[j,i] = A[i,j]  # fill the lower triangle by symmetry
        b[i] = calculate_hat_bi(f,i,gridx)
    #display(A)
    #display(b)
    
    # solve for the coefficients
    a = np.linalg.solve(A,b)
    
    # make pictures
    x = np.linspace(0,1,500)
    approx = np.sum([ai*phi(x) for ai,phi in zip(a,phis)],axis=0)
    exact = -100*x + 100*np.log(1+x)/np.log(2)   # exact solution of the Ackleh example
    #if p==0: axs[0,0].plot(x, exact,'k',label='exact solution')
    axs[0,0].plot(x, approx,alpha=0.5,label=f'galerkin, h={h}')
    axs[1,0].plot(x,approx-exact,label=f'error, h={h}')
    ns.append(n)
    errors.append(np.abs(approx-exact).max())

axs[0,0].set_ylabel('exact and approximations')
axs[0,0].legend()
axs[1,0].set_xlabel('x')
axs[1,0].set_ylabel('error(x)')
axs[1,0].legend()
axbig.set_aspect(1)
axbig.loglog(ns,errors,'o')
axbig.set_xlabel('n')
axbig.set_ylabel('max error');

# overlay the coarsest (single interior node, h=1/2) Galerkin approximation for reference
left  = x<=0.5
right = x> 0.5
axs[0,0].plot(x[left], 100/6*x[left]     ,color='cyan',lw=5,alpha=0.25)
axs[0,0].plot(x[right],100/6*(1-x[right]),color='cyan',lw=5,alpha=0.25);
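As a small check of the basis construction (a sketch, not in the original notebook), each hat built by make_phi should be 1 at its own node and 0 at the neighboring nodes:

In [ ]:
# nodal values of one hat function: expect array([0., 1., 0.])
nodes = np.linspace(0, 1, 5)   # grid with h = 0.25
phi1 = make_phi(pwlinear_spline, nodes[1]-nodes[0], nodes[1])
phi1(np.array([nodes[0], nodes[1], nodes[2]]))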
In [7]:
# ratios of successive max errors as h is repeatedly halved
[errors[i]/errors[i+1] for i in range(len(errors)-1)]
Out[7]:
$\displaystyle \left[ 3.30413599515452, \ 3.6171125399262, \ 3.80246600627831, \ 3.89613285819206\right]$

Empirically, the max error drops by a factor of about 4 each time h is halved, i.e. the error is O(h²): the second-order convergence expected for a piecewise-linear (hat function) basis.
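To make that precise (a sketch, assuming the errors list from the loop above is still in scope): if the max error behaves like $Ch^p$, the ratio of successive errors tends to $2^p$ as $h$ is halved, so the empirical order is the base-2 logarithm of each ratio.

In [ ]:
# empirical convergence order p from successive error ratios (ratio -> 2^p)
[float(np.log2(errors[i]/errors[i+1])) for i in range(len(errors)-1)]
# values approach 2 as h -> 0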
