import numpy as np
import matplotlib.pyplot as plt

def newton_step(f, fp, x):
    return -f(x)/fp(x)

def draw_newton_iteration(f, fp, x0s, a, b, nits):
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(3, 6))
    x = np.linspace(a, b, 200)

    # Newton construction
    ax1.plot(x, 0*x,  color='k', lw=2, alpha=0.5)   # horizontal axis
    ax1.plot(x, f(x), color='m', lw=3, alpha=0.5)

    # fixed-point view: g(x) = x - f(x)/f'(x)
    def g(x):
        return x - f(x)/fp(x)
    ax2.plot(x, x,    color='k', lw=2, alpha=0.5)
    ax2.plot(x, g(x), color='g', lw=2, alpha=0.5)

    ax1.text(a, 1, 'f', color='m', fontsize=30, alpha=0.5)
    ax2.text(a, b, 'g', color='g', fontsize=30, alpha=0.5)

    for x0 in x0s:
        x = x0
        print(0, f'{x:20.15f}')
        for k in range(nits):
            xnew = x + newton_step(f, fp, x)
            # top panel: up from (x, 0) to (x, f(x)), then along the tangent back to the axis at xnew
            ax1.plot([x, x, xnew], [0, f(x), 0], lw=1.5, alpha=0.5, color='b')
            # bottom panel: cobweb diagram of the fixed-point iteration x -> g(x)
            ax2.plot([x, x, xnew], [x, xnew, xnew], lw=1.5, alpha=0.5, color='r')
            x = xnew
            print(k+1, f'{x:20.15f}')

    ax1.set_xlim(a, b)
    ax1.set_ylim(-1, 1)
    ax2.set_xlim(a, b)
    ax2.set_ylim(a, b)
def f(x):  return np.tanh(x)
def fp(x): return 1 - np.tanh(x)**2

draw_newton_iteration(f, fp, [1.06], -1.8, 1.8, 6)
draw_newton_iteration(f, fp, [1.1],  -1.8, 1.8, 6)
Starting at x0 = 1.06 the iterates converge to the root at 0; starting at x0 = 1.1 they diverge:
for "black-box" f, f' not available
same idea as Newton (go to root of linearization), use (xk − 1, xk)-secant slope to approximate f' at xk
Thm 2.8 gives conditions for convergence of the secant method and its rate.
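A minimal sketch of the secant iteration, applied to the same f(x) = tanh(x) with root 0 (my own illustration, not from the notes: the function name secant, the starting pair x0 = 1.0, x1 = 0.9, and the error column are choices made here):

import numpy as np

def f(x): return np.tanh(x)

def secant(f, x0, x1, nits):
    # keep the last two iterates; their secant slope stands in for f'
    xprev, x = x0, x1
    for k in range(nits):
        slope = (f(x) - f(xprev)) / (x - xprev)   # approximates f'(x_k)
        xprev, x = x, x - f(x)/slope              # Newton-like step with that slope
        print(k+1, f'{x:20.15f}', f'error {abs(x):9.2e}')   # exact root is 0
    return x

secant(f, 1.0, 0.9, 6)

The error column shrinks faster than linearly; if Thm 2.8 is the standard secant result, the predicted order of convergence is (1 + sqrt(5))/2 ≈ 1.618, between linear and quadratic.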
Motivation - multi-dimensional Newton
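As a preview, a minimal sketch of the n-dimensional case (my own example, not from the notes; the function newton_nd and the test system below, a circle intersected with a parabola, are hypothetical): the division by f'(x_k) becomes a linear solve with the Jacobian, J_F(x_k) s = -F(x_k), followed by x_{k+1} = x_k + s.

import numpy as np

def newton_nd(F, J, x, nits):
    # multi-dimensional Newton: solve J(x) s = -F(x) for the step s, then update x
    for k in range(nits):
        s = np.linalg.solve(J(x), -F(x))   # replaces division by f'(x)
        x = x + s
        print(k+1, x)
    return x

# hypothetical test system: circle x^2 + y^2 = 4 intersected with the parabola y = x^2
def F(v):
    x, y = v
    return np.array([x**2 + y**2 - 4.0, y - x**2])

def J(v):
    x, y = v
    return np.array([[ 2*x, 2*y],
                     [-2*x, 1.0]])

newton_nd(F, J, np.array([1.0, 1.0]), 6)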