In [ ]:
import numpy as np
import matplotlib.pyplot as plt

2.4 Newton's method convergence - or not¶

In [ ]:
def newton_step(f,fp,x):
    # one Newton correction: -f(x)/f'(x)
    return -f(x)/fp(x)

def draw_newton_iteration(f,fp,x0s,a,b,nits):
    # Run nits Newton iterations from each starting point in x0s and draw them
    # two ways: the tangent-line construction on f (top) and the cobweb diagram
    # of the fixed-point map g(x) = x - f(x)/f'(x) (bottom).
    fig, (ax1,ax2) = plt.subplots(2,1,figsize=(3,6))
    xs = np.linspace(a,b,200)
    # Newton construction
    ax1.plot(xs,0*xs,color='k',lw=2,alpha=0.5) # horizontal axis
    ax1.plot(xs,f(xs),color='m',lw=3,alpha=0.5)

    # fixed-point view: g against the diagonal y = x
    def g(x):
        return x - f(x)/fp(x)
    ax2.plot(xs,xs,color='k',lw=2,alpha=0.5)
    ax2.plot(xs,g(xs),color='g',lw=2,alpha=0.5)

    ax1.text(a,1,'f',color='m',fontsize=30,alpha=0.5)
    ax2.text(a,b,'g',color='g',fontsize=30,alpha=0.5)

    for x0 in x0s:
        x = x0
        print(0,f'{x:20.15f}')
        for k in range(nits):
            xnew = x + newton_step(f,fp,x)
            # top: up from (x,0) to (x,f(x)), then along the tangent down to (xnew,0)
            ax1.plot([x,x,xnew],[0,f(x),0],lw=1.5,alpha=0.5,color='b')
            # bottom: cobweb segment (x,x) -> (x,g(x)) -> (g(x),g(x))
            ax2.plot([x,x,xnew],[x,xnew,xnew],lw=1.5,alpha=0.5,color='r')
            x = xnew
            print(k+1,f'{x:20.15f}')
    ax1.set_xlim(a,b)
    ax1.set_ylim(-1,1)
    ax2.set_xlim(a,b)
    ax2.set_ylim(a,b)
In [ ]:
def f(x):
    return np.tanh(x)
def fp(x):
    # derivative of tanh(x)
    return 1 - np.tanh(x)**2
# starting guess 1.06 is just inside the range of convergence: the iterates
# overshoot back and forth across the root x = 0 before settling onto it
draw_newton_iteration(f,fp,[1.06],-1.8,1.8,6)
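
Whether this converges at all depends on the starting guess. One Newton step for f(x) = tanh(x) sends x to x - sinh(x)*cosh(x), so the iteration just bounces between +x0 and -x0 exactly when sinh(2*x0) = 4*x0, and any starting guess beyond that borderline is thrown further away on every step. A short sketch (plain bisection on that equation, nothing beyond NumPy assumed) puts the borderline near 1.0886; rerunning the picture from just beyond it shows the iterates alternating in sign and escaping.

In [ ]:
# sketch: locate the borderline starting guess. sinh(2*a) = 4*a has a positive
# root bracketed by [1, 1.5], since sinh(2) < 4 and sinh(3) > 6
def h(a):
    return np.sinh(2*a) - 4*a

lo, hi = 1.0, 1.5
for _ in range(60):
    mid = 0.5*(lo + hi)
    if h(mid) > 0:
        hi = mid
    else:
        lo = mid
print('critical starting guess ~', 0.5*(lo + hi))   # about 1.0886

# just beyond the borderline: the iterates alternate in sign and run away
draw_newton_iteration(f,fp,[1.1],-1.8,1.8,4)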

Secant method¶

If the derivative is not readily available, a fast alternative to Newton's method is the secant method.
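
The idea is to replace the derivative in Newton's update by the slope of the secant line through the two most recent iterates x0 and x1, which is exactly the update in the cell below:

    x2 = x1 - f(x1)*(x1 - x0)/(f(x1) - f(x0))

Each step costs only one new function evaluation.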

In [ ]:
def f(x): return np.cos(x) - x     # root is the fixed point of cos(x)
x0 = 0
x1 = 1
print(x0,x1)
for k in range(8):
    if x1 == x0: break             # the two iterates have merged: done
    x2 = x1 - f(x1)*(x1-x0)/(f(x1)-f(x0))   # secant update
    x0,x1 = x1,x2
    print(x0,x1)

... converges pretty fast! The secant method is superlinear, with order of convergence equal to the golden ratio (about 1.618), so it nearly matches Newton's quadratic rate without ever evaluating a derivative.
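
A rough way to check that rate: with step sizes d_k = |x_{k+1} - x_k|, the ratio log(d_{k+2}/d_{k+1}) / log(d_{k+1}/d_k) estimates the order of convergence, and should hover around 1.6 here (the last value or two get polluted once the iterates reach machine precision). The cell below is a small sketch that reruns the same iteration, keeping every iterate, and prints those ratios.

In [ ]:
# rerun the secant iteration, keeping every iterate
def f(x): return np.cos(x) - x
xs = [0.0, 1.0]
for k in range(8):
    x0, x1 = xs[-2], xs[-1]
    if f(x1) == f(x0): break
    xs.append(x1 - f(x1)*(x1 - x0)/(f(x1) - f(x0)))

# estimate the order of convergence from successive step sizes
d = np.abs(np.diff(xs))
d = d[d > 0]                   # drop exact zeros once we hit machine precision
print(np.log(d[2:]/d[1:-1]) / np.log(d[1:-1]/d[:-2]))   # should hover around 1.6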

In [ ]: