HW 1

September 3, 2022

[1]: # Problem 1a
# Plot f(x) = 1 - 2*ln(x) to verify the presence of a zero in the interval (1,2).
# Label the exact location of the root p = e^(1/2).

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(1, 2, 101)
f = 1 - 2*np.log(x)
xp = np.e**(1/2)
fp = 0
plt.plot(x, f, 'r', label='f(x)=1-2ln(x)')
plt.plot(xp, fp, 'b.', label='p=e^(1/2)')

plt.legend()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()

[2]: # Problem 1b
# Bisection algorithm
def bisection(eps, func, a, b, Nmax):

    import numpy as np

    pn = np.zeros(Nmax)
    pn.fill(np.nan)

    sfa = np.sign(func(a))

    for ii in range(0, Nmax):
        pn[ii] = a + (b - a)/2.0

        if (b - a) < (2.0*eps):
            print("The convergence tolerance has been met")
            print("after {0:d} iterations".format(ii+1))
            pn = pn[~np.isnan(pn)]
            return pn

        # Keep the half-interval across which func changes sign.
        sfp = np.sign(func(pn[ii]))
        if (sfa*sfp) < 0.0:
            b = pn[ii]
        else:
            a = pn[ii]
            sfa = np.sign(func(a))

    print("The convergence tolerance has not been met")
    print("after Nmax = {0:d} iterations".format(Nmax))
    return pn

[3]: a1 = 1
b1 = 2
Nmax1 = 5
eps1 = 0.00001

def f1(x):
    f1x = 1 - 2*np.log(x)
    return f1x

pn1 = bisection(eps1, f1, a1, b1, Nmax1)

The convergence tolerance has not been met
after Nmax = 5 iterations

[4]: p1 = np.exp(1/2)
n1 = np.linspace(1,Nmax1,Nmax1)
e1 = abs(pn1-p1)
eb1 = (b1-a1)/(2.0**n1)

print("Iter.\t\tApprox.\t\tAbs. Error\tTheor. Bound")


print("n\t\tp_n\t\t|e|\t\t(b-a)/(2^n)\n")

for ii in range(0,Nmax1):
print("{0:f}\t{1:f}\t{2:f}\t{3:f}".\
format(n1[ii],pn1[ii],e1[ii],eb1[ii]))

Iter.           Approx.         Abs. Error      Theor. Bound
n               p_n             |e|             (b-a)/(2^n)

1.000000        1.500000        0.148721        0.500000
2.000000        1.750000        0.101279        0.250000
3.000000        1.625000        0.023721        0.125000
4.000000        1.687500        0.038779        0.062500
5.000000        1.656250        0.007529        0.031250

[5]: # The absolute error does not decrease on every iteration.
# Compared with the theoretical bound, the absolute error is always smaller.
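As a cross-check on the bound column, the theoretical bound also predicts how many iterations bisection needs: the smallest n with (b-a)/2^n < eps is n = ceil(log2((b-a)/eps)). A minimal added sketch (not part of the original assignment):

[ ]: # Sketch: minimum bisection iterations implied by the bound (b-a)/2^n < eps.
n_needed = int(np.ceil(np.log2((b1 - a1) / eps1)))
print("At least", n_needed, "iterations needed")   # 17 here, so Nmax1 = 5 cannot meet eps1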

[6]: #Problem 1c Error vs Number of Iterations
plt.plot(n1, e1, 'r', label='Bisection Method Error')   # reuse the errors computed above

plt.legend()
plt.xlabel('Number of Iterations')
plt.ylabel('Error')
plt.show()

[7]: # Problem 1d
# The bisection method's main limitation is slow convergence, since it is based
# on halving the interval: it takes only a few iterations if the root lies near
# the center of the initial interval, but many more if the root is close to the
# bounds. Also, if the root is a tangent point (f touches zero without changing
# sign), the bisection method will fail.
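To make the tangent-root failure concrete, here is a small added check (an illustration, not part of the original problem; f_tangent is a hypothetical example): f(x) = (x - 1.3)^2 touches zero at x = 1.3 without crossing it, so the sign test that bisection relies on never detects a bracket.

[ ]: # Sketch: a tangent root produces no sign change, so bisection has no bracket.
def f_tangent(x):
    return (x - 1.3)**2
print(np.sign(f_tangent(0.0)), np.sign(f_tangent(2.0)))   # both 1.0: no sign change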

[8]: # Problem 2a
def f2(x):
    f2x = (1/x) - 48
    return f2x

a2 = 0.01
b2 = 0.1
Nmax2 = 1000
eps2 = 0.00001
pn2 = bisection(eps2, f2, a2, b2, Nmax2)

print("\npn is printed to ten digits.")
print("The first five digits are significant.")
print("pn {0:10.9f}".format(pn2[-1]))

The convergence tolerance has been met
after 14 iterations

pn is printed to ten digits.
The first five digits are significant.
pn 0.020838013

[9]: n2 = np.linspace(1, len(pn2), len(pn2))
plt.plot(n2,pn2,'r')
plt.legend([r'$p_n$'])
plt.xlabel('Iteration Number n')
plt.ylabel('Approximation pn to the Root p')
plt.show()

[10]: p2 = 1/48
e2 = abs(p2-pn2)
plt.plot(n2,e2,'b')
plt.legend(['|e|'])
plt.xlabel('Iteration Number n')
plt.ylabel('Absolute Error |e|')
plt.show()

[11]: # Problem 3a: Perform five iterations of Newton's method.
def newton(f, fp, po, Nmax, eps):

    import numpy as np

    def g(x, f, fp):
        # One Newton step: x - f(x)/f'(x)
        gx = x - f(x)/fp(x)
        return gx

    pn = np.zeros(Nmax+1)
    pn.fill(np.nan)
    pn[0] = po

    for ii in range(1, Nmax+1):
        pn[ii] = g(pn[ii-1], f, fp)
        if abs(pn[ii] - pn[ii-1]) < eps:
            print("The convergence tolerance has been met")
            print("after {0:d} iterations".format(ii))
            pn = pn[~np.isnan(pn)]
            return pn

    print("The convergence tolerance has not been met")
    print("after Nmax = {0:d} iterations".format(Nmax))
    return pn

[12]: def f(x):
    fx = x**3 - 15
    return fx

def fdash(x):
    fdashx = 3*x**2
    return fdashx

p_o = 2
Nmax = 5
eps = .00001
p_n = newton(f,fdash,p_o,Nmax,eps)

print("\nApproximations of the Root p:")
for ii in range(0,len(p_n)):
    print("Iteration {0:d}: p_n({1:d}) = {2:9.8e}"\
          .format(ii,ii,p_n[ii]))

The convergence tolerance has been met
after 5 iterations

Approximations of the Root p:
Iteration 0: p_n(0) = 2.00000000e+00
Iteration 1: p_n(1) = 2.58333333e+00
Iteration 2: p_n(2) = 2.47144179e+00
Iteration 3: p_n(3) = 2.46622313e+00
Iteration 4: p_n(4) = 2.46621207e+00
Iteration 5: p_n(5) = 2.46621207e+00

[13]: # Problem 3b
print("n\t|p_(n)-p_(n-1)|\t|p_(n-1)-p|\t|p_(n)-p|")
p_n6 = p_n
p6 = (15)**(1.0/3.0)

pnpn1 = np.zeros(len(p_n6)-1)
pnpn1.fill(np.nan)
pn1p = np.zeros(len(p_n6)-1)
pn1p.fill(np.nan)
pnp = np.zeros(len(p_n6)-1)
pnp.fill(np.nan)

for ii in range(1,len(p_n6)):
    pnpn1[ii-1] = abs(p_n6[ii]-p_n6[ii-1])
    pn1p[ii-1] = abs(p_n6[ii-1]-p6)
    pnp[ii-1] = abs(p_n6[ii]-p6)

for ii in range(0,len(p_n6)-1):
    print("{0:d}\t{1:10.9e}\t{2:10.9e}\t{3:10.9e}".\
          format(ii+1,pnpn1[ii],pn1p[ii],pnp[ii]))

n   |p_(n)-p_(n-1)|   |p_(n-1)-p|       |p_(n)-p|

1 5.833333333e-01 4.662120743e-01 1.171212590e-01
2 1.118915482e-01 1.171212590e-01 5.229710847e-03
3 5.218652288e-03 5.229710847e-03 1.105855878e-05
4 1.105850919e-05 1.105855878e-05 4.958700117e-11
5 4.958655708e-11 4.958700117e-11 4.440892099e-16

[14]: # Problem 3c
pnp_pn1p_ratio = pnp / pn1p**2.0
print("Ratio |p_(n)-p| / |p_(n-1)-p|^2")
for ii in range(0,len(pnp)):
    print("Iteration {0:d}: {1:10.9e}".\
          format(ii+1,pnp_pn1p_ratio[ii]))

Ratio |p_(n)-p| / |p_(n-1)-p|^2

Iteration 1: 5.388510062e-01
Iteration 2: 3.812468050e-01
Iteration 3: 4.043367130e-01
Iteration 4: 4.054812563e-01
Iteration 5: 1.806069805e+05

[15]: # This ratio approaches |f''(p)/(2f'(p))|; by the 4th iteration it is already
# close (the 5th is corrupted by roundoff, since the error has reached ~1e-16).
ans = (6*(15**(1/3)))/(2*3*(15**(2/3)))   # f''(p) = 6p, f'(p) = 3p^2, p = 15^(1/3)
print("{0:10.9f}".format(ans))

0.405480133
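As an optional cross-check (a sketch assuming SymPy is available; not part of the original solution), the same constant can be computed symbolically:

[ ]: # Sketch: symbolic check of |f''(p)/(2f'(p))| for f(x) = x^3 - 15 at p = 15^(1/3).
import sympy as sp
xs = sp.symbols('x')
fs = xs**3 - 15
const = sp.Abs(fs.diff(xs, 2) / (2*fs.diff(xs))).subs(xs, sp.root(15, 3))
print(const, float(const))   # 15**(-1/3) ≈ 0.405480133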

[16]: # Problem 4a
def f_a(x):
    f_ax = np.exp(x) + x*x - x - 7
    return f_ax

x_a = np.linspace(-3,2,501)
f_array_a = f_a(x_a)

plt.plot(x_a,f_array_a,'b')
plt.axhline(y=0,color='k',linestyle='--')
plt.legend([r'$f(x)=e^x+x^2-x-7$','y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()

[17]: # Roots are near x=-2.2 and x=1.8, with the given tolerance eps = 10^-6
# f'(x) = e^x + 2x - 1
def fdash_a(x):
    fd_ax = np.exp(x) + 2*x - 1.0
    return fd_ax

eps_a1 = 10**-6.0
eps_a2 = 10**-6.0
po_a1 = -2.2
po_a2 = 1.8
Nmax_a1 = 100
Nmax_a2 = 100
pn_a1 = newton(f_a,fdash_a,po_a1,Nmax_a1,eps_a1)
pn_a2 = newton(f_a,fdash_a,po_a2,Nmax_a2,eps_a2)
print("\nFinal approximation of the first root of f(x):")
print("p_n({0:d}) = {1:10.9e}".format(len(pn_a1),pn_a1[-1]))
print("\nFinal approximation of the second root of f(x):")
print("p_n({0:d}) = {1:10.9e}".format(len(pn_a2),pn_a2[-1]))
p_a1 = pn_a1[-1]
p_a2 = pn_a2[-1]
logabs_en_a1 = np.log(abs( pn_a1[1:-1] - p_a1 ))
logabs_en1_a1 = np.log(abs( pn_a1[0:-2] - p_a1 ))
logabs_en_a2 = np.log(abs( pn_a2[1:-1] - p_a2 ))
logabs_en1_a2 = np.log(abs( pn_a2[0:-2] - p_a2 ))
plt.plot(logabs_en1_a1,logabs_en_a1,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_a2,logabs_en_a2,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()

The convergence tolerance has been met
after 3 iterations
The convergence tolerance has been met
after 4 iterations

Final approximation of the first root of f(x):
p_n(4) = -2.171324295e+00

Final approximation of the second root of f(x):
p_n(5) = 1.741839662e+00

[18]: # Problem 4b
def f_b(x):
    f_bx = x**3.0 - x**2 - 9*x + 4
    return f_bx

x_b = np.linspace(-4,4,101)
f_array_b = f_b(x_b)

plt.plot(x_b,f_array_b,'b')
plt.axhline(y=0,color='k',linestyle='--')
plt.legend([r'$f(x)=x^3-x^2-9x+4$','y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()

[19]: # Roots are near x=-2.8, x=0.5, x=3.3
# f'(x) = 3x^2 - 2x - 9
def fdash_b(x):
    fdash_bx = 3.0*x**2.0 - 2.0*x - 9.0
    return fdash_bx

eps_b1 = 10**-6.0
eps_b2 = 10**-6.0
eps_b3 = 10**-6.0
po_b1 = -2.8
po_b2 = 0.5
po_b3 = 3.3
Nmax_b1 = 100
Nmax_b2 = 100
Nmax_b3 = 100
pn_b1 = newton(f_b,fdash_b,po_b1,Nmax_b1,eps_b1)
pn_b2 = newton(f_b,fdash_b,po_b2,Nmax_b2,eps_b2)
pn_b3 = newton(f_b,fdash_b,po_b3,Nmax_b3,eps_b3)

print("\nFinal approximation of the first root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_b1),pn_b1[-1]))
print("\nFinal approximation of the second root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_b2),pn_b2[-1]))
print("\nFinal approximation of the third root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_b3),pn_b3[-1]))

p_b1 = pn_b1[-1]
p_b2 = pn_b2[-1]
p_b3 = pn_b3[-1]

logabs_en_b1 = np.log(abs( pn_b1[1:-1] - p_b1 ))
logabs_en1_b1 = np.log(abs( pn_b1[0:-2] - p_b1 ))
logabs_en_b2 = np.log(abs( pn_b2[1:-1] - p_b2 ))
logabs_en1_b2 = np.log(abs( pn_b2[0:-2] - p_b2 ))
logabs_en_b3 = np.log(abs( pn_b3[1:-1] - p_b3 ))
logabs_en1_b3 = np.log(abs( pn_b3[0:-2] - p_b3 ))

plt.plot(logabs_en1_b1,logabs_en_b1,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_b2,logabs_en_b2,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_b3,logabs_en_b3,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()

The convergence tolerance has been met
after 5 iterations
The convergence tolerance has been met
after 6 iterations
The convergence tolerance has been met
after 5 iterations

Final approximation of the first root of f(x):
pn(6) = -2.770161901e+00

Final approximation of the second root of f(x):
pn(7) = 4.326446817e-01

Final approximation of the third root of f(x):
pn(6) = 3.337517322e+00

[20]: # Problem 4c
def f_c(x):
    f_cx = 1.08 - 1.03*x + np.log(x)
    return f_cx

x_c = np.linspace(0.6,1.6,101)
f_array_c = f_c(x_c)
plt.plot(x_c,f_array_c,'b')
plt.axhline(y=0,color='k',linestyle='--')
plt.legend([r'$f(x)=1.08-1.03x+\ln(x)$','y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()

[21]: # Roots are near x=0.65 and x=1.35
# f'(x) = -1.03 + 1/x
def fdash_c(x):
    fdash_cx = -1.03 + 1.0/x
    return fdash_cx

eps_c1 = 10**-6.0
eps_c2 = 10**-6.0
po_c1 = 0.68
po_c2 = 1.3
Nmax_c1 = 100
Nmax_c2 = 100
pn_c1 = newton(f_c,fdash_c,po_c1,Nmax_c1,eps_c1)
pn_c2 = newton(f_c,fdash_c,po_c2,Nmax_c2,eps_c2)
print("\nFinal approximation of the first root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_c1),pn_c1[-1]))
print("\nFinal approximation of the second root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_c2),pn_c2[-1]))
p_c1 = pn_c1[-1]
p_c2 = pn_c2[-1]
logabs_en_c1 = np.log(abs( pn_c1[1:-1] - p_c1 ))
logabs_en1_c1 = np.log(abs( pn_c1[0:-2] - p_c1 ))
logabs_en_c2 = np.log(abs( pn_c2[1:-1] - p_c2 ))
logabs_en1_c2 = np.log(abs( pn_c2[0:-2] - p_c2 ))
plt.plot(logabs_en1_c1,logabs_en_c1,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_c2,logabs_en_c2,'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()

The convergence tolerance has been met
after 5 iterations
The convergence tolerance has been met
after 6 iterations

Final approximation of the first root of f(x):
pn(6) = 6.942515599e-01

Final approximation of the second root of f(x):
pn(7) = 1.312718900e+00

[22]: # Problem 5: f(x) = 81x^4 + 27x^3 - 9x^2 + 3x - 22, zero at x=2/3, po=0
# Perform 10 iterations of Newton's method and
# plot the absolute error vs. iteration number.
def f(x):
    fx = 81*x**4.0 + 27*x**3.0 - 9*x**2 + 3*x - 22
    return fx

# f'(x) = 324x^3 + 81x^2 - 18x + 3
def fdash(x):
    fdashx = 324*x**3.0 + 81*x**2.0 - 18*x + 3
    return fdashx

po_a = 0
Nmax_a = 10
eps_a = 0.00001
pn_a = newton(f,fdash,po_a,Nmax_a,eps_a)
print("\nApproximations of the Root p:")
for ii in range(0,len(pn_a)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,pn_a[ii]))
p_a = 2/3
abs_e_a = np.abs(pn_a-p_a)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0,len(abs_e_a)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,abs_e_a[ii]))

logabs_en_a = np.log(abs( pn_a[1:] - p_a ))
logabs_en1_a = np.log(abs( pn_a[0:-1] - p_a ))
plt.plot(logabs_en1_a,logabs_en_a,'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()
itera = [1,2,3,4,5,6,7,8,9,10]
plt.plot(itera,abs_e_a[1:],'b')   # absolute error at each iteration
plt.xlabel('Iterations')
plt.ylabel('Absolute Error')
plt.show()

The convergence tolerance has not been met
after Nmax = 10 iterations

Approximations of the Root p:
Iteration 0: pn(0) = 0.00000000e+00
Iteration 1: pn(1) = 7.33333333e+00
Iteration 2: pn(2) = 5.48170875e+00
Iteration 3: pn(3) = 4.09392354e+00
Iteration 4: pn(4) = 3.05450000e+00
Iteration 5: pn(5) = 2.27726437e+00
Iteration 6: pn(6) = 1.69856166e+00
Iteration 7: pn(7) = 1.27285239e+00
Iteration 8: pn(8) = 9.70684389e-01
Iteration 9: pn(9) = 7.77938934e-01
Iteration 10: pn(10) = 6.87334539e-01

Absolute Error in the Approximations p:
Iteration 0: pn(0) = 6.66666667e-01
Iteration 1: pn(1) = 6.66666667e+00
Iteration 2: pn(2) = 4.81504208e+00
Iteration 3: pn(3) = 3.42725688e+00
Iteration 4: pn(4) = 2.38783334e+00
Iteration 5: pn(5) = 1.61059770e+00
Iteration 6: pn(6) = 1.03189499e+00
Iteration 7: pn(7) = 6.06185726e-01
Iteration 8: pn(8) = 3.04017722e-01
Iteration 9: pn(9) = 1.11272268e-01
Iteration 10: pn(10) = 2.06678725e-02

[23]: # Problem 5b
# The order of convergence is estimated from the slope of the log-log error
# plot above, which is approximately 1 over these iterations.

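Rather than reading the slope off the plot by eye, the order can also be estimated numerically from three consecutive errors, q ≈ ln(e_{n+1}/e_n) / ln(e_n/e_{n-1}). A minimal added helper (a sketch, not part of the original solution; estimate_order is a hypothetical name):

[ ]: # Sketch: estimate the order of convergence from a sequence of absolute errors.
def estimate_order(abs_err):
    e = np.asarray(abs_err)
    e = e[e > 0]                  # guard against exact zeros before taking logs
    return np.log(e[2:]/e[1:-1]) / np.log(e[1:-1]/e[:-2])

print(estimate_order(abs_e_a))    # abs_e_a from Problem 5a above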
# Problem 5c
# The multiplicity of the root is found using the derivative of the given
# function at the root x=2/3.
# For the given function, f(2/3) = 0 and f'(2/3) != 0,
# which means the root x=2/3 has multiplicity 1.
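A quick numeric confirmation (an added check reusing the f and fdash defined in Problem 5a):

[ ]: # Sketch: f(2/3) ≈ 0 (up to roundoff) while f'(2/3) = 123 != 0, a simple root.
print(f(2.0/3.0), fdash(2.0/3.0))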

[24]: # Problem 6: Secant Method
def secant(f, p0, p1, Nmax, eps):

    import numpy as np

    def g(p0, p1, f):
        # One secant step: the derivative is replaced by a difference quotient.
        gx = p1 - f(p1) * ( p1 - p0 ) / ( f(p1) - f(p0) )
        return gx

    pn = np.zeros(Nmax+2)
    pn.fill(np.nan)
    pn[0] = p0
    pn[1] = p1

    for ii in range(2, Nmax+2):
        pn[ii] = g(pn[ii-2], pn[ii-1], f)
        if abs(pn[ii] - pn[ii-1]) < eps:
            print("The convergence tolerance has been met")
            print("after {0:d} iterations".format(ii))
            pn = pn[~np.isnan(pn)]
            return pn

    print("The convergence tolerance has not been met")
    print("after Nmax = {0:d} iterations".format(Nmax))
    return pn

[25]: # Problem 6a
def f(x):
    fx = x**2*(1-np.cos(x))
    return fx

p1 = -1
p2 = 2.5
Nmax = 25
eps = 0.00001
p_n = secant(f,p1,p2,Nmax,eps)
print("\nApproximations of the Root p:")
for ii in range(0,len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,p_n[ii]))

p3 = 0
abs_e = np.abs(p_n-p3)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0,len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,abs_e[ii]))

logabs_en = np.log(abs( p_n[1:-1] - p3))
logabs_en1 = np.log(abs( p_n[0:-2] - p3))
plt.plot(logabs_en1,logabs_en,'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()

The convergence tolerance has not been met
after Nmax = 25 iterations

Approximations of the Root p:
Iteration 0: pn(0) = -1.00000000e+00
Iteration 1: pn(1) = 2.50000000e+00
Iteration 2: pn(2) = -1.14901129e+00
Iteration 3: pn(3) = -1.42057452e+00
Iteration 4: pn(4) = -9.22851509e-01
Iteration 5: pn(5) = -8.00932283e-01
Iteration 6: pn(6) = -6.34279573e-01
Iteration 7: pn(7) = -5.22573332e-01
Iteration 8: pn(8) = -4.25182754e-01
Iteration 9: pn(9) = -3.48151275e-01
Iteration 10: pn(10) = -2.84669761e-01
Iteration 11: pn(11) = -2.33047009e-01
Iteration 12: pn(12) = -1.90781090e-01
Iteration 13: pn(13) = -1.56228321e-01
Iteration 14: pn(14) = -1.27943737e-01
Iteration 15: pn(15) = -1.04790606e-01
Iteration 16: pn(16) = -8.58315953e-02
Iteration 17: pn(17) = -7.03055350e-02
Iteration 18: pn(18) = -5.75893788e-02
Iteration 19: pn(19) = -4.71740108e-02
Iteration 20: pn(20) = -3.86427553e-02
Iteration 21: pn(21) = -3.16545909e-02
Iteration 22: pn(22) = -2.59302999e-02
Iteration 23: pn(23) = -2.12412402e-02
Iteration 24: pn(24) = -1.74001583e-02
Iteration 25: pn(25) = -1.42536865e-02
Iteration 26: pn(26) = -1.16762034e-02

Absolute Error in the Approximations p:
Iteration 0: pn(0) = 1.00000000e+00
Iteration 1: pn(1) = 2.50000000e+00
Iteration 2: pn(2) = 1.14901129e+00
Iteration 3: pn(3) = 1.42057452e+00
Iteration 4: pn(4) = 9.22851509e-01

Iteration 5: pn(5) = 8.00932283e-01
Iteration 6: pn(6) = 6.34279573e-01
Iteration 7: pn(7) = 5.22573332e-01
Iteration 8: pn(8) = 4.25182754e-01
Iteration 9: pn(9) = 3.48151275e-01
Iteration 10: pn(10) = 2.84669761e-01
Iteration 11: pn(11) = 2.33047009e-01
Iteration 12: pn(12) = 1.90781090e-01
Iteration 13: pn(13) = 1.56228321e-01
Iteration 14: pn(14) = 1.27943737e-01
Iteration 15: pn(15) = 1.04790606e-01
Iteration 16: pn(16) = 8.58315953e-02
Iteration 17: pn(17) = 7.03055350e-02
Iteration 18: pn(18) = 5.75893788e-02
Iteration 19: pn(19) = 4.71740108e-02
Iteration 20: pn(20) = 3.86427553e-02
Iteration 21: pn(21) = 3.16545909e-02
Iteration 22: pn(22) = 2.59302999e-02
Iteration 23: pn(23) = 2.12412402e-02
Iteration 24: pn(24) = 1.74001583e-02
Iteration 25: pn(25) = 1.42536865e-02
Iteration 26: pn(26) = 1.16762034e-02

[26]: # The log-log error plot is linear with slope about 1, so the order of
# convergence is 1, less than the expected 1.618; the root x=0 is not simple
# (near zero, x^2(1-cos x) ≈ x^4/2, a root of multiplicity 4).
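Applying the added estimate_order sketch from Problem 5b to these errors (skipping the erratic start-up iterations) should give values near 1, consistent with the multiple root:

[ ]: print(estimate_order(abs_e[2:]))   # entries settle near 1 for the multiple root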

[27]: # Problem 6b
def f(x):
    fx = 81*x**4 + 27*x**3 - 9*x**2 + 3*x - 22
    return fx

p1 = 0
p2 = 0.5
Nmax = 25
eps = 0.00001
p_n = secant(f,p1,p2,Nmax,eps)
print("\nApproximations of the Root p:")
for ii in range(0,len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,p_n[ii]))
p3 = 2/3
abs_e = np.abs(p_n-p3)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0,len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,abs_e[ii]))
logabs_en = np.log(abs( p_n[1:-1] - p3))
logabs_en1 = np.log(abs( p_n[0:-2] - p3))
plt.plot(logabs_en1,logabs_en,'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()

The convergence tolerance has been met
after 10 iterations

Approximations of the Root p:
Iteration 0: pn(0) = 0.00000000e+00
Iteration 1: pn(1) = 5.00000000e-01
Iteration 2: pn(2) = 1.43089431e+00
Iteration 3: pn(3) = 5.33573891e-01
Iteration 4: pn(4) = 5.61516190e-01
Iteration 5: pn(5) = 7.07582416e-01
Iteration 6: pn(6) = 6.56883645e-01
Iteration 7: pn(7) = 6.65848599e-01
Iteration 8: pn(8) = 6.66683865e-01
Iteration 9: pn(9) = 6.66666637e-01
Iteration 10: pn(10) = 6.66666667e-01

Absolute Error in the Approximations p:
Iteration 0: pn(0) = 6.66666667e-01
Iteration 1: pn(1) = 1.66666667e-01
Iteration 2: pn(2) = 7.64227642e-01
Iteration 3: pn(3) = 1.33092776e-01

Iteration 4: pn(4) = 1.05150477e-01
Iteration 5: pn(5) = 4.09157494e-02
Iteration 6: pn(6) = 9.78302128e-03
Iteration 7: pn(7) = 8.18068127e-04
Iteration 8: pn(8) = 1.71986359e-05
Iteration 9: pn(9) = 2.98836141e-08
Iteration 10: pn(10) = 1.09057208e-12

[28]: # Order of convergence is shown to be about 1.6, as expected for a simple root.
# Under these circumstances the secant method performs as predicted; comparing
# with Problem 6a shows that the order of convergence drops to about 1
# when the root has multiplicity higher than 1.
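A small added experiment supports this claim (a sketch reusing the secant function above; f_double is a hypothetical example): f(x) = (3x-2)^2 has a double root at x=2/3, and secant should need noticeably more iterations to meet the same tolerance than the 10 it needed for the simple root in Problem 6b.

[ ]: # Sketch: the secant method on a double root converges only linearly.
def f_double(x):
    return (3.0*x - 2.0)**2
pn_double = secant(f_double, 0.0, 1.0, 40, 0.00001)
print("Final approximation:", pn_double[-1])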

[29]: # Problem 7a: Newton's Method
def f(x):
    fx = x**4.0 - 18.0*x**2.0 + 45.0
    return fx

def fdash(x):
    fdashx = 4*x**3.0 - 36.0*x
    return fdashx

p0_a = 1.0
Nmax_a = 4
eps_a = .00001
p_n = newton(f,fdash,p0_a,Nmax_a,eps_a)
print("\nApproximations of the Root p:")
for ii in range(0,len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,p_n[ii]))
p_a = np.sqrt(3)
abs_e = np.abs(p_n-p_a)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0,len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,abs_e[ii]))
logabs_en = np.log(abs( p_n[1:-1] - p_a ))
logabs_en1 = np.log(abs( p_n[0:-2] - p_a ))
plt.plot(logabs_en1,logabs_en,'r')
plt.xlabel('log_e( |e_{n-1}| )')
plt.ylabel('log_e( |e_{n}| )')
plt.show()

The convergence tolerance has been met
after 4 iterations

Approximations of the Root p:

Iteration 0: pn(0) = 1.00000000e+00
Iteration 1: pn(1) = 1.87500000e+00
Iteration 2: pn(2) = 1.73103632e+00
Iteration 3: pn(3) = 1.73205081e+00
Iteration 4: pn(4) = 1.73205081e+00

Absolute Error in the Approximations p:
Iteration 0: pn(0) = 7.32050808e-01
Iteration 1: pn(1) = 1.42949192e-01
Iteration 2: pn(2) = 1.01448278e-03
Iteration 3: pn(3) = 3.47950557e-10
Iteration 4: pn(4) = 0.00000000e+00

[30]: # The apparent order of convergence is approximately 3: here f''(x) = 12x^2 - 36
# vanishes at p = sqrt(3), so the quadratic error term drops out and Newton's
# method converges cubically rather than quadratically.

[31]: # Problem 7b: Secant Method
def f(x):
    fx = x**4.0 - 18.0*x**2.0 + 45.0
    return fx

p_0 = 3
p_1 = 4
Nmax = 8
eps = .00001
p_n = secant(f,p_0,p_1,Nmax,eps)
print("\nApproximations of the Root p:")
for ii in range(0,len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,p_n[ii]))
p_a = np.sqrt(15)
abs_e = np.abs(p_n-p_a)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0,len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}"\
          .format(ii,ii,abs_e[ii]))
logabs_en = np.log(abs( p_n[1:-1] - p_a ))
logabs_en1 = np.log(abs( p_n[0:-2] - p_a ))
plt.plot(logabs_en1,logabs_en,'r')
plt.xlabel('log_e( |e_{n-1}| )')
plt.ylabel('log_e( |e_{n}| )')
plt.show()

The convergence tolerance has been met
after 7 iterations

Approximations of the Root p:

Iteration 0: pn(0) = 3.00000000e+00
Iteration 1: pn(1) = 4.00000000e+00
Iteration 2: pn(2) = 3.73469388e+00
Iteration 3: pn(3) = 3.85932813e+00
Iteration 4: pn(4) = 3.87458114e+00
Iteration 5: pn(5) = 3.87296633e+00
Iteration 6: pn(6) = 3.87298333e+00
Iteration 7: pn(7) = 3.87298335e+00

Absolute Error in the Approximations p:
Iteration 0: pn(0) = 8.72983346e-01
Iteration 1: pn(1) = 1.27016654e-01
Iteration 2: pn(2) = 1.38289469e-01
Iteration 3: pn(3) = 1.36552124e-02
Iteration 4: pn(4) = 1.59779508e-03
Iteration 5: pn(5) = 1.70149631e-05
Iteration 6: pn(6) = 2.10399054e-08
Iteration 7: pn(7) = 2.77111667e-13

[32]: # The order of convergence, found from the average slope of the plot,
# is approximately 1.6.

# Problem 7c
# The difference between the secant and Newton's methods, in terms of
# convergence, is that Newton's method evaluates the exact derivative of the
# function, while the secant method approximates the derivative with a finite
# difference, which lowers the order of convergence.
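As a final added illustration (a sketch reusing the newton and secant functions above, with an assumed tighter tolerance of 1e-10): running both methods on Problem 7's f(x) = x^4 - 18x^2 + 45 shows Newton meeting the tolerance in fewer iterations, at the cost of one derivative evaluation per step.

[ ]: # Sketch: iteration counts for Newton vs. secant on the root p = sqrt(15).
pn_newton = newton(f, fdash, 4.0, 25, 1e-10)
pn_secant = secant(f, 3.0, 4.0, 25, 1e-10)
print(len(pn_newton), len(pn_secant))   # lengths include the initial guesses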

[ ]:

