Professional Documents
Culture Documents
HW 1
HW 1
HW 1
September 3, 2022
# Problem 1a
# Plot f(x) = 1 - 2 ln(x) to verify that a zero lies inside the interval (1, 2).
xs = np.linspace(1, 2, 101)
ys = 1 - 2*np.log(xs)
root_x = np.e**(1/2)   # analytic zero: 1 - 2 ln(x) = 0  =>  x = e^(1/2)
root_y = 0
plt.plot(xs, ys, 'r', label='f(x)=1-2ln(x)')
plt.plot(root_x, root_y, 'b.', label='p=e^(1/2)')
plt.legend()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
1
[2]: #Problem 1b
#Bisection Algo
def bisection(eps, func, a, b, Nmax):
    """Approximate a root of ``func`` in [a, b] by interval halving.

    Parameters:
        eps  -- convergence tolerance on the half-interval width
        func -- callable whose sign change on [a, b] brackets the root
        a, b -- interval endpoints (func must change sign between them)
        Nmax -- maximum number of iterations

    Returns:
        numpy array of the midpoint iterates actually computed.

    Bug fixes vs. the original: the function never returned anything
    (callers assign its result, e.g. pn1/pn2, so the return statement was
    evidently lost), and the ``eps`` parameter was accepted but never used.
    """
    pn = np.zeros(Nmax)
    pn.fill(np.nan)
    sfa = np.sign(func(a))
    for ii in range(0, Nmax):
        pn[ii] = a + (b - a)/2.0          # midpoint of the current bracket
        sfp = np.sign(func(pn[ii]))
        if (sfa*sfp) < 0.0:
            b = pn[ii]                    # root lies in the left half
        else:
            a = pn[ii]                    # root lies in the right half
            sfa = np.sign(func(a))
        if (b - a)/2.0 < eps:             # bracket small enough -> stop early
            break
    pn = pn[~np.isnan(pn)]                # drop unused (NaN) slots
    return pn
# Problem 1b inputs: bisection on f(x) = 1 - 2 ln(x) over [1, 2].
a1 = 1
b1 = 2
Nmax1 = 5
eps1 = 0.00001

def f1(x):
    # f1 has its exact zero at x = e^(1/2).
    return 1 - 2*np.log(x)

pn1 = bisection(eps1, f1, a1, b1, Nmax1)
# Tabulate iterate, actual error, and the theoretical bisection error bound.
p1 = np.exp(1/2)                    # exact root of f1
n1 = np.linspace(1, Nmax1, Nmax1)   # iteration numbers 1..Nmax1
e1 = abs(pn1 - p1)                  # actual absolute error per iterate
eb1 = (b1 - a1)/(2.0**n1)           # bound: (b-a)/2^n
for k in range(0, Nmax1):
    row = "{0:f}\t{1:f}\t{2:f}\t{3:f}".format(n1[k], pn1[k], e1[k], eb1[k])
    print(row)
#Problem 1c Error vs Number of Iterations
iters = [1, 2, 3, 4, 5]
errors = [0.148, 0.101, 0.023, 0.038, 0.0075]
plt.plot(iters, errors, 'r', label='Bisection Method Error')
plt.legend()
plt.xlabel('Number of Iterations')
plt.ylabel('Error')
plt.show()
[7]: #Problem 1d
#The bisection method's main limitation is slow convergence, since it only
#halves the interval each step: it takes few iterations if the root is near the
#center of the initial interval, but more if the root is close to the bounds.
#It also fails when the function only touches zero tangentially, because there
#is no sign change for the method to bracket.
#Problem 2a
def f2(x):
    # f(x) = 1/x - 48; exact root at x = 1/48.
    return (1/x) - 48
# Bisection bracket [0.01, 0.1] contains 1/48.
a2 = 0.01
b2 = .1
Nmax2 = 1000
eps2 = 0.00001
pn2 = bisection(eps2, f2, a2, b2, Nmax2)
p2 = 1/48                        # exact root of f2
e2 = abs(p2 - pn2)               # absolute error of each bisection iterate
# Bug fix: n2 was used below but never defined anywhere in the file; build
# the iteration axis from the number of iterates bisection actually returned.
n2 = np.arange(1, len(pn2) + 1)
plt.plot(n2, e2, 'b')
plt.legend(['|e|'])
plt.xlabel('Iteration Number n')
plt.ylabel('Absolute Error |e|')
plt.show()
import numpy as np
def g(x, f, fp):
    # One Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n).
    gx = x - f(x)/fp(x)
    return(gx)

def newton(f, fp, po, Nmax, eps):
    """Newton's method for a root of f starting from po.

    Parameters:
        f    -- function whose root is sought
        fp   -- derivative of f
        po   -- initial guess
        Nmax -- maximum number of iterations
        eps  -- stop when successive iterates differ by less than eps

    Returns:
        numpy array of iterates (po first), truncated at convergence.

    NOTE(review): the original 'def newton(...)' header was lost in
    extraction; the signature is reconstructed from the call sites
    newton(f, fdash, p_o, Nmax, eps). A 'break' after the convergence
    message is assumed (the NaN filter below is pointless without it).
    """
    pn = np.zeros(Nmax + 1)
    pn.fill(np.nan)
    pn[0] = po
    for ii in range(1, Nmax + 1):
        pn[ii] = g(pn[ii - 1], f, fp)
        if abs(pn[ii] - pn[ii - 1]) < eps:
            print("The convergence tolerance has been met")
            print("after {0:d} iterations".format(ii))
            break
    pn = pn[~np.isnan(pn)]   # keep only the iterates actually computed
    return(pn)
def fdash(x):
    # Derivative 3x^2 — presumably of f(x) = x^3 - 15 for Problem 3
    # (p6 = 15^(1/3) below); the matching f is not visible here.
    return 3*x**2
# Problem 3a: five Newton iterations from p0 = 2.
p_o = 2
Nmax = 5
eps = .00001
p_n = newton(f, fdash, p_o, Nmax, eps)
#Problem 3b
# Tabulate successive differences and true errors of the Newton iterates.
print("n\t|p_(n)-p_(n-1)|\t|p_(n-1)-p|\t|p_(n)-p|")
p_n6 = p_n
p6 = (15)**(1.0/3.0)             # exact root 15^(1/3)
m = len(p_n6) - 1
pnpn1 = np.full(m, np.nan)       # |p_n - p_{n-1}|
pn1p = np.full(m, np.nan)        # |p_{n-1} - p|
pnp = np.full(m, np.nan)         # |p_n - p|
for k in range(1, len(p_n6)):
    pnpn1[k - 1] = abs(p_n6[k] - p_n6[k - 1])
    pn1p[k - 1] = abs(p_n6[k - 1] - p6)
    pnp[k - 1] = abs(p_n6[k] - p6)
for k in range(0, m):
    print("{0:d}\t{1:10.9e}\t{2:10.9e}\t{3:10.9e}".format(
        k + 1, pnpn1[k], pn1p[k], pnp[k]))
#Problem 3c
# e_n / e_{n-1}^2 roughly constant indicates quadratic convergence.
pnp_pn1p_ratio = pnp / pn1p**2.0
print("Ratio |p_(n)-p| / |p_(n-1)-p|")
for k in range(0, len(pnp)):
    print("Iteration {0:d}: {1:10.9e}".format(k + 1, pnp_pn1p_ratio[k]))
0.405480133
#Problem 4a
def f_a(x):
    # f(x) = e^x + x^2 - x - 7
    return np.exp(x) + x*x - x - 7
x_a = np.linspace(-3, 2, 501)
f_array_a = f_a(x_a)
plt.plot(x_a, f_array_a, 'b')
plt.axhline(y=0, color='k', linestyle='--')
# Bug fix: the legend read e^x+x^2-x-4, but f_a computes e^x+x^2-x-7.
plt.legend([r'$f(x)=e^x+x^2-x-7$', 'y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
[17]: #Roots are near x=-2.2 and x=1.8, with the given tolerance eps=10^-6
#f'(x) = e^x + 2x - 1
def fdash_a(x):
    # Exact derivative of f_a for Newton's method.
    return np.exp(x) + 2*x - 1.0
# Run Newton from a starting guess near each root of f_a.
eps_a1, eps_a2 = 10**-6.0, 10**-6.0
po_a1, po_a2 = -2.2, 1.8
Nmax_a1, Nmax_a2 = 100, 100
pn_a1 = newton(f_a, fdash_a, po_a1, Nmax_a1, eps_a1)
pn_a2 = newton(f_a, fdash_a, po_a2, Nmax_a2, eps_a2)
print("\nFinal approximation of the first root of f(x):")
print("p_n({0:d}) = {1:10.9e}".format(len(pn_a1), pn_a1[-1]))
print("\nFinal approximation of the second root of f(x):")
print("p_n({0:d}) = {1:10.9e}".format(len(pn_a2), pn_a2[-1]))
# Treat the last iterate as the "exact" root, then plot ln|e_n| against
# ln|e_{n-1}|; the slope estimates the order of convergence.
p_a1 = pn_a1[-1]
p_a2 = pn_a2[-1]
logabs_en_a1 = np.log(abs(pn_a1[1:-1] - p_a1))
logabs_en1_a1 = np.log(abs(pn_a1[0:-2] - p_a1))
logabs_en_a2 = np.log(abs(pn_a2[1:-1] - p_a2))
logabs_en1_a2 = np.log(abs(pn_a2[0:-2] - p_a2))
for xs, ys in ((logabs_en1_a1, logabs_en_a1), (logabs_en1_a2, logabs_en_a2)):
    plt.plot(xs, ys, 'r')
    plt.xlabel('ln( |e_{n-1}| )')
    plt.ylabel('ln( |e_{n}| )')
    plt.show()
10
#Problem 4b
def f_b(x):
    # f(x) = x^3 - x^2 - 9x + 4
    return x**3.0 - x**2 - 9*x + 4
x_b = np.linspace(-4, 4, 101)
f_array_b = f_b(x_b)
plt.plot(x_b, f_array_b, 'b')
plt.axhline(y=0, color='k', linestyle='--')
# Bug fix: the legend read x^3-x^2-10x+11, but f_b computes x^3-x^2-9x+4.
plt.legend([r'$f(x)=x^3-x^2-9x+4$', 'y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
11
#Roots are near x=-2.8, x=0.5, x=3.3
#f'(x) = 3x^2 - 2x - 9
def fdash_b(x):
    # Bug fix: the derivative of f_b(x) = x^3 - x^2 - 9x + 4 is
    # 3x^2 - 2x - 9, matching the comment above; the code had constant
    # -10, which does not match f_b as written.
    return 3.0*x**2.0 - 2.0*x - 9.0
# Run Newton from a starting guess near each of the three roots of f_b.
eps_b1 = eps_b2 = eps_b3 = 10**-6.0
po_b1, po_b2, po_b3 = -2.8, 0.5, 3.3
Nmax_b1 = Nmax_b2 = Nmax_b3 = 100
pn_b1 = newton(f_b, fdash_b, po_b1, Nmax_b1, eps_b1)
pn_b2 = newton(f_b, fdash_b, po_b2, Nmax_b2, eps_b2)
pn_b3 = newton(f_b, fdash_b, po_b3, Nmax_b3, eps_b3)
12
p_b1 = pn_b1[-1]
p_b2 = pn_b2[-1]
p_b3 = pn_b3[-1]
# Bug fix: the logabs_en*_b* arrays were plotted below but never computed;
# build them exactly as in Problems 4a and 4c.
logabs_en_b1 = np.log(abs(pn_b1[1:-1] - p_b1))
logabs_en1_b1 = np.log(abs(pn_b1[0:-2] - p_b1))
logabs_en_b2 = np.log(abs(pn_b2[1:-1] - p_b2))
logabs_en1_b2 = np.log(abs(pn_b2[0:-2] - p_b2))
logabs_en_b3 = np.log(abs(pn_b3[1:-1] - p_b3))
logabs_en1_b3 = np.log(abs(pn_b3[0:-2] - p_b3))
plt.plot(logabs_en1_b1, logabs_en_b1, 'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_b2, logabs_en_b2, 'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
plt.plot(logabs_en1_b3, logabs_en_b3, 'r')
plt.xlabel('ln( |e_{n-1}| )')
plt.ylabel('ln( |e_{n}| )')
plt.show()
13
14
#Problem 4c
def f_c(x):
    # f(x) = 1.08 - 1.03x + ln(x)
    return 1.08 - 1.03*x + np.log(x)
x_c = np.linspace(0.6, 1.6, 101)
f_array_c = f_c(x_c)
plt.plot(x_c, f_array_c, 'b')
plt.axhline(y=0, color='k', linestyle='--')
# Bug fix: the legend read 1.05-1.04*x+ln(x), but f_c computes 1.08-1.03x+ln(x).
plt.legend([r'$f(x)=1.08-1.03*x+ln(x)$', 'y=0'])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
15
#Roots are near x=0.65 and x=1.35
#f'(x) = -1.03 + 1/x
def fdash_c(x):
    # Bug fix: the derivative of f_c(x) = 1.08 - 1.03x + ln(x) is
    # -1.03 + 1/x (as the comment above says); the code used -1 + 1/x.
    return -1.03 + 1.0/x
# Run Newton from a starting guess near each root of f_c.
eps_c1 = eps_c2 = 10**-6.0
po_c1, po_c2 = 0.68, 1.3
Nmax_c1 = Nmax_c2 = 100
pn_c1 = newton(f_c, fdash_c, po_c1, Nmax_c1, eps_c1)
pn_c2 = newton(f_c, fdash_c, po_c2, Nmax_c2, eps_c2)
print("\nFinal approximation of the first root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_c1), pn_c1[-1]))
print("\nFinal approximation of the second root of f(x):")
print("pn({0:d}) = {1:10.9e}".format(len(pn_c2), pn_c2[-1]))
p_c1 = pn_c1[-1]
p_c2 = pn_c2[-1]
# ln|e_n| vs ln|e_{n-1}| for each root; the slope estimates convergence order.
logabs_en_c1 = np.log(abs(pn_c1[1:-1] - p_c1))
logabs_en1_c1 = np.log(abs(pn_c1[0:-2] - p_c1))
logabs_en_c2 = np.log(abs(pn_c2[1:-1] - p_c2))
logabs_en1_c2 = np.log(abs(pn_c2[0:-2] - p_c2))
for xs, ys in ((logabs_en1_c1, logabs_en_c1), (logabs_en1_c2, logabs_en_c2)):
    plt.plot(xs, ys, 'r')
    plt.xlabel('ln( |e_{n-1}| )')
    plt.ylabel('ln( |e_{n}| )')
    plt.show()
17
#Problem 5: f(x) = 81x^4 + 27x^3 - 9x^2 + 3x - 22, zero at x = 2/3, po = 0
#10 iterations of Newton's method; plot absolute error vs iteration number.
def f(x):
    return 81*x**4.0 + 27*x**3.0 - 9*x**2 + 3*x - 22
#f'(x) = 324x^3 + 81x^2 - 18x + 3 (exact derivative of f above)
def fdash(x):
    return 324*x**3.0 + 81*x**2.0 - 18*x + 3
po_a = 0
Nmax_a = 10
eps_a = 0.00001
pn_a = newton(f, fdash, po_a, Nmax_a, eps_a)
print("\nApproximations of the Root p:")
for ii in range(0, len(pn_a)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(ii, ii, pn_a[ii]))
p_a = 2/3                        # known exact root
abs_e_a = np.abs(pn_a - p_a)
print("\nAbsolute Error in the Approximations p:")
for ii in range(0, len(abs_e_a)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(ii, ii, abs_e_a[ii]))
logabs_en_a = np.log(abs(pn_a[1:] - p_a))
logabs_en1_a = np.log(abs(pn_a[0:-1] - p_a))
plt.plot(logabs_en1_a, logabs_en_a, 'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()
# Bug fix: the second figure is labeled "Absolute Error" but originally
# plotted the iterates pn_a themselves, against a hard-coded 10-element
# x-axis that breaks if Newton converges early. Plot the error array,
# sized to the iterates actually returned.
itera = range(1, len(pn_a))
plt.plot(itera, abs_e_a[1:], 'b')
plt.xlabel('Iterations')
plt.ylabel('Absolute Error')
plt.show()
19
[23]: #Problem 5b
#The order of convergence is found using the slope of the plot above, which is 1.
#Problem 5c
#The multiplicity of the root is found using the derivatives of the given
#function at the root x = 2/3.
def secant(f, p0, p1, Nmax, eps):
    """Secant method for a root of f from starting points p0 and p1.

    Parameters:
        f      -- function whose root is sought
        p0, p1 -- the two initial iterates
        Nmax   -- maximum number of secant steps
        eps    -- stop when successive iterates differ by less than eps

    Returns:
        numpy array of iterates (p0 and p1 first), truncated at convergence.

    NOTE(review): the original 'def secant(...)' header and its update
    helper were lost in extraction; the signature is reconstructed from
    the call sites secant(f, p1, p2, Nmax, eps), and the update follows
    the standard secant formula. A 'break' after the convergence message
    is assumed (the NaN filter below is pointless without it).
    """
    def step(pa, pb, func):
        # Secant update: pb - f(pb) * (pb - pa) / (f(pb) - f(pa))
        return pb - func(pb)*(pb - pa)/(func(pb) - func(pa))

    pn = np.zeros(Nmax + 2)
    pn.fill(np.nan)
    pn[0] = p0
    pn[1] = p1
    for ii in range(2, Nmax + 2):
        pn[ii] = step(pn[ii - 2], pn[ii - 1], f)
        if abs(pn[ii] - pn[ii - 1]) < eps:
            print("The convergence tolerance has been met")
            print("after {0:d} iterations".format(ii))
            break
    pn = pn[~np.isnan(pn)]   # keep only the iterates actually computed
    return(pn)
#Problem 6a
def f(x):
    # f(x) = x^2 (1 - cos x); has a root at x = 0.
    return x**2*(1 - np.cos(x))
# Secant method on f from p0 = -1, p1 = 2.5; true root is p = 0.
p1 = -1
p2 = 2.5
Nmax = 25
eps = 0.00001
p_n = secant(f, p1, p2, Nmax, eps)
print("\nApproximations of the Root p:")
for k in range(0, len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, p_n[k]))
p3 = 0
abs_e = np.abs(p_n - p3)
print("\nAbsolute Error in the Approximations p:")
for k in range(0, len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, abs_e[k]))
logabs_en = np.log(abs(p_n[1:-1] - p3))
logabs_en1 = np.log(abs(p_n[0:-2] - p3))
plt.plot(logabs_en1, logabs_en, 'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()
22
Iteration 5: pn(5) = 8.00932283e-01
Iteration 6: pn(6) = 6.34279573e-01
Iteration 7: pn(7) = 5.22573332e-01
Iteration 8: pn(8) = 4.25182754e-01
Iteration 9: pn(9) = 3.48151275e-01
Iteration 10: pn(10) = 2.84669761e-01
Iteration 11: pn(11) = 2.33047009e-01
Iteration 12: pn(12) = 1.90781090e-01
Iteration 13: pn(13) = 1.56228321e-01
Iteration 14: pn(14) = 1.27943737e-01
Iteration 15: pn(15) = 1.04790606e-01
Iteration 16: pn(16) = 8.58315953e-02
Iteration 17: pn(17) = 7.03055350e-02
Iteration 18: pn(18) = 5.75893788e-02
Iteration 19: pn(19) = 4.71740108e-02
Iteration 20: pn(20) = 3.86427553e-02
Iteration 21: pn(21) = 3.16545909e-02
Iteration 22: pn(22) = 2.59302999e-02
Iteration 23: pn(23) = 2.12412402e-02
Iteration 24: pn(24) = 1.74001583e-02
Iteration 25: pn(25) = 1.42536865e-02
Iteration 26: pn(26) = 1.16762034e-02
[26]: #The order of convergence is shown to be one, since the plot is linear with
#unit slope — less than the 1.618 expected for the secant method.
23
#Problem 6b
def f(x):
    # Same quartic as Problem 5: f(x) = 81x^4 + 27x^3 - 9x^2 + 3x - 22.
    return 81*x**4 + 27*x**3 - 9*x**2 + 3*x - 22
# Secant method on the quartic from p0 = 0, p1 = 0.5; true root is 2/3.
p1 = 0
p2 = 0.5
Nmax = 25
eps = 0.00001
p_n = secant(f, p1, p2, Nmax, eps)
print("\nApproximations of the Root p:")
for k in range(0, len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, p_n[k]))
p3 = 2/3
abs_e = np.abs(p_n - p3)
print("\nAbsolute Error in the Approximations p:")
for k in range(0, len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, abs_e[k]))
logabs_en = np.log(abs(p_n[1:-1] - p3))
logabs_en1 = np.log(abs(p_n[0:-2] - p3))
plt.plot(logabs_en1, logabs_en, 'r')
plt.xlabel('ln(|e_{n-1}|)')
plt.ylabel('ln(|e_{n}|)')
plt.show()
24
Iteration 4: pn(4) = 1.05150477e-01
Iteration 5: pn(5) = 4.09157494e-02
Iteration 6: pn(6) = 9.78302128e-03
Iteration 7: pn(7) = 8.18068127e-04
Iteration 8: pn(8) = 1.71986359e-05
Iteration 9: pn(9) = 2.98836141e-08
Iteration 10: pn(10) = 1.09057208e-12
25
# Problem 7 (start of this cell lost in extraction): report iterates and
# errors against the exact root sqrt(3), then plot ln|e_n| vs ln|e_{n-1}|.
for k in range(0, len(p_n)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, p_n[k]))
p_a = np.sqrt(3)
abs_e = np.abs(p_n - p_a)
print("\nAbsolute Error in the Approximations p:")
for k in range(0, len(abs_e)):
    print("Iteration {0:d}: pn({1:d}) = {2:9.8e}".format(k, k, abs_e[k]))
logabs_en = np.log(abs(p_n[1:-1] - p_a))
logabs_en1 = np.log(abs(p_n[0:-2] - p_a))
plt.plot(logabs_en1, logabs_en, 'r')
plt.xlabel('log_e( |e_{n-1}| )')
plt.ylabel('log_e( |e_{n}| )')
plt.show()
26
[30]: #The apparent order of convergence is approximately 3
27
# NOTE(review): the plt.plot(...) call for this figure appears to have been
# lost in extraction; only the axis labels and show() remain — confirm
# against the original notebook.
plt.xlabel('log_e( |e_{n-1}| )')
plt.ylabel('log_e( |e_{n}| )')
plt.show()
28
[32]: #The order of convergence is found through the average slope of the plot,
#which is approximately 1.6.
#Problem 7c
#The difference between the secant and Newton's methods, with respect to
#convergence, is that Newton's method evaluates the exact derivative of the
#function, while the secant method approximates the derivative from the two
#previous iterates (giving order about 1.618 instead of 2).
[ ]:
29