Descente.ipynb - Colaboratory
import math
import numpy as np
# Gradient descent algorithm
def f(x, y, a, b):
    # Rosenbrock-type function: f(x, y) = (a - x)^2 + b*(y - x^2)^2
    return (a - x)**2 + b * (y - x**2)**2
def df(x, y, a, b):
    # Gradient of f: (df/dx, df/dy)
    return 4*b*x**3 - x*(4*b*y - 2) - 2*a, 2*b*(y - x**2)
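As a quick sanity check (not part of the original notebook), the analytic gradient can be compared against central finite differences of f; the test point (2, 5) and the parameters a = 1, b = 10 are the values used later in the notebook.
# Sanity check (not in the original notebook): compare the analytic
# gradient with central finite differences of f at (2, 5).
h = 1e-6
fd_x = (f(2 + h, 5, 1, 10) - f(2 - h, 5, 1, 10)) / (2 * h)
fd_y = (f(2, 5 + h, 1, 10) - f(2, 5 - h, 1, 10)) / (2 * h)
print(df(2, 5, 1, 10), (fd_x, fd_y))  # the two pairs should agree closely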
def descente(x, y, a, b, n, e, alpha):
    # Fixed-step gradient descent: stop when the gradient norm falls
    # below e or after n iterations.
    gradx, grady = df(x, y, a, b)
    grad = math.sqrt(gradx**2 + grady**2)
    i = 0
    while grad > e and i < n:
        gradx, grady = df(x, y, a, b)
        grad = math.sqrt(gradx**2 + grady**2)
        x = x - alpha*gradx
        y = y - alpha*grady
        i += 1
    return x, y, i
descente(2, 5, 1, 10, 2000, 0.00001, 0.01)
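The function returns the final iterate together with the number of iterations performed; a small usage sketch (the variable names are illustrative, not original output):
# Unpack the result to inspect convergence (illustrative, not original output):
x_star, y_star, n_iter = descente(2, 5, 1, 10, 2000, 0.00001, 0.01)
print(x_star, y_star, n_iter)  # near the minimizer (1, 1) if the fixed step alpha converges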
def Rosenbrock(x):
    # Same function with a = 1, b = 10, taking a 2x1 vector x.
    return (1 - x[0])**2 + 10*(x[1] - x[0]**2)**2
def df(x):
    # Gradient of Rosenbrock as a 2x1 column vector.
    return np.array([4*10*x[0]**3 - x[0]*(4*10*x[1] - 2) - 2,
                     20*(x[1] - x[0]**2)]).reshape(-1, 1)
def backtrack2(x0, Rosenbrock, df, t=1, alpha=0.2, beta=0.8):
    # Armijo backtracking along -df(x0): shrink t by beta until
    # f(x0 - t*g) <= f(x0) - alpha*t*||g||^2 with g = df(x0).
    while Rosenbrock(x0 - t*df(x0)) > Rosenbrock(x0) + alpha * t * np.dot(df(x0).T, -df(x0)):
        t *= beta
    return t
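backtrack2 implements the Armijo sufficient-decrease rule: starting from t = 1, the step is multiplied by beta until f(x0 - t*g) <= f(x0) - alpha*t*||g||^2 holds. A minimal usage sketch (the starting point is an assumption):
# Illustrative use (not in the original notebook): accepted step at (2, 5).
x0 = np.array([[2.0], [5.0]])
print(backtrack2(x0, Rosenbrock, df))  # step size accepted by the backtracking rule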
def GC(X, n):
    # Nonlinear conjugate gradient with the Polak-Ribiere coefficient.
    g_old = df(X)
    d = -g_old.reshape(-1, 1)
    grad = float(np.linalg.norm(g_old))
    i = 0
    while i < n and grad > 0.00001:
        g_old = df(X)
        # step size from backtracking (computed along -grad f, as in backtrack2)
        t = backtrack2(X, Rosenbrock, df, t=1, alpha=0.2, beta=0.8)
        X = X + t*d
        g_new = df(X)
        # Polak-Ribiere coefficient: b = g_new.T (g_new - g_old) / ||g_old||^2
        b = np.dot(g_new.T, g_new - g_old) / grad**2
        d = (-g_new + b*d).reshape(-1, 1)
        grad = float(np.linalg.norm(g_new))
        i += 1
    return X
X = np.array([[2], [5]])
GC(X, 150)
array([[0.99935788],
[0.99871051]])
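As a check (not in the original notebook), the gradient norm at the returned point should be small, though the run may hit the iteration cap n = 150 before reaching the 1e-5 tolerance:
# Check (not in the original output): gradient norm at the computed point.
print(np.linalg.norm(df(GC(np.array([[2.0], [5.0]]), 150))))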
# Newton's method
def df1(x, y, a, b):
    # Gradient of f as a pair of scalars (same formula as df above).
    return 4*b*x**3 - x*(4*b*y - 2) - 2*a, 2*b*(y - x**2)
def df(x):
    # Gradient of Rosenbrock as a 2x1 column vector (redefined for this cell).
    return np.array([4*10*x[0]**3 - x[0]*(4*10*x[1] - 2) - 2,
                     20*(x[1] - x[0]**2)]).reshape(-1, 1)
def Hessienne(x, y, a, b):
    # Hessian of f: d2f/dx2 = 12*b*x**2 - 4*b*y + 2, d2f/dxdy = -4*b*x,
    # d2f/dy2 = 2*b.
    dfxx = 12*b*x**2 - 4*b*y + 2
    dfyy = 2*b
    dfxy = -4*b*x
    H = np.matrix([[dfxx, dfxy], [dfxy, dfyy]])
    return H
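As with the gradient, the Hessian can be sanity-checked against finite differences of df1 (not in the original notebook; the test point (2, 5) is again an assumption):
# Sanity check (not in the original notebook): first column of the Hessian
# via central finite differences of the gradient in the x direction.
h = 1e-6
gx1, gy1 = df1(2 + h, 5, 1, 10)
gx0, gy0 = df1(2 - h, 5, 1, 10)
print((gx1 - gx0) / (2*h), (gy1 - gy0) / (2*h))  # should match H[0, 0] and H[1, 0]
print(Hessienne(2, 5, 1, 10))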
def Armijo(x, y, df, t=1, alpha=0.2, beta=0.8):
    # Same Armijo backtracking rule as backtrack2, taking the point as
    # two scalars; the search direction is again -df(x).
    x = np.array([[x], [y]])
    while Rosenbrock(x - t*df(x)) > Rosenbrock(x) + alpha * t * np.dot(df(x).T, -df(x)):
        t *= beta
    return t
def Newton(x, y, a, b, n):
    # Damped Newton method: direction dk = -H(x,y)^{-1} grad f(x,y),
    # step length t from the Armijo backtracking rule (along -grad f).
    X = np.array([[x], [y]], dtype=float)
    i = 0
    while i < n:
        t = Armijo(x, y, df, t=1, alpha=0.2, beta=0.8)
        gradx, grady = df1(x, y, a, b)
        gk = np.array([[gradx], [grady]])
        dk = (-1) * np.linalg.inv(Hessienne(x, y, a, b)) * gk
        X = X + t*dk
        x, y = float(X[0, 0]), float(X[1, 0])  # advance the current point
        i += 1
    return X
Newton(2, 5, 1, 10, 1000)
# returns the iterate X reached after n damped Newton steps
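One quick check (not in the original notebook): the Newton direction at the starting point solves the linear system H d = -grad f, so H*d + g should be numerically zero:
# Check (not in the original notebook): the Newton direction at (2, 5).
gx, gy = df1(2, 5, 1, 10)
g = np.array([[gx], [gy]])
dN = -np.linalg.inv(Hessienne(2, 5, 1, 10)) * g
print(Hessienne(2, 5, 1, 10) * dN + g)  # approximately the zero vector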
# Steepest descent with optimal step size for a quadratic function
def algo_desc(x0, y0, A, b):
    # Steepest descent with exact line search for the quadratic
    # f(X) = 0.5*X.T*A*X - b.T*X; dk = -(A*X - b) is minus the gradient.
    k = 0
    X = np.array([[x0], [y0]])
    dk = -(A*X - b)
    # stop only once a residual component is exactly zero; a tolerance such
    # as np.linalg.norm(dk) > 1e-12 is more robust in practice
    while np.all(dk != 0):
        dkt = np.transpose(dk)
        tk = float((dkt*dk) / (dkt*A*dk))  # optimal step for a quadratic
        X = X + tk*dk
        dk = -(A*X - b)
        k = k + 1
    return X, k
A = np.matrix([[2, 0], [0, 6]])
b = np.array([0, 0]).reshape(2, 1)
algo_desc(3, 1, A, b)
(matrix([[ 0.00000000e+000],
[-3.70459792e-163]]), 541)
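The step tk = (dk.T dk)/(dk.T A dk) is the exact minimizer of t -> f(X + t*dk) for the quadratic; a small verification (not in the original notebook) that the directional derivative vanishes at that step:
# Check (not in the original notebook): the directional derivative
# d.T * (A*(X + t*d) - b) vanishes at the optimal step t.
X0 = np.array([[3.0], [1.0]])
d = -(A*X0 - b)                       # steepest-descent direction at X0
t = float((d.T*d) / (d.T*A*d))        # closed-form optimal step
print(float(d.T*(A*(X0 + t*d) - b)))  # approximately 0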
# Conjugate gradient method for a quadratic function
def algo_desc_conjug(x0, y0, A, b):
    # Conjugate gradient for the quadratic f(X) = 0.5*X.T*A*X - b.T*X.
    k = 0
    X = np.array([[x0], [y0]])
    gk = A*X - b   # gradient
    dk = -gk       # first direction: steepest descent
    while np.all(dk != 0):  # same stopping rule as algo_desc
        gkt = np.transpose(gk)
        dkt = np.transpose(dk)
        tk = float((gkt*gk) / (dkt*A*dk))  # exact line search step
        X = X + tk*dk
        gk = A*X - b
        Bk = float((np.transpose(gk)*A*dk) / (dkt*A*dk))  # conjugacy coefficient
        dk = -gk + Bk*dk  # new A-conjugate direction
        k = k + 1
    return X, k
A = np.matrix([[2, 0], [0, 6]])
b = np.array([0, 0]).reshape(2, 1)
algo_desc_conjug(3, 1, A, b)
(matrix([[0.],
[0.]]), 2)
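The k = 2 above illustrates finite termination: for an n x n symmetric positive-definite A, the conjugate gradient method reaches the exact solution in at most n steps. A generalized sketch (not in the original notebook; cg_quadratic and the 3 x 3 SPD matrix A3 below are illustrative assumptions):
# A generalized sketch (not in the original notebook): the same conjugate
# gradient recursion for an arbitrary symmetric positive-definite A.
def cg_quadratic(A, b, x, tol=1e-10):
    g = A @ x - b                  # gradient of 0.5*x.T A x - b.T x
    d = -g
    k = 0
    while np.linalg.norm(g) > tol:
        t = (g.T @ g).item() / (d.T @ A @ d).item()             # exact line search
        x = x + t * d
        g_new = A @ x - b
        beta = (g_new.T @ A @ d).item() / (d.T @ A @ d).item()  # conjugacy coefficient
        d = -g_new + beta * d
        g = g_new
        k += 1
    return x, k

A3 = np.array([[4.0, 1.0, 0.0],   # made-up SPD example
               [1.0, 3.0, 1.0],
               [0.0, 1.0, 2.0]])
b3 = np.array([[1.0], [2.0], [3.0]])
x3, k3 = cg_quadratic(A3, b3, np.zeros((3, 1)))
print(k3)  # at most 3 in exact arithmetic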