Feat: Add exercise 1 on interpolation & extrapolation
0   p1/ei_blanc.py   Normal file
54  p1/ex1.py        Normal file
@@ -0,0 +1,54 @@
import numpy as np


def resolve_up(A, b) -> np.ndarray:
    """
    Solve an upper-triangular system A x = b by back substitution.
    :param A: An upper-triangular matrix
    :param b: A vector
    :return: The solution vector x
    """
    x = np.zeros(len(b))
    for i in range(len(A) - 1, -1, -1):
        right_of_diagonal = 0.0
        for k in range(i + 1, len(A)):
            right_of_diagonal += A[i, k] * x[k]
        x[i] = (b[i] - right_of_diagonal) / A[i, i]
    return x


def resolve_down(A, b) -> np.ndarray:
    """
    Solve a lower-triangular system A x = b by forward substitution.
    :param A: A lower-triangular matrix
    :param b: A vector
    :return: The solution vector x
    """
    x = np.zeros(len(b))
    for i in range(len(A)):
        left_of_diagonal = 0.0  # renamed from `sum`, which shadowed the builtin
        for k in range(i):
            left_of_diagonal += A[i, k] * x[k]
        x[i] = (b[i] - left_of_diagonal) / A[i, i]
    return x


if __name__ == '__main__':
    A = np.array([
        [2, 4, -6],
        [0, -1, 1],
        [0, 0, -2],
    ], dtype=float)

    b = np.array([2, 3, -7], dtype=float)

    A_down = np.array([
        [2, 0, 0],
        [4, -1, 0],
        [-6, 1, -2],
    ], dtype=float)

    b_down = np.array([2, 3, -7], dtype=float)

    print(resolve_up(A, b))
    print(resolve_down(A_down, b_down))
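A quick sanity check for both substitution routines (a minimal sketch assuming only NumPy and the p1/ layout above, not part of the committed files) is to compare them with np.linalg.solve:

import numpy as np
from ex1 import resolve_up, resolve_down

A_up = np.array([[2, 4, -6], [0, -1, 1], [0, 0, -2]], dtype=float)
A_low = np.array([[2, 0, 0], [4, -1, 0], [-6, 1, -2]], dtype=float)
b = np.array([2, 3, -7], dtype=float)

# Both hand-rolled solvers should agree with NumPy's general-purpose solver.
assert np.allclose(resolve_up(A_up, b), np.linalg.solve(A_up, b))
assert np.allclose(resolve_down(A_low, b), np.linalg.solve(A_low, b))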
82  p1/ex2.py  Normal file
@@ -0,0 +1,82 @@
import numpy as np
from ex1 import resolve_up, resolve_down


def est_symetrique_definie_positive(A) -> bool:
    """
    Check that A = A^T and that all eigenvalues are positive.
    :param A: The matrix to test
    :return: True if A is symmetric positive definite
    """
    # Symmetry (with a tolerance for floating-point values)
    sym = np.allclose(A, A.T)
    # Positive definite: all eigenvalues > 0
    valeurs_propres = np.linalg.eigvals(A)
    definie_positive = np.all(valeurs_propres > 0)
    return sym and definie_positive


def gauss(A, b):
    """
    Gaussian elimination without pivoting: no precondition on A (apart from
    non-zero pivots), but not optimal either. A and b are modified in place.
    :param A: A matrix
    :param b: A vector
    :return: A reduced to upper-triangular form and b updated accordingly.
    """
    n = len(A)
    for k in range(n):                # k = pivot row
        for i in range(k + 1, n):     # i = row below the pivot
            g = A[i, k] / A[k, k]     # multiplier that eliminates A[i, k]
            A[i, k:] -= g * A[k, k:]  # row operation on A
            b[i] -= g * b[k]          # same row operation on b

    return A, b  # the triangularized matrix and the updated vector b


def cholesky(A):
    if not est_symetrique_definie_positive(A):
        raise ValueError('Matrix is not symmetric positive definite')

    m = A.shape[0]                          # size of the (m x m) matrix
    L = np.zeros_like(A, dtype=np.float64)  # L initialized to zero (float64 to keep full precision)

    for j in range(m):         # go through each column j from 0 to m-1
        for i in range(j, m):  # go through each row i from j to m-1
            s = sum(L[i, k] * L[j, k] for k in range(j))  # sum of the products L[i,k] * L[j,k] for k < j

            if i == j:  # diagonal entry
                L[i, j] = np.sqrt(A[i, i] - s)
            else:       # off-diagonal entry
                L[i, j] = (A[i, j] - s) / L[j, j]
    return L


if __name__ == '__main__':
    A = np.array([
        [1, 1, 1, 1],
        [1, 5, 5, 5],
        [1, 5, 14, 14],
        [1, 5, 14, 15]
    ], dtype=np.float64)

    b = np.array([1, 0, 0, 0], dtype=np.float64)

    A_triangle, b_gauss = gauss(A, b)
    print(f"Triangularized A:\n {A_triangle}")

    x = resolve_up(A_triangle, b_gauss)
    print(f"x={x}")

    A = np.array([
        [4, 1, 1, 1],
        [1, 5, 2, 1],
        [1, 2, 6, 2],
        [1, 1, 2, 7]
    ], dtype=np.float64)

    # gauss modified b in place above, so rebuild the right-hand side
    b = np.array([1, 0, 0, 0], dtype=np.float64)

    L = cholesky(A)
    print(f"L: {L}")

    # Solving A x = b with A = L L^T takes two triangular solves
    y = resolve_down(L, b)  # L y = b  (forward substitution)
    x = resolve_up(L.T, y)  # L^T x = y (back substitution)
    print(f'x: {x}')
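The Cholesky factor can be checked directly (a hypothetical sketch, not in the commit, assuming NumPy): L must reproduce A, and the two triangular solves must agree with np.linalg.solve:

import numpy as np
from ex1 import resolve_up, resolve_down
from ex2 import cholesky

A = np.array([[4, 1, 1, 1], [1, 5, 2, 1], [1, 2, 6, 2], [1, 1, 2, 7]], dtype=np.float64)
b = np.array([1, 0, 0, 0], dtype=np.float64)

L = cholesky(A)
assert np.allclose(L @ L.T, A)                 # the factor reproduces A
assert np.allclose(L, np.linalg.cholesky(A))   # and matches NumPy's factor

x = resolve_up(L.T, resolve_down(L, b))        # L y = b, then L^T x = y
assert np.allclose(x, np.linalg.solve(A, b))   # agrees with the direct solver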
104  p1/ex3.py  Normal file
@@ -0,0 +1,104 @@
import numpy as np


# Inverse of a diagonal matrix:
# D_inv = np.diag(1 / np.diag(A))


def diag_strict_dominante(A) -> bool:
    """
    For each row, the sum of the absolute values of the off-diagonal
    coefficients must be strictly smaller than the absolute value of the
    diagonal coefficient.
    :param A: The matrix to test
    :return: True if A is strictly diagonally dominant
    """
    A = np.array(A)
    n = len(A)
    for i in range(n):
        diag = abs(A[i, i])
        row_sum = np.sum(np.abs(A[i, :])) - diag  # sum the whole row, then subtract the diagonal coefficient
        if diag <= row_sum:
            return False
    return True


def jacobi(A, b, epsilon=1e-6, max_iter=100_000):
    if not diag_strict_dominante(A):
        raise ValueError("A must be strictly diagonally dominant")

    x = np.zeros_like(b, dtype=float)
    L = np.tril(A, k=-1)  # strict lower-triangular part (without the diagonal)
    U = np.triu(A, k=1)   # strict upper-triangular part (without the diagonal)

    for _ in range(max_iter):
        x_new = np.diag(1 / np.diag(A)) @ (b - (L + U) @ x)
        if np.linalg.norm(x_new - x, ord=2) < epsilon:
            break
        x = x_new

    return x_new


def gauss_seidel(A, b, epsilon=1e-6, max_iter=100_000):
    D = np.diag(np.diag(A))
    L = np.tril(A, k=-1)
    U = np.triu(A, k=1)
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_old = x.copy()
        # Solve (D + L) x = b - U x_old.  Here L and U are the actual strict
        # triangular parts of A (so A = D + L + U), hence the plus sign.
        x = np.linalg.solve(D + L, b - U @ x_old)
        if np.linalg.norm(x - x_old, ord=2) < epsilon:
            return x
    raise RuntimeError('Gauss-Seidel: convergence not reached')


def relaxation(A, b, omega=1.0, epsilon=1e-6, max_iter=100_000):
    D = np.diag(np.diag(A))
    L = np.tril(A, k=-1)
    U = np.triu(A, k=1)
    x = np.zeros_like(b, dtype=float)

    # Precompute (D + ωL)^(-1) once; with A = D + L + U the relaxation (SOR) step is
    # x_new = (D + ωL)^(-1) (ω (b - U x) + (1 - ω) D x)
    inv_D_omega_L = np.linalg.inv(D + omega * L)

    for _ in range(max_iter):
        x_new = inv_D_omega_L @ (omega * (b - U @ x) + (1 - omega) * D @ x)
        if np.linalg.norm(x_new - x, ord=2) < epsilon:
            return x_new
        x = x_new

    raise RuntimeError("The relaxation method did not converge.")

if __name__ == '__main__':
    A = np.array([
        [8, 4, 1],
        [1, 6, -5],
        [1, -2, -6]
    ])

    b = np.array([1, 0, 0])

    D = np.diag(A)
    L = np.tril(A)
    U = np.triu(A)

    # Row 2 of A is only weakly dominant (|1| + |-5| = 6 = |6|), so the strict
    # test rejects it and jacobi raises; catch the error so the script goes on.
    try:
        res_jacobi = jacobi(A, b)
        print(res_jacobi)
    except ValueError as e:
        print(e)

    res_gauss = gauss_seidel(A, b)
    print(res_gauss)

    # Commented out because omega = 1 gives exactly the Gauss-Seidel iteration
    # res_relaxation_1 = relaxation(A, b, 1)
    # print(res_relaxation_1)

    # Under-relaxation: omega has to stay in (0, 2); omega = 0 would never update x
    res_relaxation_less_1 = relaxation(A, b, 0.5)
    print(res_relaxation_less_1)

    # omega = 2 lies outside the convergence range, so failure is expected here
    try:
        res_relaxation_2 = relaxation(A, b, 2)
        print(res_relaxation_2)
    except RuntimeError as e:
        print(e)

    print('fini')
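The demo matrix is only weakly dominant on its second row, which is why the strict test, and therefore jacobi, rejects it. A sketch (not part of the commit, NumPy only) that cross-checks the other two solvers against np.linalg.solve and looks at the spectral radii of the iteration matrices, the sharper convergence criterion:

import numpy as np
from ex3 import gauss_seidel, relaxation

A = np.array([[8, 4, 1], [1, 6, -5], [1, -2, -6]], dtype=float)
b = np.array([1, 0, 0], dtype=float)
x_ref = np.linalg.solve(A, b)

print(np.allclose(gauss_seidel(A, b), x_ref, atol=1e-5))     # expected: True
print(np.allclose(relaxation(A, b, 0.5), x_ref, atol=1e-5))  # expected: True

# Convergence is governed by the spectral radius of the iteration matrix
# (it must be < 1); both values come out below 1 for this A.
D = np.diag(np.diag(A))
L = np.tril(A, k=-1)
U = np.triu(A, k=1)
rho_jacobi = max(abs(np.linalg.eigvals(-np.linalg.inv(D) @ (L + U))))
rho_gs = max(abs(np.linalg.eigvals(-np.linalg.inv(D + L) @ U)))
print(rho_jacobi, rho_gs)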
37  p1/ex4.py  Normal file
@@ -0,0 +1,37 @@
import numpy as np
import matplotlib.pyplot as plt

from ex2 import gauss
from ex1 import resolve_up

if __name__ == '__main__':
    A = np.array([
        [-1, 5.6, 20.4],
        [-1, 28, 26],
        [-1, 8, 30]
    ], dtype=np.float32)

    b = np.array([84.84, 361, 228.75], dtype=np.float32)

    A_res, b_res = gauss(A, b)
    print(f'A: {A_res}')
    print(f'b: {b_res}')

    x = resolve_up(A_res, b_res)
    print(f'x: {x}')

    fig, ax = plt.subplots()
    cercles = [(2.8, 10.2, 5.2), (14, 13, 7), (4, 15, 3.5)]

    # (x, y) is each circle's centre and r its radius; note this x shadows the solution vector above
    for x, y, r in cercles:
        cercle = plt.Circle((x, y), r, color='blue', fill=False)
        ax.add_patch(cercle)

    # Adjust the plot limits
    ax.set_xlim(-4, 22)
    ax.set_ylim(4, 21)
    ax.set_aspect('equal')

    plt.title("Circles drawn with matplotlib")
    plt.grid(True)
    plt.show()
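Since gauss runs here in float32 and without pivoting, a residual check is worth doing (a sketch, not part of the commit; it keeps copies because gauss modifies A and b in place):

import numpy as np
from ex1 import resolve_up
from ex2 import gauss

A = np.array([[-1, 5.6, 20.4], [-1, 28, 26], [-1, 8, 30]], dtype=np.float64)
b = np.array([84.84, 361, 228.75], dtype=np.float64)
A0, b0 = A.copy(), b.copy()                       # gauss works in place

x = resolve_up(*gauss(A, b))
print('residual:', np.linalg.norm(A0 @ x - b0))   # should be close to 0
print('numpy   :', np.linalg.solve(A0, b0))       # reference solution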
95  p1/ex5.py  Normal file
@@ -0,0 +1,95 @@
import matplotlib.pyplot as plt
import numpy as np

new_raph_steps = []
dich_steps = []


def f(x) -> float:
    return np.exp(x) - 2 * np.cos(x)


def dichotomie(a, b, epsilon=1e-6, n=0) -> float:
    x = (a + b) / 2
    alpha = f(a) * f(x)
    dich_steps.append(x)  # record the midpoints so the steps can be plotted

    if np.abs(b - a) <= epsilon:
        return x

    if alpha < 0:
        return dichotomie(a, x, epsilon, n + 1)
    elif alpha == 0:
        return x
    else:
        return dichotomie(x, b, epsilon, n + 1)


def f_prime(x) -> float:
    return np.exp(x) + 2 * np.sin(x)


def newton_raphson(x_0, epsilon, max_iter=1_000) -> float:
    """
    Requirement: the derivative of f(x) must be known.
    :param x_0: Starting point
    :param epsilon: Stopping tolerance
    :param max_iter: Maximum number of iterations
    :return: An approximation of the root
    """
    x = x_0

    for _ in range(max_iter):
        x_prev = x
        fx = f(x)
        fpx = f_prime(x)
        if np.abs(fx) < epsilon:
            return x
        elif fpx == 0:
            raise RuntimeError("Zero derivative: Newton-Raphson failed.")
        x = x - (fx / fpx)
        new_raph_steps.append(x)
        if np.abs(x - x_prev) < epsilon:
            return x

    raise RuntimeError(f"Newton-Raphson did not converge in {max_iter} iterations.")


def ex1() -> None:
    x = np.arange(0, 1, 0.001)
    f_vals = [np.exp(i) - 2 * np.cos(i) for i in x]  # renamed so it does not shadow f()
    f1 = [np.exp(i) for i in x]
    f2 = [2 * np.cos(i) for i in x]

    # Exact float equality almost never happens on a sampled grid; look for a
    # sign change of f1 - f2 between consecutive points instead.
    for i in range(len(x) - 1):
        if (f1[i] - f2[i]) * (f1[i + 1] - f2[i + 1]) <= 0:
            print(f1[i])

    plt.plot(x, f_vals)
    plt.plot(x, f1)
    plt.plot(x, f2)
    plt.grid()
    plt.show()

    dichotomie_res = dichotomie(0, 1, 10e-6)
    print(dichotomie_res)

    plt.plot(dich_steps)
    plt.title('Convergence of the bisection method')
    plt.show()

    newton_raphson_res = newton_raphson(0.1, 10e-6)
    print(newton_raphson_res)

    plt.plot(new_raph_steps)
    plt.title('Convergence of Newton-Raphson')
    plt.show()


"""
NR takes fewer steps than the bisection method; NR looks fast,
like Flash McQueen (Kachow)
"""


if __name__ == '__main__':
    ex1()
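Both root finders can be cross-checked without any extra dependency (a sketch, not part of the commit) by tightening the tolerance and comparing their results; the root of e^x - 2 cos(x) on [0, 1] is about 0.54:

import numpy as np
from ex5 import f, dichotomie, newton_raphson

root_dich = dichotomie(0, 1, 1e-8)
root_nr = newton_raphson(0.5, 1e-8)

print(root_dich, root_nr)                   # both close to 0.54
print(abs(f(root_dich)), abs(f(root_nr)))   # residuals should be tiny
assert abs(root_dich - root_nr) < 1e-6      # the two methods agree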