In [1]:
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.animation as animation
from tqdm.notebook import tqdm, tnrange
from IPython.display import display, HTML
from sklearn.preprocessing import minmax_scale

Implementation

Approximation functions

The function $F_1(x, \vec{c}) = c_1 + c_2 x + c_3 x^2 + \dots + c_n x^{n-1}$,

and its gradient $\nabla_{\vec{c}} F_1(x, \vec{c}) = \langle 1,\ x,\ x^2,\ \dots,\ x^{n-1} \rangle$.

In [2]:
def F_1(x, c: np.ndarray):
    # np.polyval expects coefficients from the highest degree down,
    # while c is stored in ascending order, hence the reversal
    return np.polyval(c[::-1], x)

def F_1_grad(x, c: np.ndarray):
    # gradient w.r.t. c: <1, x, x^2, ..., x^(n-1)>
    return x ** np.arange(len(c))
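
The ascending coefficient order is easy to get backwards, so a quick sanity check may help (the values below are arbitrary):

c_test = np.array([1.0, 2.0, 3.0])            # represents 1 + 2x + 3x^2
assert F_1(2.0, c_test) == 1 + 2*2 + 3*4      # = 17
assert np.allclose(F_1_grad(2.0, c_test), [1.0, 2.0, 4.0])  # <1, x, x^2> at x = 2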

The function $F_2(x, \vec{c}) = c_1 + c_2 \sin(x) + c_3 \cos(x) + c_4 \ln|x|$,

and its gradient $\nabla_{\vec{c}} F_2(x, \vec{c}) = \langle 1,\ \sin(x),\ \cos(x),\ \ln|x| \rangle$.

In [3]:
def F_2(x, c):
    # x is a scalar here; note that ln|x| is undefined at x = 0
    return c[0] + c[1]*math.sin(x) + c[2]*math.cos(x) + c[3]*math.log(abs(x))

def F_2_grad(x, c):
    # gradient w.r.t. c: <1, sin(x), cos(x), ln|x|>
    return np.array([1, math.sin(x), math.cos(x), math.log(abs(x))])
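
`F_2` operates on a scalar `x` via the `math` module; if an array-valued evaluation is ever needed, a numpy-vectorized equivalent could look like this (a hypothetical helper, not part of the notebook):

def F_2_vec(x, c):
    # vectorized counterpart of F_2; ln|x| is still undefined at x = 0
    x = np.asarray(x, dtype=float)
    return c[0] + c[1]*np.sin(x) + c[2]*np.cos(x) + c[3]*np.log(np.abs(x))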

Cost function

The cost function $cost(\vec{c}) = \frac{1}{N} \sum_{i=1}^{N} \big(G(x_i) - F(x_i, \vec{c})\big)^2$,

and its gradient $\nabla_{\vec{c}}\, cost(\vec{c}) = \frac{2}{N} \sum_{i=1}^{N} \big(F(x_i, \vec{c}) - G(x_i)\big)\, \nabla_{\vec{c}} F(x_i, \vec{c})$; in the code below the constant factor 2 is absorbed into the learning rate.

In [4]:
def cost(x, y, c, F):
    # mean squared error between the data y and the model F(x, c)
    return np.mean([(F(x_i, c) - y_i)**2 for x_i, y_i in zip(x, y)])

def cost_grad(x, y, c, F, F_grad):
    # residuals contracted with the per-point gradients of F w.r.t. c;
    # the factor of 2 from differentiating the square is folded into the learning rate
    residuals = np.array([F(x_i, c) - y_i for x_i, y_i in zip(x, y)]) / len(x)
    return residuals @ np.array([F_grad(x_i, c) for x_i in x])
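
Since every experiment below depends on this gradient, it can be cross-checked against a finite-difference approximation; a sketch using $F_2$ (the names `x_chk`, `y_chk`, `c_chk` are illustrative):

rng = np.random.default_rng(0)
x_chk = rng.uniform(0.1, 5, 20)   # strictly positive, so ln|x| in F_2 stays finite
y_chk = rng.normal(size=20)
c_chk = rng.uniform(-1, 1, 4)
eps = 1e-6
numeric = np.array([
    (cost(x_chk, y_chk, c_chk + eps*e, F_2) - cost(x_chk, y_chk, c_chk - eps*e, F_2)) / (2*eps)
    for e in np.eye(4)
])
# cost_grad omits the factor of 2, hence the multiplication here
assert np.allclose(numeric, 2 * cost_grad(x_chk, y_chk, c_chk, F_2, F_2_grad), atol=1e-5)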

Implementation of the anti-gradient (gradient descent) method

In [5]:
def gradient_descent(x, y, c, F, F_grad, N_iterations=100, learning_rate=1):
    c = c.copy()  # avoid mutating the caller's initial guess
    for i in tnrange(N_iterations):
        c -= learning_rate * cost_grad(x, y, c, F, F_grad)
    return c
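
A minimal usage sketch on synthetic data (`x_demo` and `y_demo` are illustrative, not part of the notebook):

x_demo = np.linspace(-1, 1, 50)
y_demo = 3*x_demo - 1 + np.random.normal(0, 0.1, 50)   # noisy samples of y = 3x - 1
c_fit = gradient_descent(x_demo, y_demo, np.zeros(2), F_1, F_1_grad,
                         N_iterations=200, learning_rate=1.0)
print(c_fit)   # should approach [-1, 3]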

Animation

In [6]:
plt.rcParams["animation.html"] = "jshtml"
In [7]:
def generate_history(c, x, y, F, F_grad, N_iterations=1000, learning_rate=1e-1):
    # record the coefficient vector at every iteration so the fit can be animated
    history = np.empty((N_iterations, *c.shape))
    history[0] = c
    for i in tnrange(1, N_iterations):
        history[i] = history[i-1] - learning_rate * cost_grad(x, y, history[i-1], F, F_grad)
    return history
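
On top of the recorded history one can also check convergence numerically; a minimal sketch (the helper `plot_cost_history` is not part of the notebook and assumes the same normalized data the history was generated from):

def plot_cost_history(history, x_fit, y_fit, F):
    # evaluate the cost at every recorded coefficient vector
    costs = [cost(x_fit, y_fit, c_i, F) for c_i in history]
    plt.plot(costs)
    plt.xlabel("iteration")
    plt.ylabel("cost")
    plt.show()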
In [8]:
def animate(x, y, history, F):
    fig, ax = plt.gcf(), plt.gca()
    plt.close()  # keep the static figure from rendering next to the animation
    ax.set_xlim(np.min(x), np.max(x))
    ax.set_ylim(np.min(y), np.max(y))

    y_approx = np.array([F(x_i, history[0]) for x_i in x], dtype=float)
    line, = ax.plot(x, y_approx, animated=True)

    ax.plot(x, y, 'o', markersize=3)  # the noisy data points being approximated

    def update(frame):
        ax.set_title(f"Iteration {frame+1}")
        line.set_data(x, [F(x_i, history[frame]) for x_i in x])
        return [line]

    # blit=False so the per-frame title change is redrawn too
    # (blitting would only refresh the artists returned by update)
    ani = animation.FuncAnimation(fig, update, frames=len(history),
                                  blit=False, interval=50, repeat_delay=2000)
    return ani

Testing utilities

In [9]:
def test(x, y, F, F_grad, C_n, N_iterations, learning_rate):
    c = np.random.uniform(-1, 1, C_n)  # C_n random initial coefficients in [-1, 1]
    # normalize both axes to [-1, 1] so the same learning rates work across experiments
    x_norm, y_norm = minmax_scale(x, (-1, 1)), minmax_scale(y, (-1, 1))
    h = generate_history(c, x_norm, y_norm, F, F_grad, N_iterations, learning_rate)
    ani = animate(x_norm, y_norm, h, F)
    return ani

Results

Approximating the line $y = x$

In [10]:
def G(x, mu, sigma):
    return x + random.gauss(mu, sigma)  # the target line y = x with Gaussian noise
    
N = 100
mu, sigma = 0, 4

x = np.linspace(-N/2, N/2, N)
y = np.array([G(x_i, mu, sigma) for x_i in x])
# plt.plot(x, y, 'o', markersize='3', color='orange')

$F_1$ for $\dim(c) = 2$

In [11]:
F = F_1
F_grad = F_1_grad
C_n = 2
N_iterations = 20
learning_rate = 1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[11]:
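
For $\dim(c) = 2$ the least-squares optimum has a closed form, so the animated fit can be cross-checked against `np.polyfit` (a sketch on the same normalized data; `np.polyfit` returns coefficients from the highest degree down, hence the reversal):

x_norm, y_norm = minmax_scale(x, (-1, 1)), minmax_scale(y, (-1, 1))
c_exact = np.polyfit(x_norm, y_norm, deg=1)[::-1]   # reorder to F_1's ascending convention
print(c_exact)   # the animation above should converge towards these coefficients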

$F_1$ for $\dim(c) = 10$

In [12]:
F = F_1
F_grad = F_1_grad
C_n = 10
N_iterations = 100
learning_rate = 0.1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[12]:

$F_2$

In [13]:
F = F_2
F_grad = F_2_grad
C_n = 4
N_iterations = 200
learning_rate = 0.1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[13]:

Approximating $y = 50 - |x|$ (the $\bigwedge$-shaped line)

In [14]:
def G(x, mu, sigma):
    return 50 - abs(x) + random.gauss(mu, sigma)  # the tent-shaped target with Gaussian noise
    
N = 100
mu, sigma = 0, 4

x = np.linspace(-N/2, N/2, N)
y = np.array([G(x_i, mu, sigma) for x_i in x])
# plt.plot(x, y, 'o', markersize='3', color='orange')

$F_1$ for $\dim(c) = 5$

In [15]:
F = F_1
F_grad = F_1_grad
C_n = 5
N_iterations = 200
learning_rate = 1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[15]:

$F_1$ for $\dim(c) = 10$

In [16]:
F = F_1
F_grad = F_1_grad
C_n = 10
N_iterations = 200
learning_rate = 1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[16]:

$F_2$

In [17]:
F = F_2
F_grad = F_2_grad
C_n = 4
N_iterations = 80
learning_rate = 0.1

test(x, y, F, F_grad, C_n, N_iterations, learning_rate)
Out[17]: