# archive.py

import numpy as np
from numpy import sum, diag, exp, log, divide

from loss_function import find_eta
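
# The objective implemented below is the average softmax cross-entropy
#   F(W) = -(1/m) * sum_k C[k, :] @ log(exp(X.T @ w_k - eta) / sum_j exp(X.T @ w_j - eta)),
# where C holds one-hot labels and eta is a per-sample shift that cancels
# analytically, so it only affects numerical stability. find_eta is assumed
# to return such a shift (e.g. the row-wise max of X.T @ W); its actual
# definition lives in loss_function.
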
# works well
def objective_soft_max_Wj(X, W, C):
    """
    Softmax cross-entropy objective, averaged over the m samples.

    :param X: data matrix, shape #features x #samples
    :param W: weight matrix, shape #features x #labels
    :param C: one-hot label indicators, shape #labels x #samples
    :return: scalar objective value
    """
    l = C.shape[0]
    m = X.shape[1]
    objective_value = 0
    eta = find_eta(X, W)                    # stability shift (cancels analytically)
    XT_W = X.T @ W - eta                    # shifted scores, shape #samples x #labels
    weighted_sum = sum(exp(XT_W), axis=1)   # softmax denominator per sample
    for k in range(l):
        objective_value = objective_value + C[k, :].T @ log(divide(exp(XT_W[:, k]), weighted_sum))
    return -objective_value / m


# works well; kept as a reference implementation
def objective_soft_max_old(X, W, C):
    """Same objective as objective_soft_max_Wj, normalizing via a diagonal matrix instead of an elementwise divide."""
    l = C.shape[0]
    m = X.shape[1]
    objective_value = 0
    eta = find_eta(X, W)
    XT_W = X.T @ W - eta
    weighted_sum = sum(exp(XT_W), axis=1)
    weighted_sum_matrix = diag(1 / weighted_sum)   # diagonal of reciprocal denominators
    for k in range(l):
        objective_value = objective_value + C[k, :].T @ log(weighted_sum_matrix @ exp(XT_W[:, k]))
    return -objective_value / m


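# Quick consistency sketch (the helper name is illustrative, not part of the
# original API): the two objectives above should agree to machine precision,
# since diag(1 / s) @ v equals v / s elementwise.
def _demo_objectives_agree(n=5, l=3, m=10, seed=0):
    rng = np.random.default_rng(seed)
    X = rng.standard_normal((n, m))
    W = rng.standard_normal((n, l))
    C = np.eye(l)[:, rng.integers(l, size=m)]   # random one-hot labels, shape l x m
    return np.isclose(objective_soft_max_Wj(X, W, C),
                      objective_soft_max_old(X, W, C))

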
def objective_soft_max_gradient_Wp(X, W, C, p):
    """Gradient of the objective with respect to the p-th weight column W[:, p]."""
    m = X.shape[1]
    eta = find_eta(X, W)
    XT_W = X.T @ W - eta
    weighted_sum = sum(exp(XT_W), axis=1)
    return (1 / m) * X @ (divide(exp(XT_W[:, p]), weighted_sum) - C[p, :])


def objective_soft_max_gradient_W2(X, W, C):
    """Full gradient with respect to W, assembled column by column."""
    l = C.shape[0]
    n = X.shape[0]
    m = X.shape[1]
    eta = find_eta(X, W)
    XT_W = X.T @ W - eta
    weighted_sum = sum(exp(XT_W), axis=1)
    grad = np.zeros((n, l))
    for p in range(l):
        grad[:, p] = (1 / m) * X @ (divide(exp(XT_W[:, p]), weighted_sum) - C[p, :])
    return grad


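# Fully vectorized alternative (a sketch, equivalent to the column loop above):
# the softmax probabilities P have shape #samples x #labels, and the whole
# gradient collapses to (1/m) * X @ (P - C.T).
def _objective_soft_max_gradient_W_vectorized(X, W, C):
    m = X.shape[1]
    XT_W = X.T @ W - find_eta(X, W)
    P = exp(XT_W) / sum(exp(XT_W), axis=1, keepdims=True)
    return (1 / m) * X @ (P - C.T)

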
def objective_soft_max_gradient_W_old_but_gold(X, W, C):
    """Reference gradient implementation using a diagonal normalization matrix."""
    l = C.shape[0]
    n = X.shape[0]
    m = X.shape[1]
    eta = find_eta(X, W)
    XT_W = X.T @ W - eta
    weighted_sum = sum(exp(XT_W), axis=1)
    weighted_sum_matrix = np.diag(1 / weighted_sum)
    grad = np.zeros((n, l))
    for p in range(l):
        grad[:, p] = (1 / m) * X @ (weighted_sum_matrix @ exp(XT_W[:, p]) - C[p, :])
    return grad
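

# Hedged usage sketch: central finite-difference check of the analytic
# gradient. This assumes find_eta behaves like a standard log-sum-exp
# stability shift (it cancels in the objective), so F is smooth in W.
# The helper name _gradient_check is illustrative only.
def _gradient_check(n=5, l=3, m=10, h=1e-6, seed=0):
    rng = np.random.default_rng(seed)
    X = rng.standard_normal((n, m))
    W = rng.standard_normal((n, l))
    C = np.eye(l)[:, rng.integers(l, size=m)]
    analytic = objective_soft_max_gradient_W2(X, W, C)
    numeric = np.zeros_like(W)
    for i in range(n):
        for j in range(l):
            E = np.zeros_like(W)
            E[i, j] = h
            numeric[i, j] = (objective_soft_max_Wj(X, W + E, C)
                             - objective_soft_max_Wj(X, W - E, C)) / (2 * h)
    return np.max(np.abs(analytic - numeric))


if __name__ == "__main__":
    # For a correct gradient the max error should be on the order of 1e-8.
    print("objectives agree:", _demo_objectives_agree())
    print("max gradient error:", _gradient_check())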