Skip to content

Commit ae9d00e

Browse files
committed
Add solutions to sample exam
1 parent ee25436 commit ae9d00e

2 files changed

Lines changed: 82 additions & 0 deletions

File tree

src/codes/07-summary/exam_preparation.py

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,50 @@ def euler_method(
3333
return x, y
3434
### ANCHOR_END: euler_errors
3535

36+
### ANCHOR: gradient_descent_errors
# NOTE(review): this block DELIBERATELY contains the errors students must
# find in the exam exercise (including the missing ':' on the def line).
# Do not repair it — the fixed version lives in the
# `gradient_descent_correct` anchor of this file.
def double_well_gradient(x): # x: float
    grad = 6 * x**5 - 2 * x - 0.25
    return grad

def gradient_descent(func_grad, x0, tau=0.01, maxgrad=1e-6, maxiter=500)
    x = 0
    converged = False

    for _ in range(0, maxiter):
        grad = func_grad(x)
        x = x - tau * grad
        if grad < maxgrad:
            converged = True
            break

        return x, converged

x_opt, converged = gradient_descent(double_well_gradient, -1.2)
print("Ein lokales Minimum liegt bei x = " , x_opt) # x_opt = -0.8872
### ANCHOR_END: gradient_descent_errors
57+
58+
### ANCHOR: gradient_descent_correct
def double_well_gradient(x):
    """Return the gradient of the double-well potential at the float x.

    Error 1 fixed: the derivative of -2*x**2 is -4*x, not -2*x.
    """
    return 6 * x**5 - 4 * x - 0.25
62+
63+
def gradient_descent(func_grad, x0, tau=0.01, maxgrad=1e-6, maxiter=500):
    """Minimise a 1-D function by gradient descent.

    Errors fixed: descent starts at x0 (Error 2), convergence tests the
    gradient magnitude (Error 3), and the result is returned only after
    the iteration has ended (Error 4).

    Parameters: func_grad maps x to the gradient at x; x0 is the initial
    guess; tau is the step size; iteration stops when |grad| < maxgrad
    or after maxiter steps. Returns (x, converged).
    """
    converged = False
    x = x0  # Error 2 fixed: start from the supplied initial guess, not 0.
    steps_taken = 0
    while steps_taken < maxiter:
        grad = func_grad(x)
        x -= tau * grad
        # Error 3 fixed: the *magnitude* of the gradient decides convergence.
        if abs(grad) < maxgrad:
            converged = True
            break
        steps_taken += 1
    # Error 4 fixed: single return after the loop.
    return x, converged
75+
76+
# Demo: starting from x0 = -1.2 the corrected descent converges to the
# left local minimum of the double-well potential.
x_opt, converged = gradient_descent(double_well_gradient, -1.2)
print("Ein lokales Minimum liegt bei x = " , x_opt) # x_opt = -0.8872
### ANCHOR_END: gradient_descent_correct
79+
3680
### ANCHOR: knn_incomplete
3781
import numpy as np
3882
import matplotlib.pyplot as plt
@@ -92,6 +136,22 @@ def predict(self, X, y, xi):
92136
return y_pred
93137
### ANCHOR_END: knn_complete
94138

139+
### ANCHOR: knn_weighted_complete
class kNNWeightedClassifier:
    """k-nearest-neighbour classifier with Gaussian distance weighting."""

    def __init__(self, k, sigma=1.0):
        self.k = k          # number of neighbours considered
        self.sigma = sigma  # width of the Gaussian weighting kernel

    def predict(self, X, y, xi):
        """Predict the label of query point xi from training data (X, y).

        Each of the k nearest neighbours votes with weight
        exp(-d**2 / (2 * sigma**2)); the label with the largest total
        weight wins.
        """
        distances = np.linalg.norm(X - xi, axis=1)
        weights = np.exp(-distances**2 / (2 * self.sigma**2))
        nearest = np.argsort(distances)[:self.k]
        y_nearest = y[nearest]
        w_nearest = weights[nearest]
        # Bug fix: the weights were computed but never used — the vote was
        # a plain unweighted majority, identical to the unweighted kNN.
        # Accumulate the Gaussian weight per candidate label instead.
        unique_labels = np.unique(y_nearest)
        label_weights = np.array(
            [w_nearest[y_nearest == label].sum() for label in unique_labels]
        )
        y_pred = unique_labels[np.argmax(label_weights)]
        return y_pred
### ANCHOR_END: knn_weighted_complete
154+
95155
### ANCHOR: knn_example
96156
N = 20
97157
X = np.random.randn(N, 2)

src/psets/exam_preparation.md

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,11 @@ print(y)
3535
<div style="max-width: 300px; margin: 1em auto;
3636
padding: 1em; border: 1px solid #ccc; border-radius: 4px; text-align: left;"
3737

38+
<!--
39+
**Lösung:**
40+
`[6, 4, 2]`
41+
-->
42+
3843
**(b)**
3944

4045
```python
@@ -57,6 +62,11 @@ print(z)
5762
<div style="max-width: 300px; margin: 1em auto;
5863
padding: 1em; border: 1px solid #ccc; border-radius: 4px; text-align: left;"
5964

65+
<!--
66+
**Lösung:**
67+
`[5, 7, 9]`
68+
-->
69+
6070
**(c)**
6171

6272
```python
@@ -77,6 +87,11 @@ print(z)
7787

7888
</div>
7989

90+
<!--
91+
**Lösung:**
92+
`32`
93+
-->
94+
8095
<!--- ANCHOR_END: aufgabe_1 --->
8196

8297
## Aufgabe 2: Gradientenverfahren
@@ -121,6 +136,13 @@ x_opt, converged = gradient_descent(double_well_gradient, -1.2)
121136
print("Ein lokales Minimum liegt bei x = " , x_opt) # x_opt = -0.8872
122137
```
123138

139+
<!--
140+
**Lösung:**
141+
```
142+
{{#include ../codes/07-summary/exam_preparation.py:gradient_descent_correct}}
143+
```
144+
-->
145+
124146
<!--- ANCHOR_END: aufgabe_2 --->
125147

126148
## Aufgabe 3: $k$-Nearest Neighbors

0 commit comments

Comments
 (0)