Commit 439ed0c

delete irrelevant part
1 parent bfa5a2a commit 439ed0c

File tree

1 file changed: +1 -94 lines


lectures/calvo_gradient.md

Lines changed: 1 addition & 94 deletions
@@ -1014,97 +1014,4 @@ After checking the above formulas, please implement them in Python and check tha
 
 After that, we can put this material into a second chapter of the Calvo gradient lecture.
 
-I can write why we regard this as ML -- i.e., sort of brute force, but less brute force than the earlier gradient method because here we recognize more about the mathematical structure of $V$.
-
-```{code-cell} ipython3
-import numpy as np
-import jax.numpy as jnp
-from jax import grad, jit
-import optax
-
-@jit
-def compute_V(μ, β, c, α=1, u0=1, u1=0.5, u2=3):
-    θ = compute_θ(μ, α)
-
-    h0 = u0
-    h1 = -u1 * α
-    h2 = -0.5 * u2 * α**2
-
-    T = len(μ) - 1
-    t = np.arange(T)
-
-    # Compute sum except for the last element
-    V_sum = np.sum(β**t * (h0 + h1 * θ[:T] + h2 * θ[:T]**2 - 0.5 * c * μ[:T]**2))
-
-    # Compute the final term
-    V_final = (β**T / (1 - β)) * (h0 + h1 * μ[-1] + h2 * μ[-1]**2 - 0.5 * c * μ[-1]**2)
-
-    V = V_sum + V_final
-
-    return V
-
-
-def compute_θ(μ, α=1):
-    λ = α / (1 + α)
-    T = len(μ) - 1
-    μbar = μ[-1]
-
-    # Create an array of powers for λ
-    λ_powers = λ ** jnp.arange(T + 1)
-
-    # Compute the weighted sums for all t
-    weighted_sums = jnp.array(
-        [jnp.sum(λ_powers[:T-t] * μ[t:T]) for t in range(T)])
-
-    # Compute θ values except for the last element
-    θ = (1 - λ) * weighted_sums + λ**(T - jnp.arange(T)) * μbar
-
-    # Set the last element
-    θ = jnp.append(θ, μbar)
-
-    return θ
-
-def compute_J(μ, β, c, α=1, u0=1, u1=0.5, u2=3):
-    θ = compute_θ(μ, α)
-    T = len(μ)
-
-    h0 = u0
-    h1 = -u1 * α
-    h2 = -0.5 * u2 * α**2
-
-    λ = α / (1 + α)
-    A = jnp.eye(T, T) - λ * jnp.eye(T, T, k=1)
-    B = (1 - λ) * np.linalg.inv(A)
-
-    e_vec = jnp.hstack([jnp.ones(T-1), jnp.sqrt(1/(1-β))])
-    β_vec = jnp.hstack([jnp.array([β**(t/2) for t in range(T-1)]), jnp.sqrt(β**(T-1)/(1-β))])
-
-    β_vec_sq = β_vec**2
-    f1 = (B * β_vec * e_vec)
-
-    disc_B = β_vec * B
-    F1 = (disc_B).T @ (disc_B)
-    F2 = jnp.diag(β_vec_sq)
-
-    g1 = h1 * f1
-    G2 = h2 * F1 - (c/2) * F2
-
-    return h0 + jnp.dot(g1, μ) + jnp.dot(μ, jnp.dot(G2, μ))
-
-# Parameters
-T = 40
-β = 0.85
-c = 2
-α = 1
-u0 = 1
-u1 = 0.5
-u2 = 3
-
-# Initial guess for μ
-μ_init = jnp.zeros(T)
-
-# Calculate values using both functions
-V_val = compute_V(μ_init, β, c, α, u0, u1, u2)
-J_val = compute_J(μ_init, β, c, α, u0, u1, u2)
-V_val, J_val
-```
+I can write why we regard this as ML -- i.e., sort of brute force, but less brute force than the earlier gradient method because here we recognize more about the mathematical structure of $V$.
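
The "less brute force" point can be made concrete. The deleted `compute_J` assembles the criterion as an exact quadratic in $\mu$, $J(\mu) = h_0 + g_1 \cdot \mu + \mu^\top G_2 \mu$, so the maximizer satisfies a linear first-order condition rather than requiring many gradient steps. Below is a minimal sketch of that shortcut; the helper name `solve_quadratic` is hypothetical, and `g1` (a vector) and `G2` (a matrix) are assumed to be the linear and quadratic coefficients from a `compute_J`-style construction.

```python
import jax.numpy as jnp

def solve_quadratic(g1, G2):
    # Hypothetical helper (not part of the commit): with
    # J(μ) = h0 + g1 @ μ + μ @ G2 @ μ, the first-order condition is
    # g1 + (G2 + G2.T) @ μ = 0, a linear system -- one solve, no iteration.
    return jnp.linalg.solve(G2 + G2.T, -g1)

# Shape check with stand-in coefficients
T = 40
g1 = jnp.ones(T)
G2 = -jnp.eye(T)   # negative definite, so the stationary point is a maximum
μ_star = solve_quadratic(g1, G2)
```

A gradient routine such as one built with `optax` would march toward the same stationary point step by step; recognizing the quadratic structure of $V$ collapses the whole search into a single linear solve.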
