@@ -641,145 +641,7 @@ compute_V(optimized_μ, β=0.85, c=2)
```{code-cell} ipython3
compute_V(clq.μ_series, β=0.85, c=2)
```
-
- ### Some regressions
-
- In the interest of looking for some parameters that might help us learn about the structure of
- the Ramsey plan, we shall run some least squares linear regressions of various components of $\vec \theta$ and $\vec \mu$ on others.
-
- ```{code-cell} ipython3
- # Compute θ using optimized_μ
- θs = np.array(compute_θ(optimized_μ))
- μs = np.array(optimized_μ)
-
- # First regression: μ_t on a constant and θ_t
- X1_θ = sm.add_constant(θs)
- model1 = sm.OLS(μs, X1_θ)
- results1 = model1.fit()
-
- # Print regression summary
- print("Regression of μ_t on a constant and θ_t:")
- print(results1.summary(slim=True))
- ```
-
- ```{code-cell} ipython3
- plt.scatter(θs, μs)
- plt.plot(θs, results1.predict(X1_θ), 'C1', label=r'$\hat \mu_t$', linestyle='--')
- plt.xlabel(r'$\theta_t$')
- plt.ylabel(r'$\mu_t$')
- plt.legend()
- plt.show()
- ```
-
- ```{code-cell} ipython3
- # Second regression: θ_{t+1} on a constant and θ_t
- θ_t = np.array(θs[:-1]) # θ_t
- θ_t1 = np.array(θs[1:]) # θ_{t+1}
- X2_θ = sm.add_constant(θ_t) # Add a constant term for the intercept
- model2 = sm.OLS(θ_t1, X2_θ)
- results2 = model2.fit()
-
- # Print regression summary
- print("\nRegression of θ_{t+1} on a constant and θ_t:")
- print(results2.summary(slim=True))
- ```
-
- ```{code-cell} ipython3
- plt.scatter(θ_t, θ_t1)
- plt.plot(θ_t, results2.predict(X2_θ), color='C1', label=r'$\hat \theta_{t+1}$', linestyle='--')
- plt.xlabel(r'$\theta_t$')
- plt.ylabel(r'$\theta_{t+1}$')
- plt.legend()
-
- plt.tight_layout()
- plt.show()
- ```
-
- Now to learn about the structure of the optimal value $V$ as a function of $\vec \mu, \vec \theta$,
- we'll run some more regressions.
-
- +++
-
- First, we modify the function `compute_V` so that it returns the sequence of values $v_t$; we call the modified function `compute_V_t`.
-
- ```{code-cell} ipython3
- def compute_V_t(μ, β, c, α=1, u0=1, u1=0.5, u2=3):
-     θ = compute_θ(μ, α)
-
-     h0 = u0
-     h1 = -u1 * α
-     h2 = -0.5 * u2 * α**2
-
-     T = len(μ)
-     V_t = jnp.zeros(T)
-
-     for t in range(T - 1):
-         V_t = V_t.at[t].set(β**t * (h0 + h1 * θ[t] + h2 * θ[t]**2 - 0.5 * c * μ[t]**2))
-
-     # Terminal condition
-     V_t = V_t.at[T-1].set((β**(T-1) / (1 - β)) * (h0 + h1 * μ[-1] + h2 * μ[-1]**2 - 0.5 * c * μ[-1]**2))
-
-     return V_t
- ```
-
- ```{code-cell} ipython3
- # Compute v_t
- v_ts = np.array(compute_V_t(optimized_μ, β=0.85, c=2))
-
- # Initialize arrays for cumulative discounted sums of θ_t, θ_t^2, μ_t^2
- βθ_t = np.zeros(T)
- βθ_t2 = np.zeros(T)
- βμ_t2 = np.zeros(T)
-
- # Compute cumulative discounted sums of θ_t, θ_t^2, μ_t^2 up to each t
- for ts in range(T):
-     βθ_t[ts] = sum(clq.β**t * θs[t]
-                    for t in range(ts + 1))
-     βθ_t2[ts] = sum(clq.β**t * θs[t]**2
-                     for t in range(ts + 1))
-     βμ_t2[ts] = sum(clq.β**t * μs[t]**2
-                     for t in range(ts + 1))
-
- X = np.column_stack((βθ_t, βθ_t2, βμ_t2))
- X_vt = sm.add_constant(X)
-
- # Fit the model
- model3 = sm.OLS(v_ts, X_vt).fit()
- ```
-
- ```{code-cell} ipython3
- plt.figure()
- plt.scatter(θs, v_ts)
- plt.plot(θs, model3.predict(X_vt), color='C1', label=r'$\hat v_t$', linestyle='--')
- plt.xlabel('$θ_t$')
- plt.ylabel('$v_t$')
- plt.legend()
- plt.show()
- ```
-
- Using a different and more structured computational strategy, the quantecon lecture {doc}`calvo` represented
- a Ramsey plan recursively via the following system of linear equations:
-
- ```{math}
- :label: eq_old9101
-
- \begin{aligned}
- \theta_0 & = \theta_0^R \\
- \mu_t & = b_0 + b_1 \theta_t \\
- v_t & = g_0 + g_1 \theta_t + g_2 \theta_t^2 \\
- \theta_{t+1} & = d_0 + d_1 \theta_t, \quad d_0 > 0, \ d_1 \in (0,1)
- \end{aligned}
- ```
-
- where $b_0, b_1, g_0, g_1, g_2$ were positive parameters that the lecture computed with Python code.
-
- By running regressions on the outcomes $\vec \mu^R, \vec \theta^R$ that we computed with the brute-force gradient descent method in this lecture, we have recovered the same representation.
-
- However, in this lecture we have more or less discovered the representation by brute force -- i.e.,
- just by running some regressions and staring at the results, noticing that $R^2$ values of unity tell us
- that the fits are perfect.
+

### Restricting $\mu_t = \bar \mu$ for all $t$

@@ -910,6 +772,12 @@ A, B = construct_B(α=clq.α, T=T)
print(f'A = \n {A}')
```

+ ```{code-cell} ipython3
+ # Compute θ using optimized_μ
+ θs = np.array(compute_θ(optimized_μ))
+ μs = np.array(optimized_μ)
+ ```
+

```{code-cell} ipython3
np.allclose(θs, B @ clq.μ_series)
```
@@ -1120,3 +988,142 @@ closed_grad
```{code-cell} ipython3
print(f'deviation = {np.linalg.norm(closed_grad - (- grad_J(jnp.ones(T))))}')
```
+
+ ### Some regressions
+
+ In the interest of looking for some parameters that might help us learn about the structure of
+ the Ramsey plan, we shall run some least squares linear regressions of various components of $\vec \theta$ and $\vec \mu$ on others.
+
+ ```{code-cell} ipython3
+ # Compute θ using optimized_μ
+ θs = np.array(compute_θ(optimized_μ))
+ μs = np.array(optimized_μ)
+
+ # First regression: μ_t on a constant and θ_t
+ X1_θ = sm.add_constant(θs)
+ model1 = sm.OLS(μs, X1_θ)
+ results1 = model1.fit()
+
+ # Print regression summary
+ print("Regression of μ_t on a constant and θ_t:")
+ print(results1.summary(slim=True))
+ ```
+
+ ```{code-cell} ipython3
+ plt.scatter(θs, μs)
+ plt.plot(θs, results1.predict(X1_θ), 'C1', label=r'$\hat \mu_t$', linestyle='--')
+ plt.xlabel(r'$\theta_t$')
+ plt.ylabel(r'$\mu_t$')
+ plt.legend()
+ plt.show()
+ ```
+
+ ```{code-cell} ipython3
+ # Second regression: θ_{t+1} on a constant and θ_t
+ θ_t = np.array(θs[:-1]) # θ_t
+ θ_t1 = np.array(θs[1:]) # θ_{t+1}
+ X2_θ = sm.add_constant(θ_t) # Add a constant term for the intercept
+ model2 = sm.OLS(θ_t1, X2_θ)
+ results2 = model2.fit()
+
+ # Print regression summary
+ print("\nRegression of θ_{t+1} on a constant and θ_t:")
+ print(results2.summary(slim=True))
+ ```
+
+ ```{code-cell} ipython3
+ plt.scatter(θ_t, θ_t1)
+ plt.plot(θ_t, results2.predict(X2_θ), color='C1', label=r'$\hat \theta_{t+1}$', linestyle='--')
+ plt.xlabel(r'$\theta_t$')
+ plt.ylabel(r'$\theta_{t+1}$')
+ plt.legend()
+
+ plt.tight_layout()
+ plt.show()
+ ```
+
+ Now to learn about the structure of the optimal value $V$ as a function of $\vec \mu, \vec \theta$,
+ we'll run some more regressions.
+
+ +++
+
+ First, we modify the function `compute_V` so that it returns the sequence of values $v_t$; we call the modified function `compute_V_t`.
+
+ ```{code-cell} ipython3
+ def compute_V_t(μ, β, c, α=1, u0=1, u1=0.5, u2=3):
+     θ = compute_θ(μ, α)
+
+     h0 = u0
+     h1 = -u1 * α
+     h2 = -0.5 * u2 * α**2
+
+     T = len(μ)
+     V_t = jnp.zeros(T)
+
+     for t in range(T - 1):
+         V_t = V_t.at[t].set(β**t * (h0 + h1 * θ[t] + h2 * θ[t]**2 - 0.5 * c * μ[t]**2))
+
+     # Terminal condition
+     V_t = V_t.at[T-1].set((β**(T-1) / (1 - β)) * (h0 + h1 * μ[-1] + h2 * μ[-1]**2 - 0.5 * c * μ[-1]**2))
+
+     return V_t
+ ```
+
+ ```{code-cell} ipython3
+ # Compute v_t
+ v_ts = np.array(compute_V_t(optimized_μ, β=0.85, c=2))
+
+ # Initialize arrays for cumulative discounted sums of θ_t, θ_t^2, μ_t^2
+ βθ_t = np.zeros(T)
+ βθ_t2 = np.zeros(T)
+ βμ_t2 = np.zeros(T)
+
+ # Compute cumulative discounted sums of θ_t, θ_t^2, μ_t^2 up to each t
+ for ts in range(T):
+     βθ_t[ts] = sum(clq.β**t * θs[t]
+                    for t in range(ts + 1))
+     βθ_t2[ts] = sum(clq.β**t * θs[t]**2
+                     for t in range(ts + 1))
+     βμ_t2[ts] = sum(clq.β**t * μs[t]**2
+                     for t in range(ts + 1))
+
+ X = np.column_stack((βθ_t, βθ_t2, βμ_t2))
+ X_vt = sm.add_constant(X)
+
+ # Fit the model
+ model3 = sm.OLS(v_ts, X_vt).fit()
+ ```
+
+ ```{code-cell} ipython3
+ plt.figure()
+ plt.scatter(θs, v_ts)
+ plt.plot(θs, model3.predict(X_vt), color='C1', label=r'$\hat v_t$', linestyle='--')
+ plt.xlabel('$θ_t$')
+ plt.ylabel('$v_t$')
+ plt.legend()
+ plt.show()
+ ```
+
+ Using a different and more structured computational strategy, the quantecon lecture {doc}`calvo` represented
+ a Ramsey plan recursively via the following system of linear equations:
+
+ ```{math}
+ :label: eq_old9101
+
+ \begin{aligned}
+ \theta_0 & = \theta_0^R \\
+ \mu_t & = b_0 + b_1 \theta_t \\
+ v_t & = g_0 + g_1 \theta_t + g_2 \theta_t^2 \\
+ \theta_{t+1} & = d_0 + d_1 \theta_t, \quad d_0 > 0, \ d_1 \in (0,1)
+ \end{aligned}
+ ```
+
+ where $b_0, b_1, g_0, g_1, g_2$ were positive parameters that the lecture computed with Python code.
+
+ By running regressions on the outcomes $\vec \mu^R, \vec \theta^R$ that we computed with the brute-force gradient descent method in this lecture, we have recovered the same representation.
+
+ However, in this lecture we have more or less discovered the representation by brute force -- i.e.,
+ just by running some regressions and staring at the results, noticing that $R^2$ values of unity tell us
+ that the fits are perfect.
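
To connect the regression output to the system of equations above, one can read candidate values of $b_0, b_1$ and $d_0, d_1$ directly off the fitted coefficients. The cell below is a minimal sketch along those lines; it assumes that `results1` and `results2` from the regression cells above are still in scope, and the names `b0`, `b1`, `d0`, `d1` are introduced purely for illustration.

```{code-cell} ipython3
# A sketch: read candidate coefficients of the recursive representation
# off the regressions fitted above (b0, b1, d0, d1 are illustrative names)
b0, b1 = results1.params   # from the regression of μ_t on a constant and θ_t
d0, d1 = results2.params   # from the regression of θ_{t+1} on a constant and θ_t

print(f'μ_t     ≈ {b0:.6f} + {b1:.6f} * θ_t,   R² = {results1.rsquared:.6f}')
print(f'θ_(t+1) ≈ {d0:.6f} + {d1:.6f} * θ_t,   R² = {results2.rsquared:.6f}')
```

With the $R^2$ values of unity reported above, these fitted coefficients play the roles of $b_0, b_1$ and $d_0, d_1$ in the representation above.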