* Two-stage GMM estimation with a hand-rolled DFP quasi-Newton optimizer.
* Scenario  5: gmmbin  (binary choice model)
* Scenario  8: gmmsel  (selection model)
* Scenario 19: gmmwage (wage equation, small data)
cap pro drop _all
set matsize 11000

global senario = 5

/* Example: estimating a binary choice model */

* Initialize $k (# of parameters), $m (# of moments), and the starting
* parameter vector theta (k x 1) for the selected scenario.
pro def setParameters
    if $senario == 5 {
        global k=5                       /* # of parameters */
        global m=5                       /* # of moments */
        matrix theta=[0\1\4\1\1]         /* initial parameter values */
    }
    else if $senario == 8 {
        global k=8                       /* # of parameters */
        global m=8                       /* # of moments */
        matrix theta=[1\1\1\1\9\1\1\1]   /* initial parameter values */
    }
    else if $senario == 19 {
        global k=19                      /* # of parameters */
        global m=19                      /* # of moments */
        matrix theta=[-6.7577\-0.3257\0.2065\0.08976\-0.00216\0.6695\0.02015\0.01131\0.01302\1\1.34\0.16\0.118\-0.162\-0.077\0.019\0.207\0.198\0.194]   /* initial parameter values */
    }
end

* Build the n-by-m moment matrix G (one column per moment condition)
* from the data in memory and the current theta.
cap pro drop mkG
pro def mkG
    * Unpack theta into scalars theta1..thetak for use in expressions.
    forvalues i=1/$k {
        scalar theta`i'=theta[`i',1]
    }
    * Drop any moment variables left over from a previous call.
    forvalues j=1/$m {
        cap drop g`j'
    }
    if $senario == 5 {
        gen g1 = v-theta1-theta2*s
        gen g2 = s*g1
        gen g3 = theta3-g1^2
        gen g4 = (D-(v>=0))*sqrt(2*_pi*theta3)*exp((v-theta1-theta2*s)^2/(2*theta3))-theta4-theta5*x
        gen g5 = z*g4
    }
    else if $senario == 8 {
        g g1 = v-theta1-theta2*y1-theta3*z1-theta4*z2
        g g2 = y1*g1
        g g3 = z1*g1
        g g4 = z2*g1
        g g5 = theta5-g1^2
        g g6 = D*(p-theta6-theta7*y1-theta8*z2)*exp((v-theta1-theta2*y1-theta3*z1-theta4*z2)^2/(2*theta5))
        g g7 = z1*g6
        g g8 = z2*g6
    }
    else if $senario == 19 {
        * moments g1-g9:   X*(v-XA)
        * moment  g10:     Sigma2-(v-XA)^2
        * moments g11-g19: D*Z*(p-YB)
        * Here X=Y=Z.
        g g1 = (lncost14-theta1-theta2*afqt4-theta3*par_c-theta4*BLACK-theta5*MALE-theta6*URBAN-theta7*ch-theta8*exp1-theta9*exp2)
        g g2 = afqt4*g1
        g g3 = par_c*g1
        g g4 = BLACK*g1
        g g5 = MALE*g1
        g g6 = URBAN*g1
        g g7 = ch*g1
        g g8 = exp1*g1
        g g9 = exp2*g1
        g g10 = theta10-g1^2
        g g11 = D*(lnw-theta11-theta12*afqt4-theta13*par_c-theta14*BLACK-theta15*MALE-theta16*URBAN-theta17*ch-theta18*exp1-theta19*exp2)*exp((lncost14-theta1-theta2*afqt4-theta3*par_c-theta4*BLACK-theta5*MALE-theta6*URBAN-theta7*ch-theta8*exp1-theta9*exp2)^2/(2*theta10))
        g g12 = afqt4*g11
        g g13 = par_c*g11
        g g14 = BLACK*g11
        g g15 = MALE*g11
        g g16 = URBAN*g11
        g g17 = ch*g11
        g g18 = exp1*g11
        g g19 = exp2*g11
    }
    mkmat g1-g$m, matrix(G)
end

* Mean moment vector: mg (1 x m) = (1/N) * 1' * G.
cap pro drop mkMeang
prog def mkMeang
    mkG
    matrix l=J(1, _N, 1)
    matrix mg=l*G*(1/_N)
end

* Numerical gradient matrix Gradg = d_mg/d_theta (k x m), by one-sided
* forward differences with a relative step clamped to [epsMin, epsMax].
cap pro drop mkGradg
program define mkGradg
    local epsMin = 1e-5
    local epsMax = 0.1
    local alpha = 1e-3
    mkMeang
    matrix mgCurrent=mg
    forvalues i=1/$k {
        local temp=theta[`i',1]
        * Relative step; clamped below by epsMin (also handles temp<=0).
        local eps = `alpha' * `temp'
        local eps = max(`eps', `epsMin')
        local eps = min(`eps', `epsMax')
        matrix theta[`i',1]= `temp'+`eps'
        mkMeang
        matrix mgplus=mg
        matrix grad`i'=(mgplus-mgCurrent)/`eps'
        matrix theta[`i',1]=`temp'       /* restore the perturbed element */
    }
    * Stack the k row-gradients (each 1 x m) into Gradg (k x m).
    matrix Sqr= I($m)
    forvalues j=1/$k {
        matrix Sqr[`j',1] =grad`j'
    }
    matrix Gradg=Sqr[1..$k,.]
end

* Optimize using the DFP variable metric algorithm:
* Greene, Econometric Analysis, 5th ed., P938-940.
* Minimize f = mg*W*mg' w.r.t. theta1 theta2...thetak.
* F.O.C: d_f/d_theta = d_mg/d_theta * d_f/d_mg'
*                    = d_mg/d_theta * (W+W') * mg'
*                    = Gradg*(W+W')*mg'
*                    = 0
cap pro drop optimize
program define optimize
    matrix invH = I($k)                  /* initialize inverse Hessian = I(k) */
    forvalues itera=1/200 {
        di " "
        di " "
        di " "
        di in g "============================== Iteration # `itera' ============================="
        matrix tempMat = theta           /* save theta from the last iteration */
        mkMeang
        matrix obj = mg*W*mg'
        local f = obj[1,1]
        di "(f) =`f'"
        mkGradg
        matrix df = Gradg*(W+W')*mg'
        matrix direc = -invH*df
        matrix dMag2 = direc'*direc
        local mag2 = dMag2[1,1]
        matrix direc = direc/sqrt(`mag2')    /* normalize the direction vector */

        /***********************************************************
         * Search for the optimal step size using the Golden Section
         * method.  (3-sqrt(5))/2 = 0.3819660113;
         *          (sqrt(5)-1)/2 = 0.6180339887. */
        local stepMin = 0
        local stepMax = 8
        local stepA = 0.6180339887*`stepMin'+0.3819660113*`stepMax'
        matrix theta = tempMat+`stepA'*direc
        mkMeang
        matrix fA = mg*W*mg'
        local fa = fA[1,1]
        /* Make sure that fa < f; otherwise decrease stepMax by 50%. */
        while `fa'>`f' | `fa'==. {
            local stepMax = `stepMax'/2
            local stepA = 0.6180339887*`stepMin'+0.3819660113*`stepMax'
            matrix theta = tempMat+`stepA'*direc
            mkMeang
            matrix fA = mg*W*mg'
            local fa = fA[1,1]
            if `stepMax' < 1e-5 {
                continue, break
            }
            di in y "(fa, stepA) = (`fa', `stepA')"
        }
        local stepB = 0.6180339887*`stepMax'+0.3819660113*`stepMin'
        matrix theta = tempMat+`stepB'*direc
        mkMeang
        matrix fB = mg*W*mg'
        local fb = fB[1,1]
        * Shrink the bracket [stepMin, stepMax] around the minimizer.
        forvalues i=1/100 {
            if `fa'<`fb' {
                local stepMax=`stepB'
                local stepB =`stepA'
                local fb = `fa'
                local stepA = 0.6180339887*`stepMin'+0.3819660113*`stepMax'
                matrix theta = tempMat+`stepA'*direc
                mkMeang
                matrix fA = mg*W*mg'
                local fa = fA[1,1]
                di "(fa, stepA) = (`fa', `stepA')"
            }
            else {
                local stepMin= `stepA'
                local stepA = `stepB'
                local fa = `fb'
                local stepB = 0.6180339887*`stepMax'+0.3819660113*`stepMin'
                matrix theta = tempMat+`stepB'*direc
                mkMeang
                matrix fB = mg*W*mg'
                local fb = fB[1,1]
                di "(fb, stepB) = (`fb', `stepB')"
            }
            if `stepB'-`stepA'<1e-5 & abs(`fa'-`fb') < 1e-5 {
                continue, break
            }
        }
        if `fa'<`fb' {
            local stepSize=`stepA'
        }
        else {
            local stepSize=`stepB'
        }
        matrix theta = tempMat+`stepSize'*direc
        /***********************************************************/

        * DFP variable metric update of the inverse Hessian approximation.
        mkMeang
        matrix obj = mg*W*mg'
        local f = obj[1,1]
        mkGradg
        matrix dfNew = Gradg*(W+W')*mg'
        matrix gamma = dfNew-df
        matrix delta = `stepSize'*direc
        matrix B = delta*delta'*inv(delta'*gamma)
        matrix C = -invH*gamma*gamma'*invH'*inv(gamma'*invH*gamma)
        matrix invH = invH+B+C
        matrix dfSqr = dfNew'*dfNew
        local df2 = dfSqr[1,1]
        di " "
        di " "
        di in g "(f, df2, stepSize) =(`f', `df2', `stepSize')"
        di " "
        matrix theta2 = theta'
        mat list theta2
        * Converged when the objective, its gradient, and the step are all tiny.
        if (`df2'< $k*1e-2) & (`f'< $k*1e-2) & (`df2'*`f'<1e-5) & (`stepSize'< 1e-4) {
            di in y " "
            di in y "============ The optimization converges after `itera' iterations ================"
            continue, break
        }
        if (`itera' >= 200) {
            di in y " "
            di in y "============ The optimization does not converge =================================="
        }
    }
end

* Two-stage GMM: stage 1 uses W = I(m); stage 2 re-optimizes with the
* efficient weighting matrix W = inv(G'G/N), then reports estimates,
* standard errors, and the overidentifying restrictions (J) test.
cap pro drop gmm
pro def gmm
    setParameters
    matrix W=I($m)                       /* 1st-stage weighting matrix = I(m) */
    optimize
    matrix stage1_theta=theta'
    mkG
    matrix W=inv(G'*G*(1/_N))            /* efficient 2nd-stage weighting matrix */
    optimize
    matrix stage2_theta=theta'
    mkMeang
    matrix W=inv(G'*G*(1/_N))
    mkGradg
    mat list Gradg
    mat list W
    mat list mg
    matrix dfNew = Gradg*(W+W')*mg'
    mat list dfNew
    matrix Vgmm=inv(Gradg*W*Gradg')*(1/_N)
    * Element-wise sqrt of diag(Vgmm) via Cholesky of the diagonal matrix.
    matrix SE=cholesky(diag(vecdiag(Vgmm)))
    matrix nf=(_N)*(mg*W*mg')
    local Waldstat=nf[1,1]
    local dff=$m-$k                      /* d.f. = # overidentifying restrictions */
    local Pvalue=chi2tail(`dff',`Waldstat')
    di in g " "
    di in g "1st stage parameter vector:"
    matrix list stage1_theta
    di in g " "
    di in g "2nd stage parameter vector:"
    matrix list stage2_theta
    di in g " "
    di in g "Estimated Std. Err.:"
    matrix list SE
    di in g " "
    di in g "Overidentifying restriction test:"
    di in g " "
    di in g "Waldstat=`Waldstat'; P-value=`Pvalue'"
end

gmm