function estimate_markov(X::Vector, nstates::Int64)
    P = zeros(nstates, nstates)
    n = length(X) - 1
    for t = 1:n
        # add one each time the transition X[t] -> X[t+1] is observed
        P[X[t], X[t+1]] += 1
    end
    for i = 1:nstates
        # normalize each row by its total number of counts
        P[i, :] .= P[i, :] / sum(P[i, :])
    end
    return P
end
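For example, a quick check of the function above on a short chain (a sketch, assuming the definition as written) shows the issue discussed below: a state that never occurs produces a row of NaNs rather than a valid distribution.

X = [1, 2, 1, 1, 2]        # state 3 never occurs
P = estimate_markov(X, 3)
# P[1, :] == [1/3, 2/3, 0] and P[2, :] == [1, 0, 0] are valid probability rows,
# but P[3, :] == [NaN, NaN, NaN] because sum(P[3, :]) == 0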
Note that this leads to division by zero for states that are never visited, leaving those rows filled with NaNs. The Python code handles this by dropping such states.
This is already accomplished on the Python side in https://github.com/QuantEcon/QuantEcon.py/pull/658
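A minimal sketch of the same idea in Julia (the function name is hypothetical, not part of QuantEcon.jl): restrict the estimate to states that actually appear in the sample, mirroring the "drop unvisited states" approach taken on the Python side.

# Hypothetical sketch: estimate the transition matrix over visited states only
function estimate_markov_visited(X::Vector)
    visited = sort(unique(X))                      # states that appear at least once
    index = Dict(s => i for (i, s) in enumerate(visited))
    k = length(visited)
    P = zeros(k, k)
    for t = 1:(length(X) - 1)
        P[index[X[t]], index[X[t+1]]] += 1
    end
    for i = 1:k
        rowsum = sum(P[i, :])
        # a visited state can still have zero outgoing counts if it only
        # appears as the final observation; leave that row as zeros here
        rowsum > 0 && (P[i, :] .= P[i, :] / rowsum)
    end
    return P, visited
end

Returning the list of visited states alongside the matrix keeps the mapping from rows of P back to the original state labels explicit.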
An ongoing discussion for the Julia side is here:
@msilva913 proposes the function above.