# inspired by https://nbviewer.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter5_LossFunctions/Ch5_LossFunctions_PyMC_current.ipynb
begin
using Turing
using DataFramesMeta
using MCMCChains
using PlutoUI; TableOfContents()
using StatisticalRethinking
using StatsFuns
using CairoMakie
using Optim
end
"""
    penalty(actual, estimated; α=2)

Asymmetric squared-error loss: an underestimate (`estimated < actual`) is
penalized `α` times more heavily than an overestimate of the same size.
"""
function penalty(actual, estimated; α=2)
    err2 = (actual - estimated)^2
    return estimated < actual ? err2 * α : err2
end
penalty (generic function with 1 method)
# Scratch cell: how a small probability shrinks under exponents 2, 1.5 and 1.
0.01 .^ [2,1.5,1]
3-element Vector{Float64}: 0.0001 0.001 0.01
# Absolute-error loss: |actual − estimated| (minimized by the posterior median).
penalty_abs(actual, estimated) = abs(estimated - actual)
penalty_abs (generic function with 1 method)
# Squared-error loss: (actual − estimated)² (minimized by the posterior mean).
penalty_SE(actual, estimated) = (estimated - actual)^2
penalty_SE (generic function with 1 method)
# True underlying claim probability used to simulate the data.
q_true = 0.025
0.025
# Number of simulated exposures (policies).
N = 100
100
# Simulate N Bernoulli(q_true) claim indicators as a BitVector.
simulated_claims = rand(N) .< q_true # true where a claim occurred
100-element BitVector: 0 0 0 0 0 0 0 ⋮ 0 0 0 0 0 0
# Total number of simulated claims.
sum(simulated_claims)
5
# Beta-Binomial model: flat Beta(1,1) prior on the claim rate μ,
# Binomial likelihood for the observed claim count given the exposures.
@model function model(claims,exposures)
μ ~ Beta(1,1)
claims ~ Binomial(exposures,μ)
end
model (generic function with 2 methods)
# Draw 2000 posterior samples of μ with the NUTS sampler.
chain = sample(model(sum(simulated_claims),N),NUTS(),2000)
iteration | chain | μ | lp | n_steps | is_accept | acceptance_rate | log_density | ... | |
---|---|---|---|---|---|---|---|---|---|
1 | 1001 | 1 | 0.0320919 | -5.62942 | 3.0 | 1.0 | 0.575275 | -5.62942 | |
2 | 1002 | 1 | 0.130579 | -7.51092 | 1.0 | 1.0 | 0.390409 | -7.51092 | |
3 | 1003 | 1 | 0.130579 | -7.51092 | 3.0 | 1.0 | 0.066623 | -7.51092 | |
4 | 1004 | 1 | 0.109993 | -6.29368 | 3.0 | 1.0 | 0.354252 | -6.29368 | |
5 | 1005 | 1 | 0.04638 | -4.84754 | 1.0 | 1.0 | 1.0 | -4.84754 | |
6 | 1006 | 1 | 0.0325044 | -5.59371 | 1.0 | 1.0 | 0.72508 | -5.59371 | |
7 | 1007 | 1 | 0.0603535 | -4.68454 | 3.0 | 1.0 | 0.651125 | -4.68454 | |
8 | 1008 | 1 | 0.0587535 | -4.68242 | 1.0 | 1.0 | 1.0 | -4.68242 | |
9 | 1009 | 1 | 0.0587535 | -4.68242 | 1.0 | 1.0 | 0.797157 | -4.68242 | |
10 | 1010 | 1 | 0.0587535 | -4.68242 | 3.0 | 1.0 | 0.361234 | -4.68242 | |
... |
# Compact posterior summary (mean, sd, quantiles, histogram) of μ.
PRECIS(DataFrame(chain))
┌───────┬──────────────────────────────────────────────────┐ │ param │ mean std 5.5% 50% 94.5% histogram │ ├───────┼──────────────────────────────────────────────────┤ │ μ │ 0.0595 0.0233 0.0284 0.057 0.1013 ▁▅█▇▃▂▁▁▁ │ └───────┴──────────────────────────────────────────────────┘
# find the x which minimizes the average loss, where the loss is averaged over the posterior
let
    post = chain[:μ]
    # Posterior-expected asymmetric loss of a candidate estimate `est`
    # (Optim passes the candidate as a 1-element vector).
    expected_loss(est) = mean(penalty.(post, est))
    result = optimize(expected_loss, [0.0])
    first(Optim.minimizer(result))
end
0.06611328125000002
"""
    loss_result(x, loss_function)

Posterior-expected loss of point estimate `x` under `loss_function`,
averaged over the posterior samples of μ in the global `chain`.
"""
function loss_result(x, loss_function)
    posterior = chain[:μ]
    return mean(loss_function.(posterior, x))
end
loss_result (generic function with 1 method)
Squared error is posterior mean
Absolute error is posterior median
let
    # Draw mean/median reference lines on `axis` for the posterior samples.
    function plt_measures(axis, chain)
        mn = mean(chain)
        md = median(chain)
        vlines!(axis, [mn], label="mean | $(round(mn;digits=4))")
        vlines!(axis, [md], label="median | $(round(md;digits=4))")
    end
    f = Figure()
    ax = Axis(f[2,1], title="Posterior claim density", xlabel="claim rate")
    density!(ax, vec(chain[:μ]))
    plt_measures(ax, chain[:μ])
    axislegend(ax)
    ax2 = Axis(f[1,1], title="Loss Functions", xlabel="assumed claim rate", ylabel="relative loss")
    ylims!(ax2, 0.5, 2)
    est_range = range(0., 0.1, 100)
    # Plot the posterior-expected loss curve for `loss_func` on `axis`,
    # rescaled so its minimum is 1, labeling it with the minimizing estimate.
    function plt_loss(axis, loss_func, label)
        y = loss_result.(est_range, loss_func)
        y = y ./ minimum(y)  # rescale so the curves are directly comparable
        minimizer = est_range[argmin(y)]
        # FIX: draw on the `axis` argument; the original ignored it and
        # always drew on the captured `ax2` (worked only by coincidence).
        lines!(axis, est_range, y, label="$label | $(round(minimizer;digits=4))")
    end
    ps = [
        (penalty, "Asymmetric"), (penalty_SE, "Squared Error"), (penalty_abs, "Absolute Value")]
    for (p, l) in ps
        plt_loss(ax2, p, l)
    end
    axislegend(ax2)
    linkxaxes!(ax, ax2)
    f
end
Built with Julia 1.8.5 and CairoMakie 0.10.2. To run this page locally, download this file and open it with Pluto.jl.