## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment  = "#>"
)
library(RenyiExtropy)

## ----prob-vector--------------------------------------------------------------
p <- c(0.2, 0.5, 0.3)   # valid 3-outcome distribution

## ----shannon------------------------------------------------------------------
shannon_entropy(p)               # three-outcome distribution
shannon_entropy(rep(0.25, 4))    # uniform: H = log(4)
shannon_entropy(c(1, 0, 0))      # degenerate: H = 0
normalized_entropy(p)            # H(p) / log(n), always in [0, 1]
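
# Manual check against the definitions (natural-log units, matching R's log()):
# H(p) = -sum(p * log(p)), normalized by log(n).
-sum(p * log(p))                      # should match shannon_entropy(p)
shannon_entropy(p) / log(length(p))   # should match normalized_entropy(p)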

## ----renyi-entropy------------------------------------------------------------
renyi_entropy(p, q = 2)         # collision entropy
renyi_entropy(p, q = 0.5)       # Renyi entropy, q = 0.5
renyi_entropy(p, q = 1)         # limit: equals shannon_entropy(p)
shannon_entropy(p)               # same value
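
# Manual check of the order-q definition H_q(p) = log(sum(p^q)) / (1 - q),
# a base-R sketch in natural-log units; here q = 2.
log(sum(p^2)) / (1 - 2)          # should match renyi_entropy(p, q = 2)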

## ----tsallis------------------------------------------------------------------
tsallis_entropy(p, q = 2)
tsallis_entropy(p, q = 1)       # limit: equals shannon_entropy(p)
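
# Manual check of the definition S_q(p) = (1 - sum(p^q)) / (q - 1); here q = 2.
(1 - sum(p^2)) / (2 - 1)         # should match tsallis_entropy(p, q = 2)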

## ----extropy------------------------------------------------------------------
extropy(p)
shannon_extropy(p)               # identical
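
# Manual check against the definition J(p) = -sum((1 - p) * log(1 - p)),
# assuming the package follows the standard (Lad et al.) extropy.
-sum((1 - p) * log(1 - p))       # should match extropy(p)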

## ----renyi-extropy------------------------------------------------------------
renyi_extropy(p, q = 2)
renyi_extropy(p, q = 1)          # limit: equals extropy(p)
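
# Numerical sanity check of the q -> 1 limit (assumes the implementation
# is stable for q near 1):
renyi_extropy(p, q = 1 + 1e-6)   # should approach extropy(p)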

# n = 2: Renyi extropy == Renyi entropy
renyi_extropy(c(0.4, 0.6), q = 2)
renyi_entropy(c(0.4, 0.6), q = 2)

# Maximum Renyi extropy over n outcomes (uniform distribution)
max_renyi_extropy(3)
renyi_extropy(rep(1/3, 3), q = 2)

## ----joint--------------------------------------------------------------------
Pxy <- matrix(c(0.2, 0.3, 0.1, 0.4), nrow = 2, byrow = TRUE)

joint_entropy(Pxy)               # H(X, Y)
conditional_entropy(Pxy)         # H(Y | X) = H(X,Y) - H(X)
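
# Manual check of the chain rule, assuming rows of Pxy index X (so the
# X-marginal is rowSums(Pxy)):
joint_entropy(Pxy) - shannon_entropy(rowSums(Pxy))   # should match conditional_entropy(Pxy)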

# Independent distributions: H(X,Y) = H(X) + H(Y)
px <- c(0.4, 0.6)
py <- c(0.3, 0.7)
Pxy_indep <- outer(px, py)
joint_entropy(Pxy_indep)
shannon_entropy(px) + shannon_entropy(py)

## ----cond-renyi---------------------------------------------------------------
conditional_renyi_extropy(Pxy, q = 2)
conditional_renyi_extropy(Pxy, q = 1)   # limit: conditional Shannon extropy
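
# Numerical sanity check near the q = 1 limit (again assumes stability
# for q near 1):
conditional_renyi_extropy(Pxy, q = 1 + 1e-6)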

## ----kl-----------------------------------------------------------------------
q_dist <- c(0.3, 0.4, 0.3)
kl_divergence(p, q_dist)         # KL(P || Q)
kl_divergence(q_dist, p)         # KL(Q || P) -- asymmetric
kl_divergence(p, p)              # 0
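
# Manual check against the definition KL(P || Q) = sum(p * log(p / q)),
# natural-log units:
sum(p * log(p / q_dist))         # should match kl_divergence(p, q_dist)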

## ----js-----------------------------------------------------------------------
js_divergence(p, q_dist)         # symmetric
js_divergence(q_dist, p)         # same value
js_divergence(p, p)              # 0
js_divergence(c(1, 0), c(0, 1))  # maximum: log(2)
log(2)                           # reference value
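
# Manual check, assuming the standard mixture-based definition:
# JS(P, Q) is the mean KL divergence to the mixture M = (P + Q) / 2.
m <- (p + q_dist) / 2
0.5 * kl_divergence(p, m) + 0.5 * kl_divergence(q_dist, m)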

## ----cross--------------------------------------------------------------------
cross_entropy(p, q_dist)
shannon_entropy(p) + kl_divergence(p, q_dist)   # same value
cross_entropy(p, p)                              # equals shannon_entropy(p)
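
# Manual check against the definition H(P, Q) = -sum(p * log(q)):
-sum(p * log(q_dist))            # should match cross_entropy(p, q_dist)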

## ----validation, error = TRUE-------------------------------------------------
# Each call gets its own try() so every validation error is shown; a single
# try({...}) block would stop at the first error and skip the rest.
try(shannon_entropy(c(0.2, 0.3, 0.1)))   # does not sum to 1
try(renyi_entropy(p, q = NA))            # NA not allowed
try(max_renyi_extropy(1))                # n must be >= 2
try(kl_divergence(p, c(0.5, 0.5)))       # length mismatch

