## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.width = 6,
  fig.height = 4,
  eval = FALSE
)

## -----------------------------------------------------------------------------
# library(compositional.mle)
# 
# # Generate data
# set.seed(42)
# y <- rnorm(200, mean = 5, sd = 2)
# 
# # Define the log-likelihood up to an additive constant
# # (numDeriv handles derivatives by default)
# ll <- function(theta) {
#   mu <- theta[1]; sigma <- theta[2]
#   -length(y) * log(sigma) - 0.5 * sum((y - mu)^2) / sigma^2
# }
# 
# problem <- mle_problem(loglike = ll)
# 
# # Coarse-to-fine: grid search for a starting region, then L-BFGS-B
# strategy <- grid_search(lower = c(0, 0.5), upper = c(10, 5), n = 5) %>>%
#   lbfgsb(lower = c(-Inf, 1e-4), upper = c(Inf, Inf))
# 
# result <- strategy(problem, theta0 = c(0, 1))
# result
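
## -----------------------------------------------------------------------------
# # Sanity check: a normal sample has closed-form MLEs, mu-hat = mean(y)
# # and sigma-hat = sqrt(mean((y - mean(y))^2)); the values below should
# # match the estimates printed above.
# c(mu = mean(y), sigma = sqrt(mean((y - mean(y))^2)))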

## -----------------------------------------------------------------------------
# # Race alternative optimizers on the same problem with the %|% combinator
# race <- bfgs() %|% nelder_mead() %|% gradient_ascent()
# result_race <- race(problem, theta0 = c(0, 1))
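
## -----------------------------------------------------------------------------
# # Each branch of the race is itself a strategy and can be run on its
# # own; comparing the attained log-likelihoods (as extracted later in
# # this vignette) shows what the combinator has to choose between.
# r_bfgs <- bfgs()(problem, theta0 = c(0, 1))
# r_nm   <- nelder_mead()(problem, theta0 = c(0, 1))
# c(bfgs = as.numeric(logLik(r_bfgs)), nelder_mead = as.numeric(logLik(r_nm)))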

## -----------------------------------------------------------------------------
# # Multistart: rerun the composed strategy from n random starting points
# # drawn by the sampler
# robust <- with_restarts(
#   gradient_ascent() %>>% bfgs(),
#   n = 10,
#   sampler = uniform_sampler(lower = c(-10, 0.1), upper = c(10, 10))
# )
# result_robust <- robust(problem, theta0 = c(0, 1))
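
## -----------------------------------------------------------------------------
# # For comparison, a single run of the same pipeline from one fixed
# # start; assuming with_restarts keeps the best of its n runs, the
# # restarted version should do at least as well.
# single <- (gradient_ascent() %>>% bfgs())(problem, theta0 = c(0, 1))
# c(single = as.numeric(logLik(single)),
#   restarted = as.numeric(logLik(result_robust)))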

## -----------------------------------------------------------------------------
# library(algebraic.mle)
# 
# # Extract estimates and uncertainty
# params(result)       # parameter estimates (theta-hat)
# se(result)           # standard errors
# vcov(result)         # variance-covariance matrix
# as.numeric(logLik(result))   # log-likelihood at the MLE
# confint(result)      # 95% confidence intervals
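
## -----------------------------------------------------------------------------
# # The Wald-style 95% interval for mu can be reconstructed by hand from
# # the estimate and its standard error; it should closely match the
# # first row of confint(result) above.
# params(result)[1] + c(-1, 1) * qnorm(0.975) * se(result)[1]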

## -----------------------------------------------------------------------------
# # Three independent datasets from the same population
# set.seed(123)
# y1 <- rnorm(50,  mean = 10, sd = 3)
# y2 <- rnorm(100, mean = 10, sd = 3)
# y3 <- rnorm(75,  mean = 10, sd = 3)
# 
# # Fit each dataset independently, reusing the same log-likelihood kernel
# make_problem <- function(data) {
#   mle_problem(loglike = function(theta) {
#     mu <- theta[1]; sigma <- theta[2]
#     -length(data) * log(sigma) - 0.5 * sum((data - mu)^2) / sigma^2
#   })
# }
# 
# solver <- lbfgsb(lower = c(-Inf, 1e-4), upper = c(Inf, Inf))
# 
# fit1 <- solver(make_problem(y1), theta0 = c(0, 1))
# fit2 <- solver(make_problem(y2), theta0 = c(0, 1))
# fit3 <- solver(make_problem(y3), theta0 = c(0, 1))
# 
# # Combine: inverse-variance weighting via Fisher information
# combined <- mle_weighted(list(fit1, fit2, fit3))
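
## -----------------------------------------------------------------------------
# # The weighting can be checked by hand for a single coordinate:
# # inverse-variance weights w_i = 1 / se_i^2 give the combined estimate
# # sum(w_i * theta_i) / sum(w_i) with SE 1 / sqrt(sum(w_i)). (The package
# # works with full Fisher information matrices; this scalar version is
# # only an illustration for mu.)
# wts <- 1 / c(se(fit1)[1], se(fit2)[1], se(fit3)[1])^2
# ths <- c(params(fit1)[1], params(fit2)[1], params(fit3)[1])
# c(estimate = sum(wts * ths) / sum(wts), se = 1 / sqrt(sum(wts)))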

## -----------------------------------------------------------------------------
# # Individual SEs vs combined
# data.frame(
#   Source   = c("Lab 1 (n=50)", "Lab 2 (n=100)", "Lab 3 (n=75)", "Combined"),
#   Estimate = c(params(fit1)[1], params(fit2)[1],
#                params(fit3)[1], params(combined)[1]),
#   SE       = c(se(fit1)[1], se(fit2)[1], se(fit3)[1], se(combined)[1])
# )

## -----------------------------------------------------------------------------
# library(hypothesize)
# 
# # Wald test: is mu = 10?
# w <- wald_test(estimate = params(combined)[1],
#                se = se(combined)[1],
#                null_value = 10)
# w
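
## -----------------------------------------------------------------------------
# # The Wald statistic by hand: z = (estimate - null) / se with a
# # two-sided normal p-value; this should agree with wald_test() above.
# z <- (params(combined)[1] - 10) / se(combined)[1]
# c(z = z, p = 2 * pnorm(-abs(z)))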

## -----------------------------------------------------------------------------
# # Likelihood ratio test: does sigma differ from 3?
# # Compare full model (mu, sigma free) vs restricted (mu free, sigma = 3)
# ll_full <- as.numeric(logLik(fit2))  # full model log-likelihood
# # The restricted log-likelihood must use the same kernel (additive
# # constants dropped) as ll_full to be comparable; under sigma = 3 the
# # restricted MLE of mu is mean(y2)
# ll_null <- -length(y2) * log(3) - 0.5 * sum((y2 - mean(y2))^2) / 3^2
# 
# lr <- lrt(null_loglik = ll_null, alt_loglik = ll_full, dof = 1)
# lr
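
## -----------------------------------------------------------------------------
# # The LRT statistic by hand: 2 * (ll_full - ll_null) is asymptotically
# # chi-squared with 1 degree of freedom under H0.
# stat <- 2 * (ll_full - ll_null)
# c(stat = stat, p = pchisq(stat, df = 1, lower.tail = FALSE))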

## -----------------------------------------------------------------------------
# # Each lab tests H0: mu = 10
# w1 <- wald_test(params(fit1)[1], se(fit1)[1], null_value = 10)
# w2 <- wald_test(params(fit2)[1], se(fit2)[1], null_value = 10)
# w3 <- wald_test(params(fit3)[1], se(fit3)[1], null_value = 10)
# 
# # Combine independent tests
# combined_test <- fisher_combine(w1, w2, w3)
# combined_test
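
## -----------------------------------------------------------------------------
# # Fisher's method by hand: -2 * sum(log(p_i)) is chi-squared with 2k
# # degrees of freedom for k independent p-values (here k = 3); this
# # should match fisher_combine() above.
# ps <- c(pval(w1), pval(w2), pval(w3))
# stat <- -2 * sum(log(ps))
# c(stat = stat, p = pchisq(stat, df = 2 * length(ps), lower.tail = FALSE))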

## -----------------------------------------------------------------------------
# # Multiple testing correction
# adjusted <- adjust_pval(list(w1, w2, w3), method = "BH")
# adjusted
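
## -----------------------------------------------------------------------------
# # Cross-check against base R: stats::p.adjust implements the same
# # Benjamini-Hochberg correction on raw p-values.
# p.adjust(c(pval(w1), pval(w2), pval(w3)), method = "BH")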

## -----------------------------------------------------------------------------
# pval(w)                    # extract p-value
# test_stat(w)               # extract test statistic
# dof(w)                     # degrees of freedom
# is_significant_at(w, 0.05) # check significance at alpha = 0.05
# confint(w)                 # confidence interval (for Wald tests)

## -----------------------------------------------------------------------------
# # Strategy 1: numDeriv (the default); no derivative code required
# p1 <- mle_problem(loglike = ll)
# 
# # Strategy 2: hand-coded analytic derivatives
# p2 <- mle_problem(
#   loglike = ll,
#   score = function(theta) {
#     mu <- theta[1]; sigma <- theta[2]; n <- length(y)
#     c(sum(y - mu) / sigma^2,
#       -n / sigma + sum((y - mu)^2) / sigma^3)
#   },
#   fisher = function(theta) {
#     n <- length(y); sigma <- theta[2]
#     matrix(c(n / sigma^2, 0, 0, 2 * n / sigma^2), 2, 2)
#   }
# )
# 
# # Strategy 3: automatic differentiation
# # Any AD package that provides a gradient and a Hessian can be plugged
# # in here; note that the observed information is the negative Hessian
# # of the log-likelihood, e.g.:
# # p3 <- mle_problem(
# #   loglike = ll,
# #   score   = function(theta) ad_pkg::score(ll, theta),
# #   fisher  = function(theta) -ad_pkg::hessian(ll, theta)
# # )
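
## -----------------------------------------------------------------------------
# # A consistency check: the hand-coded score should match numDeriv's
# # numerical gradient of ll at any test point (numDeriv is the package
# # mle_problem falls back on by default).
# theta_test <- c(4.8, 2.1)
# analytic <- c(sum(y - theta_test[1]) / theta_test[2]^2,
#               -length(y) / theta_test[2] +
#                 sum((y - theta_test[1])^2) / theta_test[2]^3)
# all.equal(analytic, numDeriv::grad(ll, theta_test))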

## ----likelihood-model-bridge, eval=FALSE--------------------------------------
# library(likelihood.model)
# 
# # model is a likelihood_model object from likelihood.model
# problem <- mle_problem(model, data = my_data)
# 
# # Now solve with any composed strategy
# solver <- lbfgsb(lower = c(-Inf, 1e-4), upper = c(Inf, Inf))
# result <- solver(problem, theta0 = c(0, 1))

