Empirical likelihood
Python package for empirical likelihood classes.
Includes:
- Data class used to store the data to be provided to the EL framework
- Block class This class is designed to accommodate the block-wise EL.
- Parameter the EL is implemented as a method in the class of the parameter. Includes the following methods:
- Estimates the actual EL likelihood for a given estimating equation that relates the parameter to the data sample provided
- Calculates correction factor in the case of block-wise EL
- Provides the confidence level at a desired percentage
In [1]:
import sys, os, traceback, logging
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as so
import scipy.stats as ss
class data:
    """Container for the sample handed to the EL framework.

    Wraps the raw observations together with the block configuration used
    by block-wise empirical likelihood.
    """

    def __init__(self, x=None, block_length=1, block_shift=1):
        # x: numpy array of observations (first axis = sample index).
        # Fail fast with a clear message instead of the opaque
        # AttributeError that x.shape would raise below.
        if x is None:
            raise ValueError("data requires a sample array x")
        self.x = x
        self.n = x.shape[0]  # sample size
        # Block configuration; length/shift of 1 means "no blocking".
        self.block = block(count=x.shape[0], length=block_length,
                           shift=block_shift)
class block:
    """Block configuration for block-wise empirical likelihood.

    Splits a moment-condition series into (possibly overlapping) blocks of
    ``length`` observations whose starts are ``shift`` apart, and tracks
    the small-sample correction exponent used by the EL computation.
    """

    def __init__(self, count, length=1, shift=1):
        self.length = length  # observations per block
        self.shift = shift    # offset between consecutive block starts
        self.count = count    # number of blocks (recomputed by block_mc)
        self.corr = 1         # EL correction exponent; 1 means "no blocking"

    def block_mc(self, mc):
        """Average the moment conditions ``mc`` over sliding blocks.

        Returns an array with one entry (the block mean) per block, and
        updates ``self.count`` and ``self.corr`` as a side effect.
        """
        n = mc.shape[0]
        # Number of complete blocks with starts 0, shift, 2*shift, ...
        # BUGFIX: the original int(np.ceil((n - length)/shift)) dropped the
        # final block whenever (n - length) was an exact multiple of shift
        # (e.g. n=4, length=2, shift=2 yielded 1 block instead of 2).
        count = (n - self.length)//self.shift + 1
        self.count = count
        self.el_correction(n)  # refresh self.corr for the new block count
        bmc = np.empty(count)
        for i in range(count):
            bi = mc[i*self.shift : i*self.shift + self.length]
            bmc[i] = np.sum(bi)/float(self.length)
        return bmc

    def el_correction(self, n):
        """Set and return the correction exponent n / (count * length)."""
        self.corr = n/(self.count*self.length)
        return self.corr
class parameter:
    """Parameter of interest evaluated via empirical likelihood (EL).

    The EL ratio is computed for the estimating equation selected by
    ``estim_eq``: ``'mean'`` (sample mean), ``'ar-least'`` (AR(p)
    least-squares), ``'arma-lad'`` (ARMA(p,q) least absolute deviation)
    or ``'tree'`` (Owen's pine-tree width indicator).
    """

    def __init__(self, value, estim_eq='mean'):
        # value: hypothesised parameter value; scalar or array depending
        # on the estimating equation (2-D [AR, MA] for 'arma-lad').
        self.value = value
        self.error = None
        self.estim_eq = estim_eq

    def __log_star(self, z, n):
        """Owen's log* function: log(z) for z > 1/n, quadratic below.

        The second-order Taylor continuation around z = 1/n keeps the
        objective finite and smooth, so an unconstrained minimiser cannot
        step outside the log domain.
        """
        log_index = z > 1./n
        res = np.zeros_like(z)
        z_over = z[log_index]
        z_under = z[~log_index]
        res[log_index] = np.log(z_over)
        # quadratic continuation of log around z = 1/n
        res[~log_index] = np.log(1./n) - 1.5 + 2*z_under*n - (z_under*n)**2/2
        return res

    def __el_function(self, lam, *args):
        """Negative EL log-likelihood as a function of the multiplier lam."""
        el = -np.sum(self.__log_star(1 + np.dot(self.mc, lam),
                                     self.mc.shape[0]))
        return el

    def get_moment_condtition(self, data):
        '''Build the moment conditions for the configured estimating equation.

        Stores the (possibly block-averaged) moment conditions in
        ``self.mc`` and returns them.
        '''
        if self.estim_eq == 'mean':
            # moment condition for sample mean
            mc = data.x - self.value
        elif self.estim_eq == 'ar-least':
            # least-squares moment condition for an AR(p) process;
            # self.value holds the p AR coefficients
            mc = []
            for i in range(len(data.x)):
                if i < 1:
                    continue  # need at least one lagged observation
                y_temp = []
                for j in range(len(self.value)):
                    y_temp.append(data.x[i-j-1])
                foo = np.array(y_temp)*(data.x[i]-np.dot(self.value, y_temp))
                mc.append(foo)
            mc = np.array(mc)
        elif self.estim_eq == 'arma-lad':
            # moment condition for an ARMA(p,q) process (least absolute
            # deviation); self.value is expected as a 2-D array
            # [AR coefficients, MA coefficients]
            mc = []
            noise = np.zeros_like(data.x)
            for i in range(len(data.x)):
                if i < max(len(self.value[0]), len(self.value[1])):
                    continue  # skip until enough lags are available
                val = data.x[i]
                for j in range(len(self.value[0])):
                    val -= self.value[0][j]*data.x[i-j-1]
                for j in range(len(self.value[1])):
                    val -= self.value[1][j]*noise[i-j-1]
                noise[i] = val
            mc = abs(noise)
        elif self.estim_eq == 'tree':
            # moment condition for Owen's pine-tree width estimation
            w, z = [], []
            for i in range(len(data.x)):
                if i < 10:
                    continue
                coef = 1./max(1, min(i, 10))
                # residual against the trailing-10 moving average
                w.append(data.x[i] - coef*np.sum(data.x[i-min(i, 10):i]))
                if w[-1] < -20.:
                    z.append(1)
                else:
                    z.append(0)
            mc = np.array(z) - self.value
        if data.block.length > 1:
            # block-wise EL: average the moment conditions over blocks
            mc = data.block.block_mc(mc)
        self.mc = mc
        return mc

    def calc_el(self, data):
        """Compute the EL ratio of ``self.value`` for the given data.

        Minimises the negative EL log-likelihood over the Lagrange
        multiplier, stores the implied observation probabilities in
        ``self.el_prob`` and the (block-corrected) EL ratio in
        ``self.el_val``, and returns the latter.
        """
        self.data = data
        self.get_moment_condtition(data)
        # BUGFIX: 1-D moment conditions (estim_eq 'mean', 'arma-lad',
        # 'tree') have no second axis, so mc.shape[1] raised IndexError
        # and np.dot(mc, lam) mis-aligned; reshape to an (n, 1) column so
        # the 1-D and 2-D cases are handled uniformly.
        mc = np.asarray(self.mc)
        if mc.ndim == 1:
            mc = mc.reshape(-1, 1)
        self.mc = mc
        # one multiplier per moment-condition column (float start values)
        init_val = np.zeros(mc.shape[1])
        f_min = so.minimize(self.__el_function, init_val)
        el_prob = 1./data.n/(1. + np.dot(self.mc, f_min.x))
        el_val = np.exp(np.sum(np.log(data.n*el_prob)))
        # block-wise EL correction exponent (corr == 1 when no blocking)
        el_val = el_val**data.block.corr
        self.el_prob = el_prob
        self.el_val = el_val
        return el_val

    def get_el(self, data):
        """Safe wrapper around calc_el: on failure, log the traceback,
        stash it in ``self.err`` and return None."""
        try:
            el = self.calc_el(data)
            return el
        except Exception:
            self.err = traceback.format_exc()
            logging.error(traceback.format_exc())

    def confidence(self, conf_level=.95):
        """EL-ratio cut-off for the given confidence level via Wilks'
        theorem (chi-square with 1 degree of freedom)."""
        return np.exp(-.5*ss.chi2.ppf(conf_level, 1))