"""
A simple evaluator for various corpus models.
"""

import sys

import pkg_resources

# Optional: uncomment to enable the psyco JIT (Python 2 only) for a speedup.
#import psyco
#psyco.full()

import ucdresolve
from ucdresolve import corpus
from ucdresolve import evaluation
from ucdresolve import ucdmodel


def getSimpleVMCorpus(input):
	"""Returns a plain vector model corpus built from input.

	input is a source accepted by VectorModelCorpus.fromSource; UCDs are
	parsed from Vizier-formatted lines.
	"""
	ucdFactory = ucdmodel.VectorModelUCD.fromVizierLine
	return corpus.VectorModelCorpus.fromSource(input, ucdFactory=ucdFactory)


def getCumulativeVMCorpus(input):
	"""Returns a cumulative vector model corpus built from input.

	input is a source accepted by CumulativeVectorModelCorpus.fromSource;
	UCDs are parsed from Vizier-formatted lines.
	"""
	ucdFactory = ucdmodel.VectorModelUCD.fromVizierLine
	return corpus.CumulativeVectorModelCorpus.fromSource(
		input, ucdFactory=ucdFactory)


def getTestScoreBasedCorpus(input):
	"""Returns a colloc-weighting ("test score based") corpus built from input.

	input is a source accepted by CollocWeightingCorpus.fromSource; UCDs
	are parsed from Vizier-formatted lines.
	"""
	ucdFactory = ucdmodel.VectorModelUCD.fromVizierLine
	return corpus.CollocWeightingCorpus.fromSource(input, ucdFactory=ucdFactory)


def getBayesianCorpus(input):
	"""Returns a Bayesian corpus built from input.

	input is a source accepted by BayesianCorpus.fromSource; UCDs are
	parsed from Vizier-formatted lines.
	"""
	ucdFactory = ucdmodel.VectorModelUCD.fromVizierLine
	return corpus.BayesianCorpus.fromSource(input, ucdFactory=ucdFactory)


# Maps a corpus nickname (the value of the --corpus option) to the
# package-relative path of its data file within the ucdresolve package.
corpora = {
	"tiny": "data/testData/testData.txt",
	"small": "data/testData/testData3.txt",
	"medium": "data/testData/testData10.txt",
	"large": "data/testData/workingSet.txt",
}

# Maps a model nickname (the value of the --model option) to the function
# that builds the corresponding corpus model from a data source.
modelBuilders = {
	"sVM": getSimpleVMCorpus,
	"cVM": getCumulativeVMCorpus,
	"tsb": getTestScoreBasedCorpus,
	"bay": getBayesianCorpus,
}

def parseCmdLine():
	"""Parses the command line and returns a triple of

	(options, corpus file name, model builder function),

	where the corpus and model have been validated against the corpora
	and modelBuilders registries.  Any invalid or leftover argument makes
	the parser exit with an error message.
	"""
	from optparse import OptionParser
	parser = OptionParser(usage="%prog [options]")
	parser.add_option("-c", "--corpus", help="use CORPUS to evaluate, which"
		" is one of %s."%(", ".join(corpora.keys())), metavar="CORPUS",
		action="store", type="str", default="small", dest="corpus")
	parser.add_option("-m", "--model", help="evaluate MODEL, which is one"
		" of %s."%(", ".join(modelBuilders.keys())), metavar="MODEL",
		action="store", type="str", default="sVM", dest="model")
	parser.add_option("-d", "--dump", help="dump stats to evalReport.pickle",
		action="store_true", dest="dumpEval")
	opts, args = parser.parse_args()
	if opts.corpus not in corpora:
		parser.error("%s is not a valid corpus"%opts.corpus)
	if opts.model not in modelBuilders:
		parser.error("%s is not a valid model"%opts.model)
	if args:
		# Report stray positional arguments the same way as the other
		# validation failures instead of dumping the full help text and
		# exiting with a different status.
		parser.error("unexpected argument(s): %s"%" ".join(args))
	return opts, corpora[opts.corpus], modelBuilders[opts.model]


def runOneEval(result, corpusName, modelBuilder):
	"""Runs a 4-fold cross validation of modelBuilder on the package data
	resource corpusName, accumulating statistics in result.
	"""
	source = pkg_resources.resource_stream('ucdresolve', corpusName)
	folds = evaluation.iterCrossValidationIterable(4, source)
	for trainingSet, testSet in folds:
		evaluation.evaluateModel(trainingSet, testSet, modelBuilder,
			result, verbose=False)

if __name__=="__main__":
	opts, corpusName, modelBuilder = parseCmdLine()
	result = evaluation.EvaluationResult()
	runOneEval(result, corpusName, modelBuilder)
	print result.getSynopsis()
	print result.getResLine()
	if opts.dumpEval:
		result.save(open("evalReport.pickle", "w"))
