
Source Code for Module sentence.rankhandler

'''
Created on Apr 15, 2011

@author: Eleftherios Avramidis
'''

from parallelsentence import ParallelSentence
from dataset import DataSet
from collections import OrderedDict
import sys

class RankHandler(object):
    '''
    Collection of convenience functions for transforming parallel sentences with many
    ranks into pairwise mode and vice versa. Most of the implementations here are rather
    ugly, with many nested loops, so a more object-oriented approach would be to move
    this logic into the various DataSet types. (See the usage sketch after this listing.)
    '''

    def __init__(self, rank_name = "rank"):
        """
        @param rank_name: the name of the attribute that holds the rank value
        @type rank_name: string
        """
        self.rank_name = rank_name

    def get_multiclass_from_pairwise_set(self, parallelsentences, allow_ties = False):
        if isinstance(parallelsentences, DataSet):
            parallelsentences = parallelsentences.get_parallelsentences()

        sentences_per_judgment = OrderedDict()
        #construct groups of pairwise sentences, based on their judgment id, which is unique per group
        for parallelsentence in parallelsentences:
            jid = int(parallelsentence.get_attribute("judgement_id"))
            if jid in sentences_per_judgment:
                sentences_per_judgment[jid].append(parallelsentence)
            else:
                #if this key has not been seen before, initiate a new entry
                sentences_per_judgment[jid] = [parallelsentence]

        new_parallelsentences = []

        for jid in sentences_per_judgment:
            pairwise_sentences = sentences_per_judgment[jid]
            rank_per_system = OrderedDict()
            translations_per_system = OrderedDict()
            for pairwise_sentence in pairwise_sentences:
                rank = int(pairwise_sentence.get_attribute(self.rank_name))

                #each pairwise sentence is supposed to have only two translations
                translation1 = pairwise_sentence.get_translations()[0]
                if translation1.get_attribute("system") in rank_per_system:
                    rank_per_system[translation1.get_attribute("system")] += rank
                else:
                    rank_per_system[translation1.get_attribute("system")] = rank
                translations_per_system[translation1.get_attribute("system")] = translation1

                translation2 = pairwise_sentence.get_translations()[1]
                if translation2.get_attribute("system") in rank_per_system:
                    rank_per_system[translation2.get_attribute("system")] -= rank
                else:
                    rank_per_system[translation2.get_attribute("system")] = -1 * rank
                translations_per_system[translation2.get_attribute("system")] = translation2

            i = 0
            prev_rank = None
            translations_new_rank = []

            #experimental second pass over the best-ranked systems, currently disabled:
            #best_rank = min(rank_per_system.values())
            #best_ranked_systems = [system for system in rank_per_system if rank_per_system[system] == best_rank]
            #best_ranked_pairwise = [ps for ps in pairwise_sentences if (ps.get_translations()[0].get_attribute("system") in best_ranked_systems) and (ps.get_translations()[1].get_attribute("system") in best_ranked_systems)]
            #if len(best_ranked_pairwise) > 0:
            #    for best_ranked_system in best_ranked_systems:
            #        pos_comparisons = [int(ps.get_attribute(self.rank_name)) for ps in best_ranked_pairwise if ps.get_translations()[0].get_attribute("system") == best_ranked_system]
            #        neg_comparisons = [int(ps.get_attribute(self.rank_name)) for ps in best_ranked_pairwise if ps.get_translations()[1].get_attribute("system") == best_ranked_system]
            #        new_rank = 0.50 * (sum(pos_comparisons) - sum(neg_comparisons)) / (len(pos_comparisons) + len(neg_comparisons) + 0.01)
            #        rank_per_system[best_ranked_system] += new_rank
            #
            #for system in sorted(rank_per_system, key=lambda system: rank_per_system[system]):
            for system in rank_per_system.keys():
                if rank_per_system[system] != prev_rank:
                    i += 1
                prev_rank = rank_per_system[system]
                translation = translations_per_system[system]
                translation.add_attribute(self.rank_name, str(i))
                translations_new_rank.append(translation)

            src = pairwise_sentences[0].get_source()
            attributes = pairwise_sentences[0].get_attributes()
            del attributes[self.rank_name]
            new_parallelsentence = ParallelSentence(src, translations_new_rank, None, attributes)
            new_parallelsentences.append(new_parallelsentence)
        return new_parallelsentences

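    # Worked illustration (hypothetical numbers) of the accumulation above: for a
    # judgment group over systems A, B, C with pairwise ranks
    #   (A, B) -> -1  (A better)  => A: -1, B: +1
    #   (A, C) -> -1  (A better)  => A: -2, C: +1
    #   (B, C) -> -1  (B better)  => B:  0, C: +2
    # the accumulated scores are A=-2, B=0, C=+2 (more wins means a lower score).
    # The final loop above then walks the systems in the order they were first seen
    # and assigns 1, 2, 3, ..., bumping the counter whenever the accumulated score
    # differs from the previous one, so A=1, B=2, C=3 in this example.
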
    def get_pairwise_from_multiclass_sentence(self, parallelsentence, judgement_id, allow_ties = False, exponential = True, rename_rank = True):
        """
        Converts the ranked system translations of one sentence into many sentences containing one translation pair each,
        so that system outputs can be compared in a pairwise manner.
        @param parallelsentence: the parallel sentence that needs to be split into pairs
        @type parallelsentence: ParallelSentence
        @param judgement_id: an id that marks all pairs of this sentence as belonging to the same judgment group
        @type judgement_id: string
        @param allow_ties: sentences of equal performance (rank=0) will be included in the set, if this is set to True
        @type allow_ties: boolean
        @param exponential: if True, both orderings of each system pair are produced; otherwise each pair appears only once
        @type exponential: boolean
        @param rename_rank: if True, the original rank attribute of each translation is renamed to "orig_rank"
        @type rename_rank: boolean
        @return: a list of parallelsentences, each containing a pair of system translations and a universal rank value
        """
        source = parallelsentence.get_source()
        translations = parallelsentence.get_translations()
        pairwise_sentences = []
        systems_parsed = []

        for system_a in translations:
            for system_b in translations:
                if system_a == system_b:
                    continue
                if system_b in systems_parsed and not exponential:
                    continue
                systems_parsed.append(system_a)
                rank = self._normalize_rank(system_a, system_b)
                if not rank:
                    #no rank could be retrieved for this pair, mark it with a sentinel value
                    new_attributes = parallelsentence.get_attributes()
                    new_attributes["judgement_id"] = judgement_id
                    new_attributes[self.rank_name] = "-99"
                    pairwise_sentence = ParallelSentence(source, [system_a, system_b], None, new_attributes)
                    pairwise_sentences.append(pairwise_sentence)
                elif rank != "0" or allow_ties:
                    new_attributes = parallelsentence.get_attributes()
                    new_attributes[self.rank_name] = rank
                    new_attributes["judgement_id"] = judgement_id
                    pairwise_sentence = ParallelSentence(source, [system_a, system_b], None, new_attributes)
                    pairwise_sentences.append(pairwise_sentence)

        if rename_rank:
            for system in translations:
                #remove existing ranks
                try:
                    system.rename_attribute(self.rank_name, "orig_rank")
                except KeyError:
                    print "didn't rename rank attribute"

        return pairwise_sentences

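    # Sizing note (illustrative): for a sentence with n system translations the nested
    # loops above emit every ordered pair when exponential=True, i.e. up to n*(n-1)
    # pairwise sentences (12 for n=4), and every unordered pair when exponential=False,
    # i.e. up to n*(n-1)/2 (6 for n=4). Ties (rank "0") are dropped unless allow_ties
    # is set, and pairs with a missing rank attribute are kept with the sentinel
    # rank "-99".
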
    def get_pairwise_from_multiclass_set(self, parallelsentences, allow_ties = False, exponential = True, rename_rank = True):
        pairwise_parallelsentences = []
        j = 0
        for parallelsentence in parallelsentences:
            j += 1
            if "judgment_id" in parallelsentence.get_attributes():
                judgement_id = parallelsentence.get_attribute("judgment_id")
            else:
                sys.stderr.write("Warning: no judgment id found. An incremental one will be assigned, which may cause unwanted behaviour if the original id was lost on the way.\n")
                judgement_id = str(j)
            pairwise_parallelsentences.extend(self.get_pairwise_from_multiclass_sentence(parallelsentence, judgement_id, allow_ties, exponential, rename_rank))
        #pairwise_parallelsentences = self.merge_overlapping_pairwise_set(pairwise_parallelsentences)
        return pairwise_parallelsentences

    def merge_overlapping_pairwise_set(self, parallelsentences):
        sets = OrderedDict()
        merged_parallelsentences = []
        merged = 0

        #first sort everything into dicts, to make searching easier
        for ps in parallelsentences:
            sentence_id = ps.get_attribute("id")
            try:
                set_id = ps.get_attribute("testset")
            except KeyError:
                try:
                    set_id = ps.get_attribute("document_id")
                except KeyError:
                    set_id = "0"

            if set_id in sets:
                if sentence_id in sets[set_id]:
                    sets[set_id][sentence_id].append(ps)
                else:
                    sets[set_id][sentence_id] = [ps]
            else:
                sets[set_id] = {sentence_id: [ps]}

        for set_id in sets:
            sset = sets[set_id]

            for sentence_id in sorted(sset.keys()):
                pslist = sset[sentence_id]

                system_pairs = set([(ps.get_translations()[0].get_attribute("system"), ps.get_translations()[1].get_attribute("system")) for ps in pslist])
                for (system_a, system_b) in system_pairs:
                    rank = 0
                    j = 0
                    mod = 0
                    for ps in pslist:
                        if ps.get_translations()[0].get_attribute("system") == system_a \
                          and ps.get_translations()[1].get_attribute("system") == system_b:
                            rank += int(ps.get_attribute(self.rank_name)) * self._annotator_weight(ps)
                            mod += 1
                            #remember the index of the last sentence that matched this pair
                            i = j
                        j += 1
                    if rank > 0:
                        final_rank = 1
                    elif rank < 0:
                        final_rank = -1
                    else:
                        final_rank = 0

                    if mod > 1:
                        merged += 1

                    src = pslist[i].get_source()
                    tgt = pslist[i].get_translations()
                    ref = pslist[i].get_reference()
                    atts = pslist[i].get_attributes()
                    atts[self.rank_name] = str(final_rank)
                    new_ps = ParallelSentence(src, tgt, ref, atts)
                    merged_parallelsentences.append(new_ps)
        print "merged %d out of %d" % (merged, len(parallelsentences))
        return merged_parallelsentences

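    # Illustration (hypothetical numbers) of the merge above: if three annotators judged
    # the pair (sysA, sysB) for the same sentence as -1, -1 and +1, the weighted sum
    # (with the default _annotator_weight of 1) is -1, which the sign test collapses to
    # a merged rank of -1; a single -1 and a single +1 would cancel out to the tie
    # value 0.
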
    def _annotator_weight(self, ps):
        """
        Weighting hook used by merge_overlapping_pairwise_set; currently every judgment
        gets a uniform weight of 1.
        """
        return 1

    def _normalize_rank(self, system_a, system_b):
        """
        Reads the rank attributes of the two given system outputs, compares them and returns a universal
        comparison value, namely "-1" if the first system is better, "1" if the second system output is
        better, and "0" if they are equally good. Returns None if either output has no rank attribute.
        """
        try:
            rank_a = int(system_a.get_attribute(self.rank_name))
            rank_b = int(system_b.get_attribute(self.rank_name))
            if rank_a < rank_b:
                rank = "-1"
            elif rank_a > rank_b:
                rank = "1"
            else:
                rank = "0"
            return rank
        except KeyError:
            return None
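
The following round-trip sketch shows how the two conversion directions fit together. It is not part of the module: it relies only on the ParallelSentence interface that rankhandler itself exercises (the four-argument constructor, get_source, get_translations and the attribute getters), while the DemoTranslation class, the import paths and all attribute values are illustrative assumptions that may need adapting to the actual package.

# NOTE: usage sketch only; import paths, the DemoTranslation stand-in and all
# attribute values below are assumptions, not part of the package.
from sentence.rankhandler import RankHandler
from sentence.parallelsentence import ParallelSentence


class DemoTranslation(object):
    """Minimal stand-in for the package's own translation/sentence class."""

    def __init__(self, text, attributes):
        self.text = text
        self.attributes = dict(attributes)

    def get_attribute(self, name):
        return self.attributes[name]    # raises KeyError when missing, as RankHandler expects

    def add_attribute(self, name, value):
        self.attributes[name] = value

    def rename_attribute(self, old_name, new_name):
        self.attributes[new_name] = self.attributes.pop(old_name)


if __name__ == "__main__":
    source = DemoTranslation("Das ist ein Test .", {})
    translations = [
        DemoTranslation("This is a test .", {"system": "sysA", "rank": "1"}),
        DemoTranslation("This is test .", {"system": "sysB", "rank": "3"}),
        DemoTranslation("It is a test .", {"system": "sysC", "rank": "2"}),
    ]
    multiclass = ParallelSentence(source, translations, None,
                                  {"id": "1", "judgment_id": "100"})

    handler = RankHandler()

    # multiclass -> pairwise: one ParallelSentence per ordered pair of systems
    pairwise = handler.get_pairwise_from_multiclass_set([multiclass])
    for ps in pairwise:
        systems = [t.get_attribute("system") for t in ps.get_translations()]
        print systems, ps.get_attribute("rank")

    # pairwise -> multiclass: accumulate the pairwise ranks back into one ranked sentence
    rebuilt = handler.get_multiclass_from_pairwise_set(pairwise)
    for translation in rebuilt[0].get_translations():
        print translation.get_attribute("system"), translation.get_attribute("rank")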