maxoul committed
Commit 2e56a2a · verified · 1 Parent(s): 746b6af

Upload COCOM

Files changed (1):
  1. modelling_pisco.py (+2 -1)
modelling_pisco.py CHANGED
@@ -1036,11 +1036,12 @@ class COCOM(PreTrainedModel):
         """
         Compress a list of documents
         if questions is not None, assumes compression is done query-dependently !
+        expects as many questions as documents here (so repeat questions for multidoc)
         """
         if questions is None:
             input_encoder = self.prepare_encoder_inputs(documents, max_length=128)
         else: # we assume query-dependent here:
-            input_encoder = self.prepare_encoder_inputs(documents, max_length=128, q_texts=[question for question, docs in zip(questions, documents) for _ in docs])
+            input_encoder = self.prepare_encoder_inputs(documents, max_length=128, q_texts=questions)
         enc_input_ids = input_encoder['input_ids'].to(self.decoder.device)
         attention_mask = input_encoder['attention_mask'].to(self.decoder.device)
         return self.compress(enc_input_ids=enc_input_ids, enc_attention_mask=attention_mask)
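
For illustration, a minimal sketch of what this change asks of the caller: instead of passing one question per query and letting the compressor repeat it over that query's documents (the removed list comprehension), the caller now repeats each question itself so that q_texts lines up one-to-one with the documents. The helper name repeat_questions_per_document and the final compress_documents(...) call below are assumptions for the example, not part of the committed code.

# Sketch only: repeat_questions_per_document and compress_documents are
# assumed names for illustration, not taken from this commit.

def repeat_questions_per_document(questions, documents_per_query):
    """Flatten per-query document lists and repeat each question once per
    document, so the returned lists are parallel (one question per document)."""
    flat_questions, flat_documents = [], []
    for question, docs in zip(questions, documents_per_query):
        flat_questions.extend([question] * len(docs))
        flat_documents.extend(docs)
    return flat_questions, flat_documents

# Example: two queries, with two and one retrieved documents respectively.
questions = ["who wrote hamlet?", "what is the capital of france?"]
documents_per_query = [["doc a", "doc b"], ["doc c"]]
q_texts, docs = repeat_questions_per_document(questions, documents_per_query)
assert q_texts == ["who wrote hamlet?", "who wrote hamlet?", "what is the capital of france?"]
assert docs == ["doc a", "doc b", "doc c"]

# The parallel lists would then be handed to the compression entry point,
# e.g. model.compress_documents(documents=docs, questions=q_texts)  # assumed call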