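"""Generate ELI5 answers with a retrieval-augmented causal LM.

Loads questions and their retrieved documents from a JSON file, prompts a
Hugging Face causal LM with a RAG chat template, and writes the generated
answers to a JSON file next to the input.

Example invocation (the script filename here is illustrative):
    python generate_eli5_answers.py --yaml_config mistral_7B.yaml --batch_size 4
"""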

import argparse
import json
import os
import re

import nltk
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.trainer_utils import set_seed

from prompts import get_rag_messages
from utils.answer_helpers import LIST_INDEXER_REGEX
from utils.config_loader import ConfigLoader
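

# The ELI5 eval file is a JSON list of examples, each holding a question,
# its retrieved docs, and the surrounding question context.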
def load_eli5(eli5_path):
    with open(eli5_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data
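

# Generation that runs into the max_new_tokens budget often stops mid-sentence;
# strip trailing fragments so every answer ends on a complete sentence.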
def trim_incomplete_sentences(text):
    text = text.strip()
    while text:
        sentences = nltk.sent_tokenize(text)
        if len(sentences) == 1:
            break
        last_sentence = sentences[-1]
        # Stop once the final sentence ends in terminal punctuation (optionally
        # followed by a closing quote) and is not a bare list marker.
        if re.search(r'[.!?;]"?\s*\Z', last_sentence) and not re.match(LIST_INDEXER_REGEX, last_sentence):
            break
        last_sentence_pos = text.rfind(last_sentence)
        text = text[:last_sentence_pos].rstrip()
    return text


def main(args):
    # Load the shared base config and the experiment-specific overrides.
    config_loader = ConfigLoader()
    config = config_loader.load_config(args.yaml_base_config, args.yaml_config)
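
    # Decoder-only models need left padding for batched generation; reuse the
    # EOS token as the pad token when the tokenizer defines none.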
    tokenizer = AutoTokenizer.from_pretrained(config['model'])
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "left"
    model = AutoModelForCausalLM.from_pretrained(config['model'], torch_dtype=torch.float16, device_map="auto")

    eli5_data = load_eli5(args.eli5_path)
    generated_answers = []
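
    # Generate answers batch by batch, with a progress bar over all examples.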
    with tqdm(total=len(eli5_data), desc="Generating answers") as pbar:
        for start in range(0, len(eli5_data), args.batch_size):
            batch = eli5_data[start:start + args.batch_size]
            batch_messages = [
                get_rag_messages(config, example["question"], example["docs"], example["question_ctx"])
                for example in batch
            ]
            batch_inputs = tokenizer.apply_chat_template(
                batch_messages, return_tensors="pt", continue_final_message=True, padding=True, return_dict=True
            )
            batch_inputs = batch_inputs.to(model.device)
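            # Greedy decoding by default; switch to sampling only when the
            # config specifies a positive temperature.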
            model_kwargs = {
                "max_new_tokens": config.get("max_new_tokens", 400),
                "do_sample": False,
                "pad_token_id": tokenizer.eos_token_id,
            }
            if config.get("temperature", 0.0) > 0.0:
                model_kwargs["temperature"] = config["temperature"]
                if "top_p" in config:
                    model_kwargs["top_p"] = config["top_p"]
                model_kwargs["do_sample"] = True
            # Re-seed before every batch so results do not depend on batch order.
            set_seed(args.seed)
            outputs = model.generate(**batch_inputs, **model_kwargs)
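            # Keep only the continuation: the prompt occupies the first
            # input_ids.shape[-1] positions of every (left-padded) row.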
            for output_ids in outputs:
                answer = tokenizer.decode(output_ids[batch_inputs.input_ids.shape[-1]:], skip_special_tokens=True)
                answer = trim_incomplete_sentences(answer)
                generated_answers.append(answer)
            pbar.update(len(batch))
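
    # Name the output file after the experiment config and seed, placing it
    # next to the input dataset.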
    config_name = os.path.splitext(os.path.basename(args.yaml_config))[0]
    output_path = os.path.splitext(args.eli5_path)[0] + f"_answers_{config_name}_{args.seed}.json"
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(generated_answers, f, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate answers using RAG")
    parser.add_argument("--eli5_path", type=str, help="Path to the ELI5 dataset JSON file", default="./data/eli5_eval_bm25_top100_reranked_oracle.json")
    parser.add_argument("--yaml_base_config", type=str, help="Path to the base YAML config file", default="base_config.yaml")
    parser.add_argument("--yaml_config", type=str, help="Path to the experiment-specific YAML config file", default="mistral_7B.yaml")
    parser.add_argument("--batch_size", type=int, help="Batch size for inference", default=1)
    parser.add_argument("--seed", type=int, help="Random seed", default=42)
    args = parser.parse_args()
    main(args)