Arguments

class FlagEmbedding.abc.evaluation.AbsEvalArgs(eval_name: str = None, dataset_dir: str | None = None, force_redownload: bool = False, dataset_names: str | None = None, splits: str = 'test', corpus_embd_save_dir: str = None, output_dir: str = './search_results', search_top_k: int = 1000, rerank_top_k: int = 100, cache_path: str = None, token: str = <factory>, overwrite: bool = False, ignore_identical_ids: bool = False, k_values: int = <factory>, eval_output_method: str = 'markdown', eval_output_path: str = './eval_results.md', eval_metrics: str = <factory>)

Base class for evaluation arguments.

class FlagEmbedding.abc.evaluation.AbsEvalModelArgs(embedder_name_or_path: str, embedder_model_class: str | None = None, normalize_embeddings: bool = True, pooling_method: str = 'cls', use_fp16: bool = True, devices: str | None = None, query_instruction_for_retrieval: str | None = None, query_instruction_format_for_retrieval: str = '{}{}', examples_for_task: str | None = None, examples_instruction_format: str = '{}{}', trust_remote_code: bool = False, reranker_name_or_path: str | None = None, reranker_model_class: str | None = None, reranker_peft_path: str | None = None, use_bf16: bool = False, query_instruction_for_rerank: str | None = None, query_instruction_format_for_rerank: str = '{}{}', passage_instruction_for_rerank: str | None = None, passage_instruction_format_for_rerank: str = '{}{}', cache_dir: str = None, embedder_batch_size: int = 3000, reranker_batch_size: int = 3000, embedder_query_max_length: int = 512, embedder_passage_max_length: int = 512, reranker_query_max_length: int | None = None, reranker_max_length: int = 512, normalize: bool = False, prompt: str | None = None, cutoff_layers: List[int] = None, compress_ratio: int = 1, compress_layers: int | None = None)

Base class for model arguments during evaluation.