import{s as hn,o as $n,n as O}from"../chunks/scheduler.7da89386.js";import{S as bn,i as _n,g as c,s as o,r as f,A as yn,h as d,f as l,c as r,j as w,u as g,x as y,k as C,y as n,a as p,v,d as h,t as $,w as b}from"../chunks/index.20910acc.js";import{D as j}from"../chunks/Docstring.f0851586.js";import{C as G}from"../chunks/CodeBlock.143bd81e.js";import{E as A}from"../chunks/ExampleCodeBlock.d74eb35c.js";import{H as Ft,E as xn}from"../chunks/getInferenceSnippets.217b4024.js";function Mn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"bXlfbWV0cmljJTIwJTNEJTIwRXZhbHVhdGlvbk1vZHVsZUluZm8uZnJvbV9kaXJlY3RvcnkoJTIyJTJGcGF0aCUyRnRvJTJGZGlyZWN0b3J5JTJGJTIyKQ==",highlighted:'>>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")',wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function wn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"bXlfbWV0cmljLmluZm8ud3JpdGVfdG9fZGlyZWN0b3J5KCUyMiUyRnBhdGglMkZ0byUyRmRpcmVjdG9yeSUyRiUyMik=",highlighted:'>>> my_metric.info.write_to_directory("/path/to/directory/")',wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function Cn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBYWNjdXJhY3kuYWRkKHJlZmVyZW5jZXMlM0QlNUIwJTJDMSU1RCUyQyUyMHByZWRpY3Rpb25zJTNEJTVCMSUyQzAlNUQp",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.add(references=[0,1], predictions=[1,0])`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function jn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBZm9yJTIwcmVmcyUyQyUyMHByZWRzJTIwaW4lMjB6aXAoJTVCJTVCMCUyQzElNUQlMkMlNUIwJTJDMSU1RCU1RCUyQyUyMCU1QiU1QjElMkMwJTVEJTJDJTVCMCUyQzElNUQlNUQpJTNBJTBBJTIwJTIwJTIwJTIwYWNjdXJhY3kuYWRkX2JhdGNoKHJlZmVyZW5jZXMlM0RyZWZzJTJDJTIwcHJlZGljdGlvbnMlM0RwcmVkcyk=",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... accuracy.add_batch(references=refs, predictions=preds)`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function Tn(M){let a,x;return a=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBYWNjdXJhY3kuY29tcHV0ZShwcmVkaWN0aW9ucyUzRCU1QjAlMkMlMjAxJTJDJTIwMSUyQyUyMDAlNUQlMkMlMjByZWZlcmVuY2VzJTNEJTVCMCUyQyUyMDElMkMlMjAwJTJDJTIwMSU1RCk=",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])`,wrap:!1}}),{c(){f(a.$$.fragment)},l(m){g(a.$$.fragment,m)},m(m,s){v(a,m,s),x=!0},p:O,i(m){x||(h(a.$$.fragment,m),x=!0)},o(m){$(a.$$.fragment,m),x=!1},d(m){b(a,m)}}}function En(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGU=",highlighted:'>>> import evaluate',wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function Jn(M){let a,x="Examples:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBZjElMjAlM0QlMjBldmFsdWF0ZS5sb2FkKCUyMmYxJTIyKSUwQWNsZl9tZXRyaWNzJTIwJTNEJTIwY29tYmluZSglNUIlMjJhY2N1cmFjeSUyMiUyQyUyMCUyMmYxJTIyJTVEKQ==",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-kvfsh7"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function Un(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBZjElMjAlM0QlMjBldmFsdWF0ZS5sb2FkKCUyMmYxJTIyKSUwQWNsZl9tZXRyaWNzJTIwJTNEJTIwY29tYmluZSglNUIlMjJhY2N1cmFjeSUyMiUyQyUyMCUyMmYxJTIyJTVEKSUwQWZvciUyMHJlZiUyQyUyMHByZWQlMjBpbiUyMHppcCglNUIwJTJDMSUyQzAlMkMxJTVEJTJDJTIwJTVCMSUyQzAlMkMwJTJDMSU1RCklM0ElMEElMjAlMjAlMjAlMjBjbGZfbWV0cmljcy5hZGQocmVmZXJlbmNlcyUzRHJlZiUyQyUyMHByZWRpY3Rpb25zJTNEcHJlZCk=",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
... clf_metrics.add(references=ref, predictions=pred)`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function kn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBZjElMjAlM0QlMjBldmFsdWF0ZS5sb2FkKCUyMmYxJTIyKSUwQWNsZl9tZXRyaWNzJTIwJTNEJTIwY29tYmluZSglNUIlMjJhY2N1cmFjeSUyMiUyQyUyMCUyMmYxJTIyJTVEKSUwQWZvciUyMHJlZnMlMkMlMjBwcmVkcyUyMGluJTIwemlwKCU1QiU1QjAlMkMxJTVEJTJDJTVCMCUyQzElNUQlNUQlMkMlMjAlNUIlNUIxJTJDMCU1RCUyQyU1QjAlMkMxJTVEJTVEKSUzQSUwQSUyMCUyMCUyMCUyMGNsZl9tZXRyaWNzLmFkZChyZWZlcmVuY2VzJTNEcmVmcyUyQyUyMHByZWRpY3Rpb25zJTNEcHJlZHMp",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... clf_metrics.add(references=refs, predictions=preds)`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function Nn(M){let a,x="Example:",m,s,u;return s=new G({props:{code:"aW1wb3J0JTIwZXZhbHVhdGUlMEFhY2N1cmFjeSUyMCUzRCUyMGV2YWx1YXRlLmxvYWQoJTIyYWNjdXJhY3klMjIpJTBBZjElMjAlM0QlMjBldmFsdWF0ZS5sb2FkKCUyMmYxJTIyKSUwQWNsZl9tZXRyaWNzJTIwJTNEJTIwY29tYmluZSglNUIlMjJhY2N1cmFjeSUyMiUyQyUyMCUyMmYxJTIyJTVEKSUwQWNsZl9tZXRyaWNzLmNvbXB1dGUocHJlZGljdGlvbnMlM0QlNUIwJTJDMSU1RCUyQyUyMHJlZmVyZW5jZXMlM0QlNUIxJTJDMSU1RCk=",highlighted:`>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> clf_metrics.compute(predictions=[0,1], references=[1,1])
{'accuracy': 0.5, 'f1': 0.6666666666666666}`,wrap:!1}}),{c(){a=c("p"),a.textContent=x,m=o(),f(s.$$.fragment)},l(t){a=d(t,"P",{"data-svelte-h":!0}),y(a)!=="svelte-11lpom8"&&(a.textContent=x),m=r(t),g(s.$$.fragment,t)},m(t,_){p(t,a,_),p(t,m,_),v(s,t,_),u=!0},p:O,i(t){u||(h(s.$$.fragment,t),u=!0)},o(t){$(s.$$.fragment,t),u=!1},d(t){t&&(l(a),l(m)),b(s,t)}}}function In(M){let a,x,m,s,u,t,_,xt,$e,La="The base class EvaluationModuleInfo
implements the logic for the subclasses MetricInfo
, ComparisonInfo
, and MeasurementInfo
.",Mt,T,be,Rt,Fe,Sa=`Base class to store information about an evaluation used for MetricInfo
, ComparisonInfo
,
and MeasurementInfo
.`,Ht,Re,Pa=`EvaluationModuleInfo
documents an evaluation, including its name, version, and features.
See the constructor arguments and properties for a full list.`,qt,He,Qa="Note: Not all fields are known on construction and may be updated later.",zt,D,_e,Xt,qe,Da="Create EvaluationModuleInfo
from the JSON file in metric_info_dir
.",At,oe,Ot,F,ye,Gt,ze,Fa=`Write EvaluationModuleInfo
as JSON to metric_info_dir
.
Also save the license separately in LICENSE.`,Bt,re,wt,k,xe,Kt,Xe,Ra="Information about a metric.",ea,Ae,Ha=`EvaluationModuleInfo
documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.`,ta,Oe,qa="Note: Not all fields are known on construction and may be updated later.",Ct,N,Me,aa,Ge,za="Information about a comparison.",na,Be,Xa=`EvaluationModuleInfo
documents a comparison, including its name, version, and features.
See the constructor arguments and properties for a full list.`,sa,Ke,Aa="Note: Not all fields are known on construction and may be updated later.",jt,I,we,oa,et,Oa="Information about a measurement.",ra,tt,Ga=`EvaluationModuleInfo
documents a measurement, including its name, version, and features.
See the constructor arguments and properties for a full list.`,la,at,Ba="Note: Not all fields are known on construction and may be updated later.",Tt,Ce,Et,je,Ka="The base class EvaluationModule
implements the logic for the subclasses Metric
, Comparison
, and Measurement
.",Jt,E,Te,ia,nt,en="A EvaluationModule
is the base class and common API for metrics, comparisons, and measurements.",ca,R,Ee,da,st,tn="Add one prediction and reference for the evaluation module’s stack.",ma,le,pa,H,Je,ua,ot,an="Add a batch of predictions and references for the evaluation module’s stack.",fa,ie,ga,V,Ue,va,rt,nn="Compute the evaluation module.",ha,lt,sn="Usage of positional arguments is not allowed to prevent mistakes.",$a,ce,ba,q,ke,_a,it,on="Downloads and prepares evaluation module for reading.",ya,de,Ut,B,Ne,xa,ct,rn="A Metric is the base class and common API for all metrics.",kt,K,Ie,Ma,dt,ln="A Comparison is the base class and common API for all comparisons.",Nt,ee,Ze,wa,mt,cn="A Measurement is the base class and common API for all measurements.",It,Ye,Zt,Ve,dn="The combine
function allows combining multiple EvaluationModule
s into a single CombinedEvaluations
.",Yt,Z,We,Ca,pt,mn=`Combines several metrics, comparisons, or measurements into a single CombinedEvaluations
object that
can be used like a single evaluation module.`,ja,ut,pn=`If two scores have the same name, then they are prefixed with their module names.
And if two modules have the same name, please use a dictionary to give them different names, otherwise an integer id is appended to the prefix.`,Ta,me,Vt,Y,Le,Ea,z,Se,Ja,ft,un="Add one prediction and reference for each evaluation module’s stack.",Ua,pe,ka,X,Pe,Na,gt,fn="Add a batch of predictions and references for each evaluation module’s stack.",Ia,ue,Za,W,Qe,Ya,vt,gn="Compute each evaluation module.",Va,ht,vn="Usage of positional arguments is not allowed to prevent mistakes.",Wa,fe,Wt,De,Lt,yt,St;return u=new Ft({props:{title:"Main classes",local:"main-classes",headingTag:"h1"}}),_=new Ft({props:{title:"EvaluationModuleInfo",local:"evaluate.EvaluationModuleInfo",headingTag:"h2"}}),be=new j({props:{name:"class evaluate.EvaluationModuleInfo",anchor:"evaluate.EvaluationModuleInfo",parameters:[{name:"description",val:": str"},{name:"citation",val:": str"},{name:"features",val:": typing.Union[datasets.features.features.Features, typing.List[datasets.features.features.Features]]"},{name:"inputs_description",val:": str = "},{name:"homepage",val:": str = "},{name:"license",val:": str = "},{name:"codebase_urls",val:": typing.List[str] = "},{name:"reference_urls",val:": typing.List[str] = "},{name:"streamable",val:": bool = False"},{name:"format",val:": typing.Optional[str] = None"},{name:"module_type",val:": str = 'metric'"},{name:"module_name",val:": typing.Optional[str] = None"},{name:"config_name",val:": typing.Optional[str] = None"},{name:"experiment_id",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L34"}}),_e=new j({props:{name:"from_directory",anchor:"evaluate.EvaluationModuleInfo.from_directory",parameters:[{name:"metric_info_dir",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModuleInfo.from_directory.metric_info_dir",description:`metric_info_dir (str
) —
The directory containing the metric_info
JSON file. This
should be the root directory of a specific metric version.`,name:"metric_info_dir"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L92"}}),oe=new A({props:{anchor:"evaluate.EvaluationModuleInfo.from_directory.example",$$slots:{default:[Mn]},$$scope:{ctx:M}}}),ye=new j({props:{name:"write_to_directory",anchor:"evaluate.EvaluationModuleInfo.write_to_directory",parameters:[{name:"metric_info_dir",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModuleInfo.write_to_directory.metric_info_dir",description:`metric_info_dir (str
) —
The directory to save the metric_info JSON file
to.`,name:"metric_info_dir"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L72"}}),re=new A({props:{anchor:"evaluate.EvaluationModuleInfo.write_to_directory.example",$$slots:{default:[wn]},$$scope:{ctx:M}}}),xe=new j({props:{name:"class evaluate.MetricInfo",anchor:"evaluate.MetricInfo",parameters:[{name:"description",val:": str"},{name:"citation",val:": str"},{name:"features",val:": typing.Union[datasets.features.features.Features, typing.List[datasets.features.features.Features]]"},{name:"inputs_description",val:": str = "},{name:"homepage",val:": str = "},{name:"license",val:": str = "},{name:"codebase_urls",val:": typing.List[str] = "},{name:"reference_urls",val:": typing.List[str] = "},{name:"streamable",val:": bool = False"},{name:"format",val:": typing.Optional[str] = None"},{name:"module_type",val:": str = 'metric'"},{name:"module_name",val:": typing.Optional[str] = None"},{name:"config_name",val:": typing.Optional[str] = None"},{name:"experiment_id",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L121"}}),Me=new j({props:{name:"class evaluate.ComparisonInfo",anchor:"evaluate.ComparisonInfo",parameters:[{name:"description",val:": str"},{name:"citation",val:": str"},{name:"features",val:": typing.Union[datasets.features.features.Features, typing.List[datasets.features.features.Features]]"},{name:"inputs_description",val:": str = "},{name:"homepage",val:": str = "},{name:"license",val:": str = "},{name:"codebase_urls",val:": typing.List[str] = "},{name:"reference_urls",val:": typing.List[str] = "},{name:"streamable",val:": bool = False"},{name:"format",val:": typing.Optional[str] = None"},{name:"module_type",val:": str = 'comparison'"},{name:"module_name",val:": typing.Optional[str] = None"},{name:"config_name",val:": typing.Optional[str] = None"},{name:"experiment_id",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L134"}}),we=new j({props:{name:"class evaluate.MeasurementInfo",anchor:"evaluate.MeasurementInfo",parameters:[{name:"description",val:": str"},{name:"citation",val:": str"},{name:"features",val:": typing.Union[datasets.features.features.Features, typing.List[datasets.features.features.Features]]"},{name:"inputs_description",val:": str = "},{name:"homepage",val:": str = "},{name:"license",val:": str = "},{name:"codebase_urls",val:": typing.List[str] = "},{name:"reference_urls",val:": typing.List[str] = "},{name:"streamable",val:": bool = False"},{name:"format",val:": typing.Optional[str] = None"},{name:"module_type",val:": str = 'measurement'"},{name:"module_name",val:": typing.Optional[str] = None"},{name:"config_name",val:": typing.Optional[str] = None"},{name:"experiment_id",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/info.py#L147"}}),Ce=new Ft({props:{title:"EvaluationModule",local:"evaluate.EvaluationModule",headingTag:"h2"}}),Te=new j({props:{name:"class evaluate.EvaluationModule",anchor:"evaluate.EvaluationModule",parameters:[{name:"config_name",val:": typing.Optional[str] = None"},{name:"keep_in_memory",val:": bool = False"},{name:"cache_dir",val:": typing.Optional[str] = None"},{name:"num_process",val:": int = 1"},{name:"process_id",val:": int = 0"},{name:"seed",val:": typing.Optional[int] = None"},{name:"experiment_id",val:": typing.Optional[str] = None"},{name:"hash",val:": str = 
None"},{name:"max_concurrent_cache_files",val:": int = 10000"},{name:"timeout",val:": typing.Union[int, float] = 100"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModule.config_name",description:`config_name (str
) —
This is used to define a hash specific to a module computation script and prevents the module’s data
from being overridden when the module loading script is modified.`,name:"config_name"},{anchor:"evaluate.EvaluationModule.keep_in_memory",description:`keep_in_memory (bool
) —
Keep all predictions and references in memory. Not possible in distributed settings.`,name:"keep_in_memory"},{anchor:"evaluate.EvaluationModule.cache_dir",description:`cache_dir (str
) —
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.`,name:"cache_dir"},{anchor:"evaluate.EvaluationModule.num_process",description:`num_process (int
) —
Specify the total number of nodes in a distributed setting.
This is useful to compute the module in distributed setups (in particular non-additive modules like F1).`,name:"num_process"},{anchor:"evaluate.EvaluationModule.process_id",description:`process_id (int
) —
Specify the id of the current process in a distributed setup (between 0 and num_process-1).
This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).`,name:"process_id"},{anchor:"evaluate.EvaluationModule.seed",description:`seed (int
, optional) —
If specified, this will temporarily set numpy’s random seed when compute() is run.`,name:"seed"},{anchor:"evaluate.EvaluationModule.experiment_id",description:`experiment_id (str
) —
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute the module in distributed setups (in particular non-additive metrics like F1).`,name:"experiment_id"},{anchor:"evaluate.EvaluationModule.hash",description:`hash (str
) —
Used to identify the evaluation module according to the hashed file contents.`,name:"hash"},{anchor:"evaluate.EvaluationModule.max_concurrent_cache_files",description:`max_concurrent_cache_files (int
) —
Max number of concurrent module cache files (default 10000
).`,name:"max_concurrent_cache_files"},{anchor:"evaluate.EvaluationModule.timeout",description:`timeout (Union[int, float]
) —
Timeout in seconds for distributed setting synchronization.`,name:"timeout"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/module.py#L149"}}),Ee=new j({props:{name:"add",anchor:"evaluate.EvaluationModule.add",parameters:[{name:"prediction",val:" = None"},{name:"reference",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModule.add.prediction",description:`prediction (list/array/tensor
, optional) —
Predictions.`,name:"prediction"},{anchor:"evaluate.EvaluationModule.add.reference",description:`reference (list/array/tensor
, optional) —
References.`,name:"reference"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/module.py#L548"}}),le=new A({props:{anchor:"evaluate.EvaluationModule.add.example",$$slots:{default:[Cn]},$$scope:{ctx:M}}}),Je=new j({props:{name:"add_batch",anchor:"evaluate.EvaluationModule.add_batch",parameters:[{name:"predictions",val:" = None"},{name:"references",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModule.add_batch.predictions",description:`predictions (list/array/tensor
, optional) —
Predictions.`,name:"predictions"},{anchor:"evaluate.EvaluationModule.add_batch.references",description:`references (list/array/tensor
, optional) —
References.`,name:"references"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/module.py#L488"}}),ie=new A({props:{anchor:"evaluate.EvaluationModule.add_batch.example",$$slots:{default:[jn]},$$scope:{ctx:M}}}),Ue=new j({props:{name:"compute",anchor:"evaluate.EvaluationModule.compute",parameters:[{name:"predictions",val:" = None"},{name:"references",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"evaluate.EvaluationModule.compute.predictions",description:`predictions (list/array/tensor
, optional) —
Predictions.`,name:"predictions"},{anchor:"evaluate.EvaluationModule.compute.references",description:`references (list/array/tensor
, optional) —
References.`,name:"references"},{anchor:"evaluate.EvaluationModule.compute.*kwargs",description:`**kwargs (optional) —
Keyword arguments that will be forwarded to the evaluation module’s compute()
method (see details in the docstring).`,name:"*kwargs"}],source:"https://github.com/huggingface/evaluate/blob/v0.4.5/src/evaluate/module.py#L415",returnDescription:`