Columns: input (string, lengths 33 to 5k) and output (string, lengths 32 to 5k).
Each pair of lines below is one row: the input snippet, then the output snippet; long cells are truncated by the viewer.
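The rows appear to pair two versions of the same source file (version bumps, renamed imports, refactored configs), so a quick way to work with the table is to load it as a dataset and diff each pair. A minimal sketch using the Hugging Face datasets library; the path "user/code-edit-pairs" is a hypothetical placeholder, not the real repository id:

import difflib

from datasets import load_dataset

# Hypothetical dataset path; substitute the actual repository id.
ds = load_dataset("user/code-edit-pairs", split="train")

for row in ds.select(range(3)):
    # Each row holds two code strings between roughly 33 and 5k characters.
    diff = difflib.unified_diff(
        row["output"].splitlines(),
        row["input"].splitlines(),
        lineterm="",
    )
    print("\n".join(diff))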
import os from parameterized import parameterized from torchaudio.datasets import LibriMix from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase _SAMPLE_RATE = 8000 _TASKS_TO_MIXTURE = { "sep_clean": "mix_clean", "enh_single": "mix_single", "enh_both": "m...
import os from parameterized import parameterized from torchaudio.datasets import LibriMix from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase _SAMPLE_RATE = 8000 _TASKS_TO_MIXTURE = { "sep_clean": "mix_clean", "enh_single": "mix_single", "enh_both": "m...
"""Init file of LlamaIndex.""" __version__ = "0.12.18" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""Init file of LlamaIndex.""" __version__ = "0.12.17" import logging from logging import NullHandler from typing import Callable, Optional try: # Force pants to install eval_type_backport on 3.9 import eval_type_backport # noqa # type: ignore except ImportError: pass # response from llama_index.core....
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.linalg import cholesky as cholesky from keras.src.ops.linalg import det as det from keras.src.ops.linalg import eig as eig from keras.src.ops.linalg import eigh as eigh from keras...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.linalg import cholesky from keras.src.ops.linalg import det from keras.src.ops.linalg import eig from keras.src.ops.linalg import eigh from keras.src.ops.linalg import inv from ke...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .c...
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .c...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import logging import os.path as osp from typing import Optional from mmengine.fileio import dump from mmengine.logging import print_log from . import root from .default_scope import DefaultScope from .registry import Registry def traverse_registry_tree...
# Copyright (c) OpenMMLab. All rights reserved. import datetime import os.path as osp import warnings from typing import Optional from mmengine.fileio import dump from mmengine.logging import print_log from . import root from .default_scope import DefaultScope from .registry import Registry def traverse_registry_tre...
from exa_py.api import ( HighlightsContentsOptions, TextContentsOptions, ) from langchain_exa.retrievers import ExaSearchRetriever from langchain_exa.tools import ExaFindSimilarResults, ExaSearchResults __all__ = [ "ExaFindSimilarResults", "ExaSearchResults", "ExaSearchRetriever", "HighlightsC...
from exa_py.api import ( # type: ignore # type: ignore[import-not-found, import-not-found] HighlightsContentsOptions, TextContentsOptions, ) from langchain_exa.retrievers import ExaSearchRetriever from langchain_exa.tools import ExaFindSimilarResults, ExaSearchResults __all__ = [ "ExaSearchResults", ...
import os from unittest.mock import patch import pytest from langchain_community.embeddings.openai import OpenAIEmbeddings os.environ["OPENAI_API_KEY"] = "foo" @pytest.mark.requires("openai") def test_openai_invalid_model_kwargs() -> None: with pytest.raises(ValueError): OpenAIEmbeddings(model_kwargs={...
import pytest from langchain_community.embeddings.openai import OpenAIEmbeddings @pytest.mark.requires("openai") def test_openai_invalid_model_kwargs() -> None: with pytest.raises(ValueError): OpenAIEmbeddings(model_kwargs={"model": "foo"}) @pytest.mark.requires("openai") def test_openai_incorrect_fiel...
import os from typing import Any, Dict, Generator, Literal, Optional import requests import sseclient from llama_index.core.llms import ( CompletionResponse, CompletionResponseGen, CustomLLM, LLMMetadata, ) from llama_index.core.llms.callbacks import llm_completion_callback from llama_index.core.bridge...
import os from typing import Any, Dict, Generator, Literal, Optional import requests import sseclient from llama_index.core.llms import ( CompletionResponse, CompletionResponseGen, CustomLLM, LLMMetadata, ) from llama_index.core.llms.callbacks import llm_completion_callback from llama_index.core.bridge...
from abc import abstractmethod from typing import Iterator, Iterable, MutableSequence from docarray import Document class BaseSequenceLikeMixin(MutableSequence[Document]): """Implement sequence-like methods""" def insert(self, index: int, value: 'Document'): """Insert `doc` at `index`. :par...
from abc import abstractmethod from typing import Iterator, Iterable, MutableSequence from docarray import Document class BaseSequenceLikeMixin(MutableSequence[Document]): """Implement sequence-like methods""" def insert(self, index: int, value: 'Document'): """Insert `doc` at `index`. :par...
import pathlib from argparse import ArgumentParser from lightning import ConformerRNNTModule, get_data_module from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint from pytorch_lightning.plugins import DDPPlugin def run_train(args): se...
import pathlib from argparse import ArgumentParser from lightning import ConformerRNNTModule, get_data_module from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor from pytorch_lightning.plugins import DDPPlugin def run_train(args): se...
import datetime import autogpt_libs.auth as autogpt_auth_lib import fastapi import fastapi.testclient import pytest import pytest_mock import backend.server.v2.library.model as library_model from backend.server.v2.library.routes import router as library_router app = fastapi.FastAPI() app.include_router(library_route...
import autogpt_libs.auth.depends import autogpt_libs.auth.middleware import fastapi import fastapi.testclient import pytest import pytest_mock import backend.server.v2.library.db import backend.server.v2.library.model import backend.server.v2.library.routes app = fastapi.FastAPI() app.include_router(backend.server.v2...
from __future__ import annotations from collections.abc import Iterable from enum import Enum from typing import Any import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import pairwise_cos_sim, pairwise...
from __future__ import annotations from collections.abc import Iterable from enum import Enum from typing import Any import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from sentence_transformers.util import pairwise_cos_sim, pairwise...
"""Run smoke tests""" import torchaudio # noqa: F401 import torchaudio.compliance.kaldi # noqa: F401 import torchaudio.datasets # noqa: F401 import torchaudio.functional # noqa: F401 import torchaudio.models # noqa: F401 import torchaudio.pipelines # noqa: F401 import torchaudio.sox_effects # noqa: F401 import ...
"""Run smoke tests""" import torchaudio # noqa: F401 import torchaudio.compliance.kaldi # noqa: F401 import torchaudio.datasets # noqa: F401 import torchaudio.functional # noqa: F401 import torchaudio.models # noqa: F401 import torchaudio.pipelines # noqa: F401 import torchaudio.sox_effects # noqa: F401 import ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import Tuple from mmengine.model import BaseModule from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig ...
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import Tuple from mmengine.model import BaseModule from torch import Tensor from mmdet.data_elements import SampleList from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptConfigType, OptMultiConf...
# coding=utf-8 # Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licens...
# coding=utf-8 # Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licens...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-mar...
from __future__ import annotations import pytest from sentence_transformers.cross_encoder import CrossEncoder @pytest.mark.parametrize( "model_name, expected_score", [ ("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]), ("cross-encoder/ms-ma...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.optimizers import legacy as legacy from keras.optimizers import schedules as schedules from keras.src.optimizers import deserialize as deserialize from keras.src.optimizers import get as ...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api.optimizers import legacy from keras.api.optimizers import schedules from keras.src.optimizers import deserialize from keras.src.optimizers import get from keras.src.optimizers import ...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. It also demonstrates how to stream data which is helpful in case you don't want to wait for an extremely large dataset to download, or if you want to limit ...
import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import VideoDoc from docarray.typing import AudioNdArray, NdArray, VideoNdArray from docarray.utils.misc import is_tf_available from tests import TOYDATA_DIR tf_available = is_tf_availa...
import numpy as np import pytest import torch from pydantic import parse_obj_as from docarray import BaseDocument from docarray.documents import VideoDoc from docarray.typing import AudioNdArray, NdArray, VideoNdArray from docarray.utils.misc import is_tf_available from tests import TOYDATA_DIR tf_available = is_tf_a...
from __future__ import annotations from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss from .CrossEntropyLoss import CrossEntropyLoss from .LambdaLoss import ( LambdaLoss, LambdaRankScheme, NDCGLoss1Scheme, NDCGLo...
from __future__ import annotations from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss from .CrossEntropyLoss import CrossEntropyLoss from .ListNetLoss import ListNetLoss from .MarginMSELoss import MarginMSELoss from .MSELoss imp...
import importlib from typing import Any from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline from langchain.retrievers.document_compressors.chain_extract import ( LLMChainExtractor, ) from langchain.retrievers.document_compressors.chain_filter import ( LLMChainFilter, ) from la...
import importlib from typing import Any from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline from langchain.retrievers.document_compressors.chain_extract import ( LLMChainExtractor, ) from langchain.retrievers.document_compressors.chain_filter import ( LLMChainFilter, ) from la...
import numpy as np import torch from docarray import Document, Image, Text from docarray.typing import ( AnyUrl, Embedding, ImageUrl, NdArray, Tensor, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdArrayEmbedding def test_multi_modal_doc_proto(): clas...
import numpy as np import torch from docarray import Document, Image, Text from docarray.typing import ( AnyUrl, Embedding, ImageUrl, NdArray, Tensor, TextUrl, TorchEmbedding, TorchTensor, ) from docarray.typing.tensor import NdArrayEmbedding def test_multi_modal_doc_proto(): clas...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg, norm_eval=False), neck=dict(norm_cfg=norm...
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg, norm_eval=False), neck=dict(norm_cfg=norm...
import numpy as np import pytest from fastapi import FastAPI from httpx import AsyncClient from docarray import BaseDocument from docarray.documents import Image, Text from docarray.typing import NdArray @pytest.mark.asyncio async def test_fast_api(): class Mmdoc(BaseDocument): img: Image text: T...
import numpy as np import pytest from fastapi import FastAPI from httpx import AsyncClient from docarray import BaseDocument, Image, Text from docarray.typing import NdArray @pytest.mark.asyncio async def test_fast_api(): class Mmdoc(BaseDocument): img: Image text: Text title: str in...
_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = './atss_r50_fpn_lsj_200e_8x8_fp16_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file...
_base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file...
"""Document transformers.""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any from langchain_core.runnables.config import run_in_executor if TYPE_CHECKING: from collections.abc import Sequence from langchain_core.documents import Document class ...
from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any from langchain_core.runnables.config import run_in_executor if TYPE_CHECKING: from collections.abc import Sequence from langchain_core.documents import Document class BaseDocumentTransformer(ABC): ...
from keras.src.api_export import keras_export # Unique source of truth for the version number. __version__ = "3.8.0" @keras_export("keras.version") def version(): return __version__
from keras.src.api_export import keras_export # Unique source of truth for the version number. __version__ = "3.7.0" @keras_export("keras.version") def version(): return __version__
""" This examples demonstrates the setup for Question-Answer-Retrieval. You can input a query or a question. The script then uses semantic search to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM). As model, we use: nq-distilbert-base-v1 It was trained on the Natural Ques...
""" This examples demonstrates the setup for Question-Answer-Retrieval. You can input a query or a question. The script then uses semantic search to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM). As model, we use: nq-distilbert-base-v1 It was trained on the Natural Ques...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`...
"""Module to change the configuration of libsox, which is used by I/O functions like :py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`. """ from typing import Dict, List import torch import torchaudio @torchaudio._extension.fail_if_no_sox def set_seed(seed: int): """Set libsox's...
"""Module to change the configuration of libsox, which is used by I/O functions like :py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`. """ from typing import Dict, List import torch from torchaudio._internal import module_utils as _mod_utils @_mod_utils.requires_sox() def set_seed(...
import torch __all__ = ["DeepSpeech"] class FullyConnected(torch.nn.Module): """ Args: n_feature: Number of input features n_hidden: Internal hidden unit size. """ def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None: super(FullyC...
import torch __all__ = ["DeepSpeech"] class FullyConnected(torch.nn.Module): """ Args: n_feature: Number of input features n_hidden: Internal hidden unit size. """ def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None: super(FullyC...
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding, TorchEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import Tensor from docarray.typing.tensor.torch_tensor import TorchTensor __all__ = [ 'NdArray', 'TorchTensor', 'Tensor', 'Embed...
from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import Tensor from docarray.typing.tensor.torch_tensor import TorchTensor __all__ = ['NdArray', 'TorchTensor', 'Tensor']
from .normalizer import ImageNormalizer
from .normalizer import ImageNormalizer
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LIC...
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LIC...
"""Simple Web scraper.""" from typing import List, Optional, Dict, Callable import requests from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class SimpleWebPageReader(BasePydanticReader): """ Si...
"""Simple Web scraper.""" from typing import List, Optional, Dict, Callable import requests from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.readers.base import BasePydanticReader from llama_index.core.schema import Document class SimpleWebPageReader(BasePydanticReader): """Simple ...
"""Sentence window retriever.""" from typing import Any, Dict, List from llama_index.core import Settings, VectorStoreIndex from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.node_parser import ( SentenceWindowNodeParser, ) from llama_index.core.postprocessor import MetadataReplaceme...
"""Sentence window retriever.""" from typing import Any, Dict, List from llama_index.core import Settings, VectorStoreIndex from llama_index.core.llama_pack.base import BaseLlamaPack from llama_index.core.node_parser import ( SentenceWindowNodeParser, ) from llama_index.core.postprocessor import MetadataReplaceme...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.data_elements.bbox import bbox2result from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class YOLACT(SingleStageDetector): """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689...
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import bbox2result from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class YOLACT(SingleStageDetector): """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_""" de...
# Copyright (c) OpenMMLab. All rights reserved. from .gaussian_target import (gather_feat, gaussian_radius, gen_gaussian_target, get_local_maximum, get_topk_from_heatmap, transpose_and_gather_feat) from .image import imrenormalize from .make_divisible import m...
# Copyright (c) OpenMMLab. All rights reserved. from .gaussian_target import (gather_feat, gaussian_radius, gen_gaussian_target, get_local_maximum, get_topk_from_heatmap, transpose_and_gather_feat) from .image import imrenormalize from .make_divisible import m...
import numpy as np import torch from docarray import BaseDocument from docarray.base_document import AnyDocument from docarray.typing import ( AnyEmbedding, AnyUrl, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchTensor, ) def test_proto_all_types(): class Mymmdoc(Bas...
import numpy as np import torch from docarray import BaseDocument from docarray.document import AnyDocument from docarray.typing import ( AnyEmbedding, AnyUrl, ImageUrl, Mesh3DUrl, NdArray, PointCloud3DUrl, TextUrl, TorchTensor, ) def test_proto_all_types(): class Mymmdoc(BaseDocu...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class GaussianDropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_gaussian_dropout_basics(self): self.run_layer_test( layers.GaussianDropou...
import numpy as np import pytest from keras.src import backend from keras.src import layers from keras.src import testing class GaussianDropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_gaussian_dropout_basics(self): self.run_layer_test( layers.GaussianDropou...
import torch from keras.src.optimizers.base_optimizer import BaseOptimizer from keras.src.utils import torch_utils class TorchParallelOptimizer(BaseOptimizer): @torch_utils.no_grad def _backend_update_step(self, grads, trainable_variables, learning_rate): self._parallel_update_step( grads...
import torch from keras.src.optimizers.base_optimizer import BaseOptimizer from keras.src.utils import torch_utils class TorchParallelOptimizer(BaseOptimizer): @torch_utils.no_grad def _backend_update_step(self, grads, trainable_variables, learning_rate): self._parallel_update_step( grads...
"""A class for JAX specific optimizer logic. Its purpose is to route around statelessness requirements in cond ops used for EMA handling and gradient accumulation handling. We do this by skipping conditionals entirely. """ import jax from jax import numpy as jnp from keras.src.optimizers import base_optimizer clas...
"""A class for JAX specific optimizer logic. Its purpose is to route around statelessness requirements in cond ops used for EMA handling and gradient accumulation handling. We do this by skipping conditionals entirely. """ import jax from jax import numpy as jnp from keras.src.optimizers import base_optimizer clas...
from __future__ import annotations from abc import abstractmethod from typing import Any import torch from tokenizers import Tokenizer from transformers.tokenization_utils_base import PreTrainedTokenizerBase from sentence_transformers.models.Module import Module class InputModule(Module): """ Subclass of :...
from __future__ import annotations from abc import abstractmethod from typing import Any import torch from tokenizers import Tokenizer from transformers.tokenization_utils_base import PreTrainedTokenizerBase from sentence_transformers.models.Module import Module class InputModule(Module): """ Subclass of :...
from typing import Dict, List, Optional, Set import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import Image class InnerDoc(BaseDocument): integer: int inner_list: List class MMDoc(BaseDocument): text: str = '' price: int = 0 categories: Optional[List[str]] ...
import pytest from typing import Optional, List, Dict, Set from docarray import BaseDocument, DocumentArray from docarray.documents import Image class InnerDoc(BaseDocument): integer: int l: List class MMDoc(BaseDocument): text: str = '' price: int = 0 categories: Optional[List[str]] = None ...
import io import warnings from abc import ABC from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.utils.misc import is_notebook class AbstractImageTensor(AbstractTensor, ABC): def to_bytes(self, format: str = 'PNG') -> bytes: """ Convert image tensor to bytes. ...
import io from abc import ABC from docarray.typing.tensor.abstract_tensor import AbstractTensor class AbstractImageTensor(AbstractTensor, ABC): def to_bytes(self, format: str = 'PNG') -> bytes: """ Convert image tensor to bytes. :param format: the image format use to store the image, can...
# dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (not support LMDB and Memcache yet) # data_root = 's3://openmmlab/datasets/segmentation/citysca...
# dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='RandomResize', scale=[(2048, 800), (2048, 1024)], keep_ratio=True), d...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist MODULE2PACKAGE = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegm...
# Copyright (c) OpenMMLab. All rights reserved. import ast import os.path as osp import re import warnings from typing import Tuple from mmengine.fileio import load from mmengine.utils import check_file_exist PKG2PROJECT = { 'mmcls': 'mmcls', 'mmdet': 'mmdet', 'mmdet3d': 'mmdet3d', 'mmseg': 'mmsegment...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# flake8: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LI...
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg BBOX_ASSIGNERS = Registry('bbox_assigner') BBOX_SAMPLERS = Registry('bbox_sampler') BBOX_CODERS = Registry('bbox_coder') def build_assigner(cfg, **default_args): """Builder of box assigner.""" return build_from_cf...
from mmcv.utils import Registry, build_from_cfg BBOX_ASSIGNERS = Registry('bbox_assigner') BBOX_SAMPLERS = Registry('bbox_sampler') BBOX_CODERS = Registry('bbox_coder') def build_assigner(cfg, **default_args): """Builder of box assigner.""" return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) def build...
import numpy as np import pytest from keras.src import layers from keras.src import ops from keras.src import testing class AutoContrastTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_layer(self): self.run_layer_test( layers.AutoContrast, init_kwargs={...
import numpy as np import pytest from absl.testing import parameterized from keras.src import layers from keras.src import ops from keras.src import testing class AutoContrastTest(testing.TestCase, parameterized.TestCase): @pytest.mark.requires_trainable_backend def test_layer(self): self.run_layer_t...
from typing import Literal from pydantic import SecretStr from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput from backend.integrations.providers import ProviderName JinaCredentials = APIKeyCredentials JinaCredentialsInput = CredentialsMetaInput[ Literal[ProviderName.JINA], ...
from typing import Literal from pydantic import SecretStr from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput JinaCredentials = APIKeyCredentials JinaCredentialsInput = CredentialsMetaInput[ Literal["jina"], Literal["api_key"], ] TEST_CREDENTIALS = APIKeyCredentials( ...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.ndarray import NdArray @_register_proto(proto_type_name='audio_ndarray') class AudioNdArray(AbstractAudioTensor, NdArray): """ Subclass of N...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor from docarray.typing.tensor.ndarray import NdArray @_register_proto(proto_type_name='audio_ndarray') class AudioNdArray(AbstractAudioTensor, NdArray): """ Subclass of N...
import os import urllib.parse from typing import Dict, Optional, Union from llama_index.core.base.llms.generic_utils import ( get_from_param_or_env, ) # Import SecretStr directly from pydantic # since there is not one in llama_index.core.bridge.pydantic try: from pydantic.v1 import SecretStr except ImportErr...
import os import urllib.parse from typing import Dict, Optional, Union from llama_index.core.base.llms.generic_utils import ( get_from_param_or_env, ) # Import SecretStr directly from pydantic # since there is not one in llama_index.core.bridge.pydantic try: from pydantic.v1 import SecretStr except ImportErr...
import pytest from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import TextDoc from docarray.utils._internal.pydantic import is_pydantic_v2 @pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now") def test_simple_init(): t = TextDoc(text='hello') ...
from pydantic import parse_obj_as from docarray import BaseDoc from docarray.documents import TextDoc def test_simple_init(): t = TextDoc(text='hello') assert t.text == 'hello' def test_str_init(): t = parse_obj_as(TextDoc, 'hello') assert t.text == 'hello' def test_doc(): class MyDoc(BaseDoc...
"""Generation output schema.""" from __future__ import annotations from typing import Any, Literal, Optional from langchain_core.load import Serializable from langchain_core.utils._merge import merge_dicts class Generation(Serializable): """A single text generation output. Generation represents the respon...
"""Generation output schema.""" from __future__ import annotations from typing import Any, Literal, Optional from pydantic import computed_field from langchain_core.load import Serializable from langchain_core.utils._merge import merge_dicts class Generation(Serializable): """A single text generation output. ...
from workflows.retry_policy import RetryPolicy, ConstantDelayRetryPolicy # noqa
from typing import Protocol, Optional, runtime_checkable @runtime_checkable class RetryPolicy(Protocol): def next( self, elapsed_time: float, attempts: int, error: Exception ) -> Optional[float]: """ Decides if we should make another retry, returning the number of seconds to wait befor...
from typing import Any, Optional from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore from llama_index.storage.kvstore.couchbase import CouchbaseKVStore from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE class CouchbaseDocumentStore(KVDocumentStore): """ Couchbase ...
from typing import Any, Optional from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore from llama_index.storage.kvstore.couchbase import CouchbaseKVStore from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE class CouchbaseDocumentStore(KVDocumentStore): """ Couchbase ...
from base64 import b64encode from typing import Optional from urllib.parse import urlencode from backend.data.model import OAuth2Credentials from backend.integrations.providers import ProviderName from backend.util.request import Requests from .base import BaseOAuthHandler class NotionOAuthHandler(BaseOAuthHandler)...
from base64 import b64encode from typing import Optional from urllib.parse import urlencode from backend.data.model import OAuth2Credentials from backend.integrations.providers import ProviderName from backend.util.request import requests from .base import BaseOAuthHandler class NotionOAuthHandler(BaseOAuthHandler)...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
from typing import Optional from docarray import Document, DocumentArray from pydantic import BaseModel from uvicorn import Config, Server from jina import Gateway, __default_host__ from jina.clients.request import request_generator class DummyResponseModel(BaseModel): arg1: Optional[str] arg2: Optional[str...
import os import httpx import pytest from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.embeddings.cohere import CohereEmbedding def test_embedding_class(): emb = CohereEmbedding(api_key="token") assert isinstance(emb, BaseEmbedding) @pytest.mark.skipif( os.environ.get("C...
import os import httpx import pytest from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.embeddings.cohere import CohereEmbedding def test_embedding_class(): emb = CohereEmbedding(api_key="token") assert isinstance(emb, BaseEmbedding) @pytest.mark.skipif( os.environ.get("C...
from llama_index_instrumentation.base import BaseInstrumentationHandler # noqa
from abc import ABC, abstractmethod class BaseInstrumentationHandler(ABC): @classmethod @abstractmethod def init(cls) -> None: """Initialize the instrumentation handler."""
from .tensor import flush_ndarray, read_ndarray
from collections import defaultdict from typing import TYPE_CHECKING, Optional from google.protobuf.json_format import MessageToDict from google.protobuf.struct_pb2 import Struct from docarray.proto.io.ndarray import flush_ndarray, read_ndarray from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto if T...
# THIS FILE HAS BEEN AUTOGENERATED. To update: # 1. modify the `_deps` dict in setup.py # 2. run `make deps_table_update`` deps = { "Pillow": "Pillow>=10.0.1,<=15.0", "accelerate": "accelerate>=0.26.0", "av": "av", "beautifulsoup4": "beautifulsoup4", "blobfile": "blobfile", "codecarbon": "codeca...
# THIS FILE HAS BEEN AUTOGENERATED. To update: # 1. modify the `_deps` dict in setup.py # 2. run `make deps_table_update`` deps = { "Pillow": "Pillow>=10.0.1,<=15.0", "accelerate": "accelerate>=0.26.0", "av": "av", "beautifulsoup4": "beautifulsoup4", "blobfile": "blobfile", "codecarbon": "codeca...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseAnglELoss(SparseCoSENTLoss): def __init__(self, model: Spars...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseAnglELoss(SparseCoSENTLoss): def __init__(self, model: Spars...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
from typing import TYPE_CHECKING, Any from langchain._api import create_importer if TYPE_CHECKING: from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit # Create a way to dynamically look up deprecated imports. # Used to consolidate logic for raising deprecation warnings and # handling op...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type...
import numpy as np import pytest from docarray import BaseDoc, DocArray from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDoc): text: str tensor: NdArray da = DocArray( [CustomDoc(text=...
import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc, TextDoc from docarray.typing import NdArray @pytest.mark.proto def test_simple_proto(): class CustomDoc(BaseDocument): text: str tensor: NdArray da = DocumentArray( ...
# Copyright (c) OpenMMLab. All rights reserved. from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR, LinearLR, MultiStepLR, PolyLR, StepLR) from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum, ExponentialMomentum, Lin...
# Copyright (c) OpenMMLab. All rights reserved. from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR, LinearLR, MultiStepLR, StepLR) from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum, ExponentialMomentum, LinearMomen...
_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py' model = dict( # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], ...
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], ...
_base_ = 'ssd300_coco.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), ...
_base_ = 'ssd300_coco.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), ...
_base_ = './cascade-rcnn_r50-caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
"""Lit configuration to drive test in this repo.""" # Copyright 2020 The OpenXLA Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # ...
"""Lit configuration to drive test in this repo.""" # Copyright 2020 The OpenXLA Authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # ...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.ndarray import NdArray MAX_INT_16 = 2**15 @_register_proto(proto_type_name='image_ndarray') class ImageNdArray(AbstractImageTensor, NdArray): "...
from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor from docarray.typing.tensor.ndarray import NdArray MAX_INT_16 = 2**15 @_register_proto(proto_type_name='image_ndarray') class ImageNdArray(AbstractImageTensor, NdArray): "...
from keras.src import backend from keras.src.api_export import keras_export from keras.src.backend.common import KerasVariable if backend.backend() == "tensorflow": BackendVariable = backend.tensorflow.core.Variable backend_name_scope = backend.tensorflow.core.name_scope elif backend.backend() == "jax": Ba...
from keras.src import backend from keras.src.api_export import keras_export if backend.backend() == "tensorflow": BackendVariable = backend.tensorflow.core.Variable backend_name_scope = backend.tensorflow.core.name_scope elif backend.backend() == "jax": BackendVariable = backend.jax.core.Variable backe...
# training schedule for 2x train_cfg = dict(by_epoch=True, max_epochs=24) val_cfg = dict(interval=1) test_cfg = dict() # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=24, ...
# training schedule for 1x train_cfg = dict(by_epoch=True, max_epochs=24) val_cfg = dict(interval=1) test_cfg = dict() # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), dict( type='MultiStepLR', begin=0, end=24, ...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
import pytest from docarray import DocumentArray, Document from docarray.array.weaviate import DocumentArrayWeaviate import numpy as np @pytest.fixture() def docs(): return DocumentArray([Document(id=f'{i}') for i in range(1, 10)]) @pytest.mark.parametrize( 'to_delete', [ 0, 1, ...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import os from logging import getLogger from typing import List from sentencepiece import SentencePieceProcessor logger = getLogger() class Tokenizer:...
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import os from logging import getLogger from typing import List from sentencepiece import SentencePieceProcessor logger = getLogger() class Tokenizer:...
__version__ = "2.6.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_encoder.CrossEncoder import Cross...
__version__ = "2.6.0.dev0" __MODEL_HUB_ORGANIZATION__ = "sentence-transformers" from .datasets import SentencesDataset, ParallelSentencesDataset from .LoggingHandler import LoggingHandler from .SentenceTransformer import SentenceTransformer from .readers import InputExample from .cross_encoder.CrossEncoder import Cross...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmdet.apis import inference_detector, init_detector def parse_args(): parser = argparse.ArgumentParser(description='MMDetection video demo') parser.add_argument('video', help='Video file') parser.add_argument('co...
import argparse import cv2 import mmcv from mmdet.apis import inference_detector, init_detector def parse_args(): parser = argparse.ArgumentParser(description='MMDetection video demo') parser.add_argument('video', help='Video file') parser.add_argument('config', help='Config file') parser.add_argume...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
""" Top-level module of Jina. The primary function of this module is to import all of the public Jina interfaces into a single place. The interfaces themselves are located in sub-modules, as described below. """ import os as _os import platform as _platform import signal as _signal import sys as _sys import warnings...
import argparse import pathlib import re import shutil def main(args): if args.scala_version == "2.12": scala_ver = "2.12" scala_patchver = "2.12.18" elif args.scala_version == "2.13": scala_ver = "2.13" scala_patchver = "2.13.11" else: raise ValueError(f"Unsupporte...
import argparse import pathlib import re import shutil def main(args): if args.scala_version == "2.12": scala_ver = "2.12" scala_patchver = "2.12.18" elif args.scala_version == "2.13": scala_ver = "2.13" scala_patchver = "2.13.11" else: raise ValueError(f"Unsupporte...
import asyncio from typing import Any, AsyncGenerator, Optional from llama_index.core.workflow.context import Context from llama_index.core.workflow.errors import WorkflowDone from llama_index.core.workflow.events import Event, StopEvent from .types import RunResultT from .utils import BUSY_WAIT_DELAY class Workflo...
import asyncio from typing import Any, AsyncGenerator, Optional from llama_index.core.workflow.context import Context from llama_index.core.workflow.errors import WorkflowDone from llama_index.core.workflow.events import Event, StopEvent from .utils import BUSY_WAIT_DELAY class WorkflowHandler(asyncio.Future): ...
import itertools import os.path import pytest import requests as req from docarray import Document, DocumentArray from jina import Client, Executor, Flow, requests from jina.helper import random_port PROTOCOLS = ['grpc', 'http', 'websocket'] cur_dir = os.path.dirname(__file__) class MyExecutor(Executor): @requ...
import itertools import os.path import pytest from docarray import Document, DocumentArray from jina import Client, Executor, Flow, requests from jina.helper import random_port PROTOCOLS = ['grpc', 'http', 'websocket'] cur_dir = os.path.dirname(__file__) class MyExecutor(Executor): @requests def foo(self, ...
from typing import Optional from typing_extensions import Protocol, runtime_checkable from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE __all__ = ["AsyncStager", "BlockingAsyncStager"] @runtime_checkable class ...
from typing import Optional from typing_extensions import Protocol, runtime_checkable from torch.distributed._state_dict_utils import _copy_state_dict, _create_cpu_state_dict from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE __all__ = ["AsyncStager", "BlockingAsyncStager"] @runtime_checkable class ...
# dataset settings dataset_type = 'CityscapesDataset' # TODO remove it after cityscape metric # data_root = '/mnt/lustre/luochunhua.vendor/openmmlab2.0/data/cityscapes/' data_root = 'data/cityscapes/' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True...
# dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( ...
from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_pure_tensor, register_kernel # usort: skip from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, get_dimensions_image, _get_dimensions_image_pil, get_dimensions_video, get_dimensions, ...
from torchvision.transforms import InterpolationMode # usort: skip from ._utils import is_pure_tensor, register_kernel # usort: skip from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, get_dimensions_image, _get_dimensions_image_pil, get_dimensions_video, get_dimensions, ...
"""Implementations of key-value stores and storage helpers. Module provides implementations of various key-value stores that conform to a simple key-value interface. The primary goal of these storages is to support implementation of caching. """ from typing import TYPE_CHECKING, Any from langchain_core.stores impor...
"""Implementations of key-value stores and storage helpers. Module provides implementations of various key-value stores that conform to a simple key-value interface. The primary goal of these storages is to support implementation of caching. """ from typing import TYPE_CHECKING, Any from langchain_core.stores impor...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from mmengine import MMLogger from mmengine.config import Config, DictAction from mmengine.dist import init_dist from mmengine.utils import mkdir_or_exist from mmdet.utils import register_all_modules from mmdet.utils.benchmark import (DataLoade...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import mmcv from mmcv import Config, DictAction from mmengine import MMLogger from mmengine.dist import init_dist from mmdet.utils import register_all_modules from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark, ...
# Copyright (c) OpenMMLab. All rights reserved. from .csp_darknet import CSPDarknet from .cspnext import CSPNeXt from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .efficientnet import EfficientNet from .hourglass import HourglassNet from .h...
# Copyright (c) OpenMMLab. All rights reserved. from .csp_darknet import CSPDarknet from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .efficientnet import EfficientNet from .hourglass import HourglassNet from .hrnet import HRNet from .mobil...
_base_ = [ '../_base_/models/faster-rcnn_r50-caffe-c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ]
_base_ = [ '../_base_/models/faster-rcnn_r50-caffe-c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ ...
import os from typing import Optional, Type import orjson from pydantic import BaseModel, Field from rich.console import Console from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode from docarray.base_document.mixins import IOMixin, Upd...
import os from typing import Type import orjson from pydantic import BaseModel, Field, parse_obj_as from rich.console import Console from docarray.base_document.base_node import BaseNode from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode from docarray.base_document.mixins import IOMixin,...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.api import _tf_keras from keras.api import activations from keras.api import applications from keras.api import backend from keras.api import callbacks from keras.api import config from k...
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", "SourceSeparationBundle", ]
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX __all__ = [ "CONVTASNET_BASE_LIBRI2MIX", "EMFORMER_RNNT_BASE_MUSTC", "EMFORMER_RNNT_BASE_TEDLIUM3", ]
from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper class CashFlowStatementsSchema(BaseModel): """...
from typing import Optional, Type from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool from pydantic import BaseModel, Field from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper class CashFlowStatementsSchema(BaseModel): """...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] lang_model_name = 'bert-base-uncased' model = dict( type='GroundingDINO', num_queries=900, with_box_refine=True, as_two_stage=True, data_preprocessor=dict( type...
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] lang_model_name = 'bert-base-uncased' model = dict( type='GroundingDINO', num_queries=900, with_box_refine=True, as_two_stage=True, data_preprocessor=dict( type...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._augment import CutMix, JPEG, MixUp, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide from...
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip from . import functional # usort: skip from ._transform import Transform # usort: skip from ._augment import CutMix, MixUp, RandomErasing from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide from ._col...
"""Evaluator.""" from abc import abstractmethod from typing import Any, Optional, Sequence from llama_index.core.async_utils import asyncio_run from llama_index.core.base.response.schema import Response from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.prompts.mixin import PromptMixi...
"""Evaluator.""" from abc import abstractmethod from typing import Any, Optional, Sequence from llama_index.core.async_utils import asyncio_run from llama_index.core.base.response.schema import Response from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.prompts.mixin import PromptMixin...
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' model = dict( data_preprocessor=dict(pad_size_divisor=64), neck=dict( type='FPN_CARAFE', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5, start_level=0, end_level=-1, norm_cfg=None, ...
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( data_preprocessor=dict(pad_size_divisor=64), neck=dict( type='FPN_CARAFE', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5, start_level=0, end_level=-1, norm_cfg=None, ...