input: string, lengths 33 to 5k
output: string, lengths 32 to 5k
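The rows below appear in before/after pairs: the first line of each pair is the input snippet, the second the corresponding output. A minimal sketch of iterating such pairs with the Hugging Face datasets library, assuming the rows are published as a dataset; the repository id below is a hypothetical placeholder, not an actual name:

from datasets import load_dataset

# Hypothetical repository id; substitute the real dataset name.
pairs = load_dataset("org/code-edit-pairs", split="train")

for row in pairs.select(range(3)):
    # Each row holds two code strings, per the input/output schema above.
    before, after = row["input"], row["output"]
    print(len(before), len(after))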
# dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' # Example of using a different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcache are not supported yet) # data_root = 's3://openmmlab/datasets/detection/coco/' # Meth...
# dataset settings dataset_type = 'CocoPanopticDataset' # data_root = 'data/coco/' # Example of using a different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcache are not supported yet) data_root = 's3://openmmlab/datasets/detection/coco/' # Meth...
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Router import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __in...
from __future__ import annotations try: from typing import Self except ImportError: from typing_extensions import Self import torch import transformers from PIL import Image from sentence_transformers.models.Asym import InputModule class CLIPModel(InputModule): save_in_root: bool = True def __init...
"""Code to help indexing data into a vectorstore. This package contains helper logic to help deal with indexing data into a vectorstore while avoiding duplicated content and over-writing content if it's unchanged. """ from importlib import import_module from typing import TYPE_CHECKING if TYPE_CHECKING: from lan...
"""Code to help indexing data into a vectorstore. This package contains helper logic to help deal with indexing data into a vectorstore while avoiding duplicated content and over-writing content if it's unchanged. """ from langchain_core.indexing.api import IndexingResult, aindex, index from langchain_core.indexing.b...
#!/usr/bin/env python3 """This is the preprocessing script for HuBERT model training. The script includes: - File list creation - MFCC/HuBERT feature extraction - KMeans clustering model training - Pseudo-label generation """ import logging from argparse import ArgumentParser, RawTextHelpFormatter from ...
#!/usr/bin/env python3 """This is the preprocessing script for HuBERT model training. The script includes: - File list creation - MFCC/HuBERT feature extraction - KMeans clustering model training - Pseudo-label generation """ import logging from argparse import ArgumentParser, RawTextHelpFormatter from ...
# Owner(s): ["module: inductor"] import ctypes import torch from torch._inductor.async_compile import AsyncCompile from torch._inductor.codecache import CUDACodeCache from torch._inductor.codegen.cuda.cuda_env import nvcc_exist from torch._inductor.exc import CUDACompileError from torch._inductor.test_case import Tes...
# Owner(s): ["module: inductor"] import ctypes import unittest import torch from torch._inductor import config from torch._inductor.async_compile import AsyncCompile from torch._inductor.codecache import CUDACodeCache from torch._inductor.codegen.cuda.cuda_env import nvcc_exist from torch._inductor.exc import CUDACom...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import ConfigType, OptConfigType, OptMultiConfig from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class AutoAssign(SingleStageDetector): """Implementation of `AutoAssign: Differentiable Label A...
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.registry import MODELS from .single_stage import SingleStageDetector @MODELS.register_module() class AutoAssign(SingleStageDetector): """Implementation of `AutoAssign: Differentiable Label Assignment for Dense Object Detection <https://arxiv.org/abs/2...
from __future__ import annotations from typing import Any, Optional, Union import torch from ._datapoint import Datapoint class Video(Datapoint): """[BETA] :class:`torch.Tensor` subclass for videos. Args: data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`. ...
from __future__ import annotations from typing import Any, Optional, Union import torch from ._datapoint import Datapoint class Video(Datapoint): """[BETA] :class:`torch.Tensor` subclass for videos. Args: data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`. ...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers import SentenceTransformer, util class DistillKLDivLoss(nn.Module): # TODO def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> ...
from __future__ import annotations from collections.abc import Iterable import torch from torch import Tensor, nn from sentence_transformers import SentenceTransformer, util class DistillKLDivLoss(nn.Module): # TODO def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> ...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.datasets.builder import build_dataset from mmdet.models.utils import mask2ndarray from mmdet.registry import VISUALIZERS from mmdet.utils import register_...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.utils import mask2ndarray from mmdet.datasets.builder import build_dataset from mmdet.registry import VISUALIZERS from mmdet.utils import register_al...
"""Create a key-value store for any langchain serializable object.""" from typing import Callable, Optional from langchain_core.documents import Document from langchain_core.load import Serializable, dumps, loads from langchain_core.stores import BaseStore, ByteStore from langchain.storage.encoder_backed import Enco...
"""Create a key-value store for any langchain serializable object.""" from typing import Callable, Optional from langchain_core.documents import Document from langchain_core.load import Serializable, dumps, loads from langchain_core.stores import BaseStore, ByteStore from langchain.storage.encoder_backed import Enco...
from typing import Any, Dict import torch from torchvision.transforms.v2 import functional as F, Transform class UniformTemporalSubsample(Transform): """Uniformly subsample ``num_samples`` indices from the temporal dimension of the video. Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` ...
from typing import Any, Dict import torch from torchvision.transforms.v2 import functional as F, Transform class UniformTemporalSubsample(Transform): """Uniformly subsample ``num_samples`` indices from the temporal dimension of the video. Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest import torch from mmengine.config import Config from mmdet.data_elements import DetDataSample from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead class TestMaskFormerFusionHead(unittest.TestCase): def test_loss(self): ...
# Copyright (c) OpenMMLab. All rights reserved. import unittest import torch from mmengine.config import Config from mmdet.core.data_structures import DetDataSample from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead class TestMaskFormerFusionHead(unittest.TestCase): def test_loss(sel...
import os from typing import Optional import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from tests import TOYDATA_DIR @pytest.fixture() def nested_doc_cls(): class MyDoc(BaseDoc): count: Optional[int] text: str class MyDocNested(MyDoc): imag...
import os from typing import Optional import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from tests import TOYDATA_DIR @pytest.fixture() def nested_doc_cls(): class MyDoc(BaseDoc): count: Optional[int] text: str class MyDocNested(MyDoc): imag...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_ct_from_file.py path/to/sentences.txt """ import gzip import...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. CT will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_ct_from_file.py path/to/sentences.txt """ import math from se...
from typing import Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray') @_register_p...
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union import numpy as np from docarray.typing.proto_register import _register_proto from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin T = TypeVar('T', bound='VideoNdArray')...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
from typing import Optional import pandas as pd import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc @pytest.fixture() def nested_doc_cls(): class MyDoc(BaseDoc): count: Optional[int] text: str class MyDocNested(MyDoc): image: ImageDoc ret...
from typing import Optional import pandas as pd import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc @pytest.fixture() def nested_doc_cls(): class MyDoc(BaseDoc): count: Optional[int] text: str class MyDocNested(MyDoc): image: ImageDoc ret...
from typing import List import torch import torchaudio.prototype.transforms as T from torch.autograd import gradcheck, gradgradcheck from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin class Autograd(TestBaseMixin): def assert_grad( self, tra...
from typing import List import torch import torchaudio.prototype.transforms as T from torch.autograd import gradcheck, gradgradcheck from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin class Autograd(TestBaseMixin): def assert_grad( self, tra...
# Copyright (c) OpenMMLab. All rights reserved. from .cityscapes_metric import CityScapesMetric from .coco_metric import CocoMetric from .coco_occluded_metric import CocoOccludedSeparatedMetric from .coco_panoptic_metric import CocoPanopticMetric from .crowdhuman_metric import CrowdHumanMetric from .dump_det_results im...
# Copyright (c) OpenMMLab. All rights reserved. from .cityscapes_metric import CityScapesMetric from .coco_metric import CocoMetric from .coco_panoptic_metric import CocoPanopticMetric from .crowdhuman_metric import CrowdHumanMetric from .dump_proposals_metric import DumpProposals from .lvis_metric import LVISMetric fr...
import re from typing import Any from langchain.evaluation.schema import StringEvaluator class RegexMatchStringEvaluator(StringEvaluator): """Compute a regex match between the prediction and the reference. Examples ---------- >>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE) >>> eva...
import re from typing import Any from langchain.evaluation.schema import StringEvaluator class RegexMatchStringEvaluator(StringEvaluator): """Compute a regex match between the prediction and the reference. Examples ---------- >>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE) >>> eva...
import shutil import pytest import os import numpy as np import PIL.Image as Image from jina import DocumentArray, Document from ...big_transfer import BigTransferEncoder directory = os.path.dirname(os.path.realpath(__file__)) def test_initialization_and_model_download(): shutil.rmtree('pretrained', ignore_er...
import shutil import pytest import os import numpy as np import PIL.Image as Image from jina import DocumentArray, Document from jinahub.image.encoder.big_transfer import BigTransferEncoder directory = os.path.dirname(os.path.realpath(__file__)) def test_initialization_and_model_download(): shutil.rmtree('pre...
"""scrapegraph tool specification module for web scraping operations.""" from typing import Dict, List, Optional from pydantic import BaseModel from scrapegraph_py import Client from llama_index.core.tools.tool_spec.base import BaseToolSpec class ScrapegraphToolSpec(BaseToolSpec): """scrapegraph tool specifica...
"""scrapegraph tool specification module for web scraping operations.""" from typing import Dict, List, Optional from pydantic import BaseModel from scrapegraph_py import Client from llama_index.core.tools.tool_spec.base import BaseToolSpec class ScrapegraphToolSpec(BaseToolSpec): """scrapegraph tool specifica...
from docarray.typing.bytes.audio_bytes import AudioBytes from docarray.typing.bytes.image_bytes import ImageBytes from docarray.typing.bytes.video_bytes import VideoBytes __all__ = ['ImageBytes', 'VideoBytes', 'AudioBytes']
from docarray.typing.bytes.image_bytes import ImageBytes __all__ = ['ImageBytes']
from llama_index_instrumentation.events.span import SpanDropEvent # noqa
from llama_index.core.instrumentation.events.base import BaseEvent class SpanDropEvent(BaseEvent): """ SpanDropEvent. Args: err_str (str): Error string. """ err_str: str @classmethod def class_name(cls) -> str: """Class name.""" return "SpanDropEvent"
from .bifpn import BiFPN from .efficientdet import EfficientDet from .efficientdet_head import EfficientDetSepBNHead from .huber_loss import HuberLoss from .tensorflow.anchor_generator import YXYXAnchorGenerator from .tensorflow.coco_90class import Coco90Dataset from .tensorflow.coco_90metric import Coco90Metric from ....
from .anchor_generator import YXYXAnchorGenerator from .bifpn import BiFPN from .coco_90class import Coco90Dataset from .coco_90metric import Coco90Metric from .efficientdet import EfficientDet from .efficientdet_head import EfficientDetSepBNHead from .trans_max_iou_assigner import TransMaxIoUAssigner from .yxyx_bbox_c...
""" This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in '.github/workflows/pr-labels.yml'. Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external to torchaudio with no labeling responsibility, so we don't want t...
""" This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in '.github/workflows/pr-labels.yml'. Note: we only ping the person who pulls the pr, not the reviewers, as the reviewers can sometimes be external to torchaudio with no labeling responsibility, so we don't want t...
"""Test in memory docstore.""" from langchain.output_parsers.regex_dict import RegexDictParser DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"} DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"} DEF_README = """We have just received a new result from ...
"""Test in memory docstore.""" from langchain.output_parsers.regex_dict import RegexDictParser DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"} DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"} DEF_README = """We have just received a new result from ...
# Copyright (c) OpenMMLab. All rights reserved. from .assigners import * # noqa: F401,F403 from .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS, BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS, PRIOR_GENERATORS, build_anchor_generator, build_assigner, ...
# Copyright (c) OpenMMLab. All rights reserved. from .assigners import * # noqa: F401,F403 from .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS, BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS, PRIOR_GENERATORS, build_anchor_generator, build_assigner, ...
import os import numpy as np import pytest import keras from keras.src import testing from keras.src.saving.file_editor import KerasFileEditor def get_source_model(): inputs = keras.Input((2,)) x = keras.layers.Dense(3, name="mydense")(inputs) outputs = keras.layers.Dense(3, name="output_layer")(x) ...
import os import numpy as np import pytest import keras from keras.src import testing from keras.src.saving.file_editor import KerasFileEditor def get_source_model(): inputs = keras.Input((2,)) x = keras.layers.Dense(3, name="mydense")(inputs) outputs = keras.layers.Dense(3, name="output_layer")(x) ...
from llama_index.core.llms import LLM from llama_index.multi_modal_llms.ollama import OllamaMultiModal def test_class(): names_of_base_classes = [b.__name__ for b in OllamaMultiModal.__mro__] assert LLM.__name__ in names_of_base_classes
from llama_index.core.multi_modal_llms.base import MultiModalLLM from llama_index.multi_modal_llms.ollama import OllamaMultiModal def test_class(): names_of_base_classes = [b.__name__ for b in OllamaMultiModal.__mro__] assert MultiModalLLM.__name__ in names_of_base_classes
# Copyright (c) OpenMMLab. All rights reserved. from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper, DeepSpeedOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, OptimWrapperDi...
# Copyright (c) OpenMMLab. All rights reserved. from mmengine.utils import is_installed from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS, AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper, DefaultOptimWrapperConstructor, OptimWrapper, ...
from typing import Optional import numpy as np import torch from docarray import DocumentArray from docarray.document import BaseDocument from docarray.typing import NdArray, TorchTensor def test_proto_simple(): class CustomDoc(BaseDocument): text: str doc = CustomDoc(text='hello') CustomDoc.f...
from typing import Optional import numpy as np import torch from docarray import DocumentArray from docarray.document import BaseDocument from docarray.typing import Tensor, TorchTensor def test_proto_simple(): class CustomDoc(BaseDocument): text: str doc = CustomDoc(text='hello') CustomDoc.fr...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. """ import logging from sentence_transformers import LoggingHandler, SentenceTransformer logging.basicConfig( format="%(asctime)s - %(message)s", dat...
""" This example starts multiple processes (1 per GPU), which encode sentences in parallel. This gives a near linear speed-up when encoding large text collections. """ from sentence_transformers import SentenceTransformer, LoggingHandler import logging logging.basicConfig( format="%(asctime)s - %(message)s", date...
import enum import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Data...
import enum import pathlib from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource fro...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
""" This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix) and create parallel sentences tsv files that can be used to extend existent sentence embedding models to new languages. The WikiMatrix mined parallel sentences from Wikipedia in various languages. ...
"""langchain-core version information and utilities.""" VERSION = "0.3.56"
"""langchain-core version information and utilities.""" VERSION = "0.3.56rc1"
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py' # dataset settings # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') input_si...
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') input_si...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( # use caffe img_norm data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], bgr_to_rgb=False, pad_size_divisor=32), backbone=dict( norm_cfg=dict(require...
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), styl...
import os import torch def average_checkpoints(last): avg = None for path in last: states = torch.load(path, map_location=lambda storage, loc: storage)["state_dict"] if avg is None: avg = states else: for k in avg.keys(): avg[k] += states[k] ...
import os import torch def average_checkpoints(last): avg = None for path in last: states = torch.load(path, map_location=lambda storage, loc: storage)["state_dict"] if avg is None: avg = states else: for k in avg.keys(): avg[k] += states[k] ...
from typing import Iterable, Type from docarray.array.abstract_array import AbstractDocumentArray from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin from docarray.document import AnyDocument, BaseDocument, BaseNode from docarray.document.abstract_document import AbstractDocument class Document...
from typing import Iterable, Type from docarray.document import AnyDocument, BaseDocument, BaseNode from docarray.document.abstract_document import AbstractDocument from .abstract_array import AbstractDocumentArray from .mixins import ProtoArrayMixin class DocumentArray( list, ProtoArrayMixin, AbstractD...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
import unittest import torch import torchaudio.prototype.functional as F from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script class TorchScriptConsistencyTestImpl(TestBaseMixin): def _assert_consistency(self, func, inputs, shape_only=False): inputs_ = [] for i i...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union from docarray.base_doc import BaseDoc from docarray.typing.tensor.tensor import AnyTensor from docarray.utils._internal.misc import import_library T = TypeVar('T', bound='VerticesAndFaces') class VerticesAndFaces(BaseDoc): """ Document for handling...
""" Sphinx Read the Docs theme. From https://github.com/ryan-roemer/sphinx-bootstrap-theme. """ from os import path import sphinx __version__ = "0.5.0" __version_full__ = __version__ def get_html_theme_path(): """Return list of HTML theme paths.""" cur_dir = path.abspath(path.dirname(path.dirname(__file__...
""" Sphinx Read the Docs theme. From https://github.com/ryan-roemer/sphinx-bootstrap-theme. """ from os import path import sphinx __version__ = '0.5.0' __version_full__ = __version__ def get_html_theme_path(): """Return list of HTML theme paths.""" cur_dir = path.abspath(path.dirname(path.dirname(__file_...
# Licensed to the LF AI & Data foundation under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the "License"); # you may not use this fil...
import numpy as np from docarray import BaseDoc from docarray.typing import NdArray def test_tensor_ops(): class A(BaseDoc): tensor: NdArray[3, 224, 224] class B(BaseDoc): tensor: NdArray[3, 112, 224] tensor = A(tensor=np.ones((3, 224, 224))).tensor tensord = A(tensor=np.ones((3, 22...
from typing import Dict, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from .ContrastiveLoss import SiameseDistanceMetric class OnlineContrastiveLoss(nn.Module): def __init__( self, model: SentenceTransfor...
from typing import Dict, Iterable import torch.nn.functional as F from torch import Tensor, nn from sentence_transformers.SentenceTransformer import SentenceTransformer from .ContrastiveLoss import SiameseDistanceMetric class OnlineContrastiveLoss(nn.Module): def __init__( self, model: SentenceTransfor...
from __future__ import annotations import re from typing import Optional from langchain_core.output_parsers import BaseOutputParser class RegexParser(BaseOutputParser[dict[str, str]]): """Parse the output of an LLM call using a regex.""" @classmethod def is_lc_serializable(cls) -> bool: return ...
from __future__ import annotations import re from typing import Optional from langchain_core.output_parsers import BaseOutputParser class RegexParser(BaseOutputParser[dict[str, str]]): """Parse the output of an LLM call using a regex.""" @classmethod def is_lc_serializable(cls) -> bool: return ...
"""Test in memory indexer.""" from collections.abc import AsyncGenerator, Generator import pytest from langchain_tests.integration_tests.indexer import ( AsyncDocumentIndexTestSuite, DocumentIndexerTestSuite, ) from langchain_core.documents import Document from langchain_core.indexing.base import DocumentInd...
"""Test in memory indexer.""" from collections.abc import AsyncGenerator, Generator import pytest from langchain_tests.integration_tests.indexer import ( AsyncDocumentIndexTestSuite, DocumentIndexerTestSuite, ) from langchain_core.documents import Document from langchain_core.indexing.base import DocumentInd...
import asyncio import time from multiprocessing import Event, Process import aiohttp import pytest from jina import DocumentArray, Executor, Flow, requests from jina.types.request.data import DataRequest from jina.helper import random_port INPUT_DA_LEN = 2 NUM_CLIENTS = 3 @pytest.fixture() def gateway_port(): p...
import asyncio import time from multiprocessing import Event, Process import aiohttp import pytest from jina import DocumentArray, Executor, Flow, requests from jina.types.request.data import DataRequest INPUT_DA_LEN = 2 NUM_CLIENTS = 3 GATEWAY_PORT = 12345 class DummyExecutor(Executor): @requests(on='/foo') ...
# Copyright (c) OpenMMLab. All rights reserved. from .conditional_detr_transformer import ( ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer) from .deformable_detr_transformer import ( DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer, DeformableDetrTransformer...
# Copyright (c) OpenMMLab. All rights reserved. from .conditional_detr_transformer import ( ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer) from .deformable_detr_transformer import ( DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer, DeformableDetrTransformer...
import functools import numbers from collections import defaultdict from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union from torchvision import datapoints from torchvision.datapoints._datapoint import _FillType, _FillTypeJIT from torchvision.transforms.transforms import _check_sequence_input, _setup...
import functools import numbers from collections import defaultdict from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union from torchvision import datapoints from torchvision.datapoints._datapoint import FillType, FillTypeJIT from torchvision.transforms.transforms import _check_sequence_input, _setup_a...
import os from torchaudio.datasets import librilight_limited from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase # Used to generate a unique transcript for each dummy audio file _NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NI...
import os from torchaudio.datasets import librilight_limited from torchaudio_unittest.common_utils import ( get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase, ) # Used to generate a unique transcript for each dummy audio file _NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", ...
"""Simple Reader that reads abstract of primary citation for a given PDB id.""" from typing import List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from llama_index.readers.pdb.utils import get_pdb_abstract class PdbAbstractReader(BaseReader): """Protein Dat...
"""Simple Reader that reads abstract of primary citation for a given PDB id.""" from typing import List from llama_index.core.readers.base import BaseReader from llama_index.core.schema import Document from llama_index.readers.pdb.utils import get_pdb_abstract class PdbAbstractReader(BaseReader): """Protein Data...
import enum from typing import Any, Callable, Dict, List, Tuple, Type, Union import PIL.Image import torch from torch import nn from torch.utils._pytree import tree_flatten, tree_unflatten from torchvision.prototype import features from torchvision.prototype.transforms._utils import _isinstance from torchvision.utils ...
import enum from typing import Any, Callable, Dict, Tuple, Type, Union import PIL.Image import torch from torch import nn from torch.utils._pytree import tree_flatten, tree_unflatten from torchvision.prototype import features from torchvision.prototype.transforms._utils import _isinstance from torchvision.utils import...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # Example of using a different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (LMDB and Memcache are not supported yet) # data_root = 's3://openmmlab/datasets/detection/coco/' # Method 2: Us...
# dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) file_client_args = dict(backend='disk') tra...
from docarray.documents.audio import AudioDoc from docarray.documents.image import ImageDoc from docarray.documents.mesh import Mesh3D, VerticesAndFaces from docarray.documents.point_cloud import PointCloud3D, PointsAndColors from docarray.documents.text import TextDoc from docarray.documents.video import VideoDoc __a...
from docarray.documents.audio import AudioDoc from docarray.documents.image import ImageDoc from docarray.documents.mesh import Mesh3D from docarray.documents.point_cloud import PointCloud3D from docarray.documents.text import TextDoc from docarray.documents.video import VideoDoc __all__ = ['TextDoc', 'ImageDoc', 'Aud...
import numpy as np from sentence_transformers import SparseEncoder from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling def main(): # Initialize the SPLADE model model_name = "naver/splade-cocondenser-ensembledistil" # "naver/efficient-splade-V-large-doc" # "prithivida/Spla...
import numpy as np from sentence_transformers.sparse_encoder import SparseEncoder from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling def main(): # Initialize the SPLADE model model_name = "naver/splade-cocondenser-ensembledistil" # "naver/efficient-splade-V-large-doc" # "...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
from typing import Optional from fastapi import FastAPI, Security from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest from fastapi.testclient import TestClient app = FastAPI() security = HTTPDigest(auto_error=False) @app.get("/users/me") def read_current_user( credentials: Optional[HTTPAuthor...
from typing import Optional from fastapi import FastAPI, Security from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest from fastapi.testclient import TestClient app = FastAPI() security = HTTPDigest(auto_error=False) @app.get("/users/me") def read_current_user( credentials: Optional[HTTPAuthor...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
"""DO NOT EDIT. This file was autogenerated. Do not edit it by hand, since your modifications would be overwritten. """ from keras.src.ops.nn import average_pool from keras.src.ops.nn import batch_normalization from keras.src.ops.nn import binary_crossentropy from keras.src.ops.nn import categorical_crossentropy from...
from typing import Any, Dict, Optional, Union import numpy as np import PIL.Image import torch from torchvision import tv_tensors from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import is_pure_tensor class PILToTensor(Transform): """[BETA] Convert a PIL Im...
from typing import Any, Dict, Optional, Union import numpy as np import PIL.Image import torch from torchvision import tv_tensors from torchvision.transforms.v2 import functional as F, Transform from torchvision.transforms.v2._utils import is_pure_tensor class PILToTensor(Transform): """[BETA] Convert a PIL Im...
import pathlib from typing import Any, Dict, List, Tuple, Union from torchdata.datapipes.iter import IterDataPipe, Mapper from torchvision.prototype.datapoints import Label from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.prototype.datasets.utils._in...
import pathlib from typing import Any, Dict, List, Tuple, Union from torchdata.datapipes.iter import IterDataPipe, Mapper from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling from to...
import pytest from hubble.executor.hubio import HubIO from jina.orchestrate.pods.factory import PodFactory from jina.parsers import set_pod_parser @pytest.mark.parametrize( 'uses', ['jinaai+docker://jina-ai/DummyExecutor'] ) def test_container_pod(mocker, monkeypatch, uses): mock = mocker.Mock() def _mo...
from hubble.executor.hubio import HubIO from jina.orchestrate.pods.factory import PodFactory from jina.parsers import set_pod_parser def test_container_pod(mocker, monkeypatch): mock = mocker.Mock() def _mock_pull(self): return 'docker://jinahub/dummy_executor' monkeypatch.setattr(HubIO, 'pull'...
import os from time import time import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc from docarray.typing import NdArray from docarray.utils.map import map_docs, map_docs_batch from tests.units.typing.test_bytes import IMAGE_PATHS pytestmark = [pyt...
import os from time import time import numpy as np import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import Image from docarray.typing import NdArray from docarray.utils.map import map_docs, map_docs_batch from tests.units.typing.test_bytes import IMAGE_PATHS pytestmark = [pytest...
from __future__ import annotations import math from pathlib import Path import pytest from packaging.version import Version, parse from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: import model2vec...
from __future__ import annotations import math from pathlib import Path import numpy as np import pytest from packaging.version import Version, parse from tokenizers import Tokenizer from sentence_transformers import SentenceTransformer from sentence_transformers.models.StaticEmbedding import StaticEmbedding try: ...
import multiprocessing import random import time from functools import partial import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests from jina.types.request.data import Response NUM_REQUESTS = 5 class MyExecutor(Executor): @requests(on='/ping') def ping(self, **kwargs): ...
import multiprocessing import random import time from functools import partial import pytest from jina import Client, Document, DocumentArray, Executor, Flow, requests from jina.types.request.data import Response NUM_REQUESTS = 5 class MyExecutor(Executor): @requests(on='/ping') def ping(self, **kwargs): ...
from typing import Generator, Optional import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import ImageDoc from docarray.typing import ImageUrl, NdArray from docarray.utils.map import map_docs, map_docs_batch from tests.units.typing.test_bytes import IMAGE_PATHS N_DOCS = 2 def lo...
from typing import Generator, Optional import pytest from docarray import BaseDocument, DocumentArray from docarray.documents import Image from docarray.typing import ImageUrl, NdArray from docarray.utils.map import map_docs, map_docs_batch from tests.units.typing.test_bytes import IMAGE_PATHS N_DOCS = 2 def load_...
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FSAF(SingleStageDetector): """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_""" def __init__(self, backbone, ...
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FSAF(SingleStageDetector): """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_""" def __init__(self, backbone, neck, bbox_head, ...
import enum import typing import pydantic import backend.data.graph from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash class Methods(enum.Enum): SUBSCRIBE = "subscribe" UNSUBSCRIBE = "unsubscribe" EXECUTION_EVENT = "execution_event" ERROR = "error" class WsMessage(pydantic.BaseMo...
import enum import typing import pydantic import backend.data.graph class Methods(enum.Enum): SUBSCRIBE = "subscribe" UNSUBSCRIBE = "unsubscribe" EXECUTION_EVENT = "execution_event" ERROR = "error" class WsMessage(pydantic.BaseModel): method: Methods data: typing.Dict[str, typing.Any] | li...
"""Run smoke tests""" import os from pathlib import Path from sys import platform import torch import torch.nn as nn import torchvision from torchvision.io import read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print(...
"""Run smoke tests""" import os from pathlib import Path from sys import platform import torch import torch.nn as nn import torchvision from torchvision.io import read_image from torchvision.models import resnet50, ResNet50_Weights SCRIPT_DIR = Path(__file__).parent def smoke_test_torchvision() -> None: print(...
_base_ = [ '../_base_/models/mask-rcnn_r50_fpn.py', '../common/lsj-100e_coco-instance.py' ] image_size = (1024, 1024) batch_augments = [ dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It ca...
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] image_size = (1024, 1024) batch_augments = [ dict(type='BatchFixedSizePad', size=image_size, pad_mask=True) ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It ca...
from docarray.typing.id import ID from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.typing.tensor.video import Vi...
from docarray.typing.id import ID from docarray.typing.tensor.audio import AudioNdArray from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding from docarray.typing.tensor.ndarray import NdArray from docarray.typing.tensor.tensor import AnyTensor from docarray.typing.tensor.video import Vi...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. SimCSE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_simcse_from_file.py path/to/sentences.txt """ import gzi...
""" This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file. SimCSE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder. Usage: python train_simcse_from_file.py path/to/sentences.txt """ from torch....
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def ...
from __future__ import annotations import json import os import torch from safetensors.torch import load_model as load_safetensors_model from safetensors.torch import save_model as save_safetensors_model from torch import nn class LSTM(nn.Module): """Bidirectional LSTM running over word embeddings.""" def ...
from __future__ import annotations from typing import Any, Dict, Optional from docarray import BaseDoc, DocArray from docarray.typing import AnyEmbedding, AnyTensor class LegacyDocument(BaseDoc): """ This Document is the LegacyDocument. It follows the same schema as in DocArray v1. It can be useful to s...
from __future__ import annotations from typing import Any, Dict, Optional from docarray import BaseDocument, DocumentArray from docarray.typing import AnyEmbedding, AnyTensor class LegacyDocument(BaseDocument): """ This Document is the LegacyDocument. It follows the same schema as in DocArray v1. It can...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import ATSSHead class TestATSSHead(TestCase): def test_atss_head_loss(self): """Tests a...
# Copyright (c) OpenMMLab. All rights reserved. from unittest import TestCase import torch from mmengine import Config from mmengine.data import InstanceData from mmdet import * # noqa from mmdet.models.dense_heads import ATSSHead class TestATSSHead(TestCase): def test_atss_head_loss(self): """Tests a...
""" Tests the correct computation of evaluation scores from BinaryClassificationEvaluator """ from __future__ import annotations import csv import gzip import os from pathlib import Path import pytest from torch.utils.data import DataLoader from sentence_transformers import ( InputExample, SentenceTransform...
""" Tests the correct computation of evaluation scores from BinaryClassificationEvaluator """ from __future__ import annotations import csv import gzip import os from pathlib import Path from torch.utils.data import DataLoader from sentence_transformers import ( InputExample, SentenceTransformer, evalua...
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssign...
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssign...
from collections import ChainMap from typing import ( TYPE_CHECKING, Any, Dict, Iterable, MutableMapping, Type, TypeVar, Union, ) from docarray.array.list_advance_indexing import ListAdvancedIndexing from docarray.typing import NdArray from docarray.typing.tensor.abstract_tensor import ...
from collections import ChainMap from typing import ( TYPE_CHECKING, Any, Dict, Iterable, MutableMapping, Type, TypeVar, Union, ) from docarray.array.doc_vec.list_advance_indexing import ListAdvancedIndexing from docarray.typing import NdArray from docarray.typing.tensor.abstract_tensor...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.10.3' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versi...
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '0.10.2' def parse_version_info(version_str): """Parse the version information. Args: version_str (str): version string like '0.1.0'. Returns: tuple: version information contains major, minor, micro version. """ versi...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import logging import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.logging import print_log from mmengine.registry import RUNNERS from mmengine.runner import Runner from mmdet.utils import setup_cache_size_limit_o...
# Copyright (c) OpenMMLab. All rights reserved. import argparse import logging import os import os.path as osp from mmengine.config import Config, DictAction from mmengine.logging import print_log from mmengine.registry import RUNNERS from mmengine.runner import Runner def parse_args(): parser = argparse.Argumen...
from typing import Any, Optional, Type, TypeVar, Union import numpy as np from docarray.base_document import BaseDocument from docarray.documents import Audio from docarray.typing import AnyEmbedding, AnyTensor from docarray.typing.tensor.abstract_tensor import AbstractTensor from docarray.typing.tensor.video.video_t...
from typing import Optional, TypeVar from docarray.base_document import BaseDocument from docarray.documents import Audio from docarray.typing import AnyEmbedding, AnyTensor from docarray.typing.tensor.video.video_tensor import VideoTensor from docarray.typing.url.video_url import VideoUrl T = TypeVar('T', bound='Vid...
import pytest from importlib.util import find_spec from llama_index.core.storage.kvstore.types import BaseKVStore from llama_index.storage.kvstore.postgres import PostgresKVStore no_packages = find_spec("psycopg2") is None or find_spec("sqlalchemy") is None or find_spec("asyncpg") is None def test_class...
from llama_index.core.storage.kvstore.types import BaseKVStore from llama_index.storage.kvstore.postgres import PostgresKVStore def test_class(): names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__] assert BaseKVStore.__name__ in names_of_base_classes
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
from datetime import datetime, timedelta from backend.blocks.hubspot._auth import ( HubSpotCredentials, HubSpotCredentialsField, HubSpotCredentialsInput, ) from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import SchemaField from backend.util.request impo...
from fastapi import FastAPI from backend.server.middleware.security import SecurityHeadersMiddleware from .routes.v1 import v1_router external_app = FastAPI( title="AutoGPT External API", description="External API for AutoGPT integrations", docs_url="/docs", version="1.0", ) external_app.add_middlew...
from fastapi import FastAPI from .routes.v1 import v1_router external_app = FastAPI( title="AutoGPT External API", description="External API for AutoGPT integrations", docs_url="/docs", version="1.0", ) external_app.include_router(v1_router, prefix="/v1")
from typing import Any, Dict import requests from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.response.schema import Response from llama_index.core.schema import QueryBundle class CogniswitchQueryEngine(BaseQueryEngine): def __init__(self, cs_token: str, OAI_token: s...
from typing import Any, Dict import requests from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.response.schema import Response from llama_index.core.schema import QueryBundle class CogniswitchQueryEngine(BaseQueryEngine): def __init__(self, cs_token: str, OAI_token: s...
from typing import Any, Dict, Union import torch from torchvision import transforms as _transforms from torchvision.prototype import datapoints from torchvision.prototype.transforms import functional as F, Transform from .utils import is_simple_tensor class ConvertBoundingBoxFormat(Transform): _transformed_typ...
from typing import Any, Dict, Union import torch from torchvision import transforms as _transforms from torchvision.prototype import datapoints from torchvision.prototype.transforms import functional as F, Transform from .utils import is_simple_tensor class ConvertBoundingBoxFormat(Transform): _transformed_typ...
import os from typing import List import pytest from llama_index.core.ingestion import IngestionPipeline from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import Document, TextNode from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.mongodb impo...
import os from typing import List import pytest from llama_index.core.ingestion import IngestionPipeline from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import Document, TextNode from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.mongodb impo...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from sentence_transformers.evaluation import TranslationEvaluator if TYPE_CHECKING: import numpy as np from torch import Tensor from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder logger = ...
"""Test yamlOutputParser""" from enum import Enum from typing import Optional import pytest from langchain_core.exceptions import OutputParserException from pydantic import BaseModel, Field from langchain.output_parsers.yaml import YamlOutputParser class Actions(Enum): SEARCH = "Search" CREATE = "Create" ...
"""Test yamlOutputParser""" from enum import Enum from typing import Optional import pytest from langchain_core.exceptions import OutputParserException from pydantic import BaseModel, Field from langchain.output_parsers.yaml import YamlOutputParser class Actions(Enum): SEARCH = "Search" CREATE = "Create" ...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest from mmengine.logging import HistoryBuffer array_method = [np.array, lambda x: x] try: import torch except ImportError: pass else: array_method.append(torch.tensor) @HistoryBuffer.register_statistics def custom_statistics(s...
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest from mmengine.logging import HistoryBuffer array_method = [np.array, lambda x: x] try: import torch except ImportError: pass else: array_method.append(torch.tensor) class TestLoggerBuffer: def test_init(self): ...
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' model = dict( # use caffe img_norm data_preprocessor=dict( type='DetDataPreprocessor', mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32), backbone=dict( norm_cfg=dict(require...
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' preprocess_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False, pad_size_divisor=32) model = dict( # use caffe img_norm preprocess_cfg=preprocess_cfg, backbone=dict( norm_cfg=dict(requires_grad=False), ...
from __future__ import annotations from typing import Any, Optional from langchain_core.outputs import LLMResult from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"] class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler...
from __future__ import annotations from typing import Any, Dict, List, Optional from langchain_core.outputs import LLMResult from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"] class AsyncFinalIteratorCallbackHandler(AsyncIteratorCal...
from typing import Optional from docarray.document import BaseDocument from docarray.typing import AnyTensor, Embedding, ImageUrl class Image(BaseDocument): """ Document for handling images. It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`), and an Embedding (`Image.embedding`)....
from typing import Optional from docarray.document import BaseDocument from docarray.typing import Embedding, ImageUrl, Tensor class Image(BaseDocument): """ Document for handling images. It can contain an ImageUrl (`Image.url`), a Tensor (`Image.tensor`), and an Embedding (`Image.embedding`). E...
import os from pathlib import Path from torchaudio.datasets.libritts import LIBRITTS from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase _UTTERANCE_IDS = [ [19, 198, "000000", "000000"], [26, 495, "000004", "000000"], ] _ORIGINAL_TEXT = "this ...
import os from pathlib import Path from torchaudio.datasets.libritts import LIBRITTS from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase _UTTERANCE_IDS = [ [19, 198, "000000", "000000"], [26, 495, "000004", "000000"], ] _ORIGINAL_TEXT = "this ...
import os import numpy as np import pytest from docarray import BaseDoc, DocList from docarray.documents import ImageDoc from docarray.typing import NdArray class MyDoc(BaseDoc): embedding: NdArray text: str image: ImageDoc @pytest.mark.slow @pytest.mark.parametrize( 'protocol', ['pickle-array', '...
import os import numpy as np import pytest from docarray import BaseDoc, DocArray from docarray.documents import ImageDoc from docarray.typing import NdArray class MyDoc(BaseDoc): embedding: NdArray text: str image: ImageDoc @pytest.mark.slow @pytest.mark.parametrize( 'protocol', ['pickle-array', ...
import base64 import json import pickle from abc import ABC, abstractmethod from typing import Any from pydantic import BaseModel from llama_index.core.schema import BaseComponent from .utils import import_module_from_qualified_name, get_qualified_name class BaseSerializer(ABC): @abstractmethod def serialize...
import base64 import json import pickle from abc import ABC, abstractmethod from typing import Any from pydantic import BaseModel from llama_index.core.schema import BaseComponent from .utils import import_module_from_qualified_name, get_qualified_name class BaseSerializer(ABC): @abstractmethod def serialize...
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Callable import pytest from jina import DocumentArray from ...transform_encoder import TransformerTorchEncoder MODELS_TO_TEST = [ 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',...
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' from typing import Dict, Callable import pytest from jina import DocumentArray from ...transform_encoder import TransformerTorchEncoder MODELS_TO_TEST = [ 'sentence-transformers/distilbert-base-nli-stsb-mean...
from __future__ import annotations from collections.abc import Iterable from torch import Tensor from sentence_transformers import util from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder cla...
from __future__ import annotations from sentence_transformers import util from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLo...
"""Module for argparse for Client""" def mixin_client_protocol_parser(parser): """Add the arguments for the protocol to the client parser :param parser: the parser configure """ from jina.enums import ProtocolType parser.add_argument( '--protocol', type=ProtocolType.from_string,...
"""Module for argparse for Client""" def mixin_client_protocol_parser(parser): """Add the arguments for the protocol to the client parser :param parser: the parser configure """ from jina.enums import ProtocolType parser.add_argument( '--protocol', type=ProtocolType.from_string,...
from jina import DocumentArray, Executor, requests class TestExecutor(Executor): @requests def process(self, docs: DocumentArray, **kwargs): return docs
from jina import Executor, requests, DocumentArray class TestExecutor(Executor): @requests def process(self, docs: DocumentArray, **kwargs): return docs