| input (string, 33–5k chars) | output (string, 32–5k chars) |
|---|---|
import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
Enable NVSHMEM device functions for Triton. It performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
... | import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
Enable NVSHMEM device functions for Triton. It performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
... |
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is... | import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is... |
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
MultiDataAspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_data_sampler import MultiDataSampler... | # Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import T... |
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_pr... | from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_pr... |
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: Dict[st... | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimen... |
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):... | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):... |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... |
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .language_models import * # noqa: F401,F403
from .layers import * # noqa: F401... | # Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
fro... |
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(Base... | """Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(Base... |
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalD... | from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDoc, DocList, DocVec
from docarray.typing import TorchTensor
from docarray.utils._internal._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDoc)
class MultiModalD... |
"""Defines utilities for switching audio backends"""
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_aud... | """Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
... |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... | """
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings... |
from docarray import Document, DocumentArray
import pytest
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
def test_delete_offset_success_sync_es_offset_index(deleted_elmnts, start_storage):
elastic_doc = DocumentArray(
storage='elastic... | from docarray import Document, DocumentArray
import pytest
@pytest.mark.parametrize('deleted_elmnts', [[0, 1], ['r0', 'r1']])
def test_delete_offset_success_sync_es_offset_index(deleted_elmnts, start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
... |
"""Tool for the Wolfram Alpha API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaQueryRun(BaseTool):
"""Tool that queries us... | """Tool for the Wolfram Alpha API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaQueryRun(BaseTool): # type: ignore[override]
... |
import contextlib
import os
import shutil
import time
from jina import DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.co... | import contextlib
import os
import shutil
import time
from jina import DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.co... |
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(enabled: bool = True, **kwargs):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.6.0 provide `... | # Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(enabled: bool = True, **kwargs):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.6.0 provide `... |
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
from llama_index.tools.mcp.utils import workflow_as_mcp, get_tools_from_mcp_url, aget_tools_from_mcp_url
__all__ = [
"McpToolSpec",
"BasicMCPClient",
"workflow_as_mcp",
"get_tools_from_mcp_url",
... | from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
__all__ = ["McpToolSpec", "BasicMCPClient"]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
... |
import pathlib
from typing import Any, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
pat... | import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint... |
import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: s... | import os
from typing import Dict
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
pytestmark = tm.timeout(30)
def comp_training_with_rank_objective(
dtrain: xgboost.DMatrix,
dtest: xgboost.DMatrix,
rank_objective: str,
metric_name: str,
tolerance: float = 1e-02... |
from __future__ import annotations
import csv
import logging
import os
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pa... | import csv
import logging
import os
from typing import List
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and cont... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='Image')
try:
import torch
torch_a... | from typing import Optional
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.e... |
"""Test CodeHierarchyNodeParser reading itself."""
from typing import Sequence
import pytest
from llama_index.core import SimpleDirectoryReader
from pytest import fixture
from llama_index.packs.code_hierarchy import CodeHierarchyNodeParser
from llama_index.core.text_splitter import CodeSplitter
from pathlib import Pa... | """Test CodeHierarchyNodeParser reading itself."""
from typing import Sequence
import pytest
from llama_index.core import SimpleDirectoryReader
from pytest import fixture
from llama_index.packs.code_hierarchy import CodeHierarchyNodeParser
from llama_index.core.text_splitter import CodeSplitter
from pathlib import Pat... |
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf'... | import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf'... |
from .sentence_encoder import TransformerSentenceEncoder
| from .sentence_encoder import TransformerSentenceEncoder |
import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import testing
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # Possible edge case
return example_array
class Const... | import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import testing
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # Possible edge case
return example_array
class Const... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import torch
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
# math.p... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import mmcv
import torch
from mmcv import Config
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
# math.pow(1024, 2) means 1 MB
assert_msg =... |
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..... | import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..... |
from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if T... | from collections import defaultdict
from typing import TYPE_CHECKING, Optional
from google.protobuf.json_format import MessageToDict
from google.protobuf.struct_pb2 import Struct
from docarray.proto.io.ndarray import flush_ndarray, read_ndarray
from docarray.proto.docarray_pb2 import NdArrayProto, DocumentProto
if T... |
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', back... | _base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFi... |
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, valid... | """
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, valid... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the ma... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the ma... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Rele... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases a... |
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussia... |
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from ... | import numpy as np
import torch
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from ... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser
from langchain_community.document_loaders.parsers.docai import DocAIParser
from langchain_community.document_loaders.parsers... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser
from langchain_community.document_loaders.parsers.docai import DocAIParser
from langchain_community.document_loaders.parsers... |
"""Weaviate Retry query engine pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.c... | """Weaviate Retry query engine pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.... |
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils... | from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils... |
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from import... | """**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from import... |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... | from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and i... |
import requests
import pytest
import os
from llama_index.core.readers.base import BaseReader
from llama_index.readers.whisper import WhisperReader
from io import BytesIO
AUDIO_URL = "https://science.nasa.gov/wp-content/uploads/2024/04/sounds-of-mars-one-small-step-earth.wav"
AUDIO_URL = "https://audio-samples.github.... | import requests
import pytest
import os
from llama_index.core.readers.base import BaseReader
from llama_index.readers.whisper import WhisperReader
from io import BytesIO
AUDIO_URL = "https://science.nasa.gov/wp-content/uploads/2024/04/sounds-of-mars-one-small-step-earth.wav"
AUDIO_URL = "https://audio-samples.github.... |
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_d... | from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_... |
from jina import Flow, Executor, requests
import pytest
class GoodExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self, **kwargs):
pass
class GoodExecutor2(Executor):
def __init__(self, metas, requests, runtime_args, dynamic_batching):... | from jina import Flow, Executor, requests
import pytest
class GoodExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self, **kwargs):
pass
class GoodExecutor2(Executor):
def __init__(self, metas, requests, runtime_args):
pass
... |
"""Meta-estimators for building composite models with transformers.
In addition to its current contents, this module will eventually be home to
refurbished versions of :class:`~sklearn.pipeline.Pipeline` and
:class:`~sklearn.pipeline.FeatureUnion`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier:... | """Meta-estimators for building composite models with transformers.
In addition to its current contents, this module will eventually be home to
refurbished versions of :class:`~sklearn.pipeline.Pipeline` and
:class:`~sklearn.pipeline.FeatureUnion`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier:... |
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .ListNetLoss import ListNetLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss imp... | from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRa... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GetElementsTool
from langchain_community.tools.playwright.get_elements import GetElementsToolInput
# Create a way to dynamically look up deprecated imports.
# Used to conso... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GetElementsTool
from langchain_community.tools.playwright.get_elements import GetElementsToolInput
# Create a way to dynamically look up deprecated imports.
# Used to conso... |
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely simila... | """
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely simila... |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'le... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'le... |
import pytest
import torch
from PIL import Image
from torchvision import datapoints
@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
image = datapoints.Image(data)
assert isinstance(image, torch.Tensor)
assert image.ndim == 3 ... | import pytest
import torch
from PIL import Image
from torchvision import datapoints
@pytest.mark.parametrize("data", [torch.rand(3, 32, 32), Image.new("RGB", (32, 32), color=123)])
def test_image_instance(data):
image = datapoints.Image(data)
assert isinstance(image, torch.Tensor)
assert image.ndim == 3 ... |
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
... | """
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
... |
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
from diffusers.utils.testing_utils import (
backend_empty_cache,
nu... | import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
from diffusers.utils.testing_utils import (
numpy_cosine_similarity_dis... |
# flake8: noqa
import numpy as np
from unittest import mock
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
def test_config(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-... | # flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
def test_config(self):
optimizer = Ftrl(
learning_rate=0.05,
learning_rate_power=-0.2,
initial_a... |
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegat... | from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegat... |
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... | import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval... |
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_box... | from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_spatial_size, is_simple_tensor
def query_bounding_box... |
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin... | import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jin... |
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... | """
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cann... |
from .flair_text import FlairTextEncoder
| from .flair_text import FlairTextEncoder |
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
... | import pytest
from typing import Optional, List, Dict, Set
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
l: List
class MMDoc(BaseDocument):
text: str = ''
price: in... |
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncodin... | from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchSh... |
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, get_max_musa_memory,
is_cuda_available, is_dipu_available, is_mlu_available,
is_mps_available, is_musa_available, is_npu_available,
is_npu_support_full_precisi... | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_dipu_available, is_mlu_available, is_mps_available,
is_npu_available, is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'i... |
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https:/... | import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.base_doc.doc import BaseDoc
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark... |
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate... | from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
... |
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook ... | # Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook ... |
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... | _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
... |
"""Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:... | """Argparser module for Deployment runtimes"""
import argparse
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:... |
"""Agent utils."""
from llama_index.core.llms import ChatMessage, TextBlock
from typing import List
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, m... | """Agent utils."""
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, memory: BaseMemory, verbose: bool = False
) -> None:
"""Add user step to memor... |
"""Embedded Tables Retriever w/ Unstructured.IO."""
import os
import pickle
from pathlib import Path
from typing import Any, Dict, Optional
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import UnstructuredElementNodeParser
fr... | """Embedded Tables Retriever w/ Unstructured.IO."""
import os
import pickle
from pathlib import Path
from typing import Any, Dict, Optional
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import UnstructuredElementNodeParser
fr... |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from go... | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_voter.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from go... |
# Copyright (c) OpenMMLab. All rights reserved.
from torch.autograd import Function
from torch.nn import functional as F
class SigmoidGeometricMean(Function):
"""Forward and backward function of geometric mean of two sigmoid
functions.
This implementation with analytical gradient function substitutes
... | # Copyright (c) OpenMMLab. All rights reserved.
from torch.nn import functional as F
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the... |
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.service import ServiceManager, ServiceNotFoundError
from llama_index.core.workflow.workflow i... | import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.service import ServiceManager, Servi... |
# there's a rather large issue with the pants build, it's only running tests
# with sources that are imported, which causes pytest markers to not be registered
# so we need to import pytest_asyncio manually here to ensure that the markers
# are registered
import pytest_asyncio # noqa: F401
# Set the default fixture l... | # there's a rather large issue with the pants build, it's only running tests
# with sources that are imported, which causes pytest markers to not be registered
# so we need to import pytest_asyncio manually here to ensure that the markers
# are registered
import pytest_asyncio # noqa: F401
|
import multiprocessing
import re
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING... | import multiprocessing
import re
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING... |
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.i... | from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.... |
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the t... | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the t... |
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
__all__ = ["get_bolded_text", "get_color_mapping", "get_colored_text", "print_text"]
| from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
__all__ = ["get_color_mapping", "get_colored_text", "get_bolded_text", "print_text"]
|
_base_ = './mask-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| _base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
... | import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
... |
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
... | # coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
... |
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked... | import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked... |
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
| _base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
fp16 = dict(loss_scale=512.)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "li... | import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import load_librispeech_item
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "li... |
def __getattr__(name: str):
if name == "Streamer":
import warnings
from torchaudio.io import StreamReader
warnings.warn(
f"{__name__}.{name} has been moved to torchaudio.io.StreamReader. Please use torchaudio.io.StreamReader",
DeprecationWarning,
)
... | _INITIALIZED = False
_LAZILY_IMPORTED = [
"Streamer",
"SourceStream",
"SourceAudioStream",
"SourceVideoStream",
"OutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
except OSError as err:
... |
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, ... | __copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, ... |
import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python ve... | import sys
from typing import Callable
import pytest
from langchain_core.runnables.base import RunnableLambda
from langchain_core.runnables.utils import (
get_function_nonlocals,
get_lambda_source,
indent_lines_after_first,
)
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python ve... |
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models.litellm import ChatLiteLLM
class TestLiteLLMStandard(ChatModelIntegrat... | """Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_community.chat_models.litellm import ChatLiteLLM
class TestLiteLLMStandard(ChatModelIntegrat... |
import logging
import platform
import warnings
from typing import Any, List, Optional, Type, Union
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class Shell... | import logging
import platform
import warnings
from typing import Any, List, Optional, Type, Union
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class Shell... |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
from jinahu... | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
from jinahu... |
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.orchestrate.deployments import Deployment
from jina.serve.runtimes.gateway.gateway import BaseGateway
def _get_all_pars... | import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.orchestrate.deployments import Deployment
from jina.serve.gateway import BaseGateway
def _get_all_parser(cls: Type['JAM... |
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type... | _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
... |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataSample
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`DatasetEvaluator` instances.
Args:
evaluators (Sequence[BaseEval... | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`DatasetEvaluator` instances.
Args:
evaluators (Sequence[BaseEvaluator]): The evaluators to compose.
... |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, ... | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, ... |
_base_ = './tood_r101_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(num_dcn=2))
| _base_ = './tood_r101_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(num_dcn=2))
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Run... | from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Run... |
import logging
import sentry_sdk
from backend.util.settings import Settings
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
def sentry_init():
sentry_dsn = Se... | import sentry_sdk
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(dsn=sentry_dsn, traces_sample_rate=1.0, profiles_sample_rate=1.0)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class Sparse... | from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: Spars... |
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
@property
def torch_params(self):
if not hasattr(self, "_torch_params"):
... | from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# ... |
__version__ = '0.31.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... | __version__ = '0.31.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()... |
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to ... | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2 norm
equal to ... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from torch import nn
from mmengine.model.efficient_conv_bn_eval import \
turn_on_efficient_conv_bn_eval_for_single_model
from mmengine.testing import assert_allclose
from mmengine.utils import is_installed
f... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from torch import nn
from mmengine.model.efficient_conv_bn_eval import \
turn_on_efficient_conv_bn_eval_for_single_model
from mmengine.testing import assert_allclose
from mmengine.utils import is_installed
f... |