Instructions to use OpenGVLab/VideoChat-TPO with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use OpenGVLab/VideoChat-TPO with Transformers:
# Load model directly
from transformers import AutoModel
model = AutoModel.from_pretrained("OpenGVLab/VideoChat-TPO", trust_remote_code=True, dtype="auto")
- Notebooks
- Google Colab
- Kaggle
| import copy | |
| import re, ast | |
| from transformers import AutoConfig, LlamaConfig | |
| from transformers.configuration_utils import PretrainedConfig | |
| from transformers.utils import logging | |
| from easydict import EasyDict as MyEasyDict | |
| from importlib import import_module | |
| import os.path as osp | |
| import argparse | |
| import json | |
| from copy import deepcopy | |
| import sys | |
class VideoChatEConfig(PretrainedConfig):
    """Hugging Face configuration class for the VideoChatE (VideoChat-TPO) model.

    Stores the nested model hyper-parameter mapping as an ``EasyDict`` so that
    entries can be accessed with attribute syntax (``cfg.model_config.foo``).
    """

    # Identifier used by the transformers Auto* registration machinery.
    model_type = 'VideoChatE'

    def __init__(self, model_config=None, **kwargs):
        """
        Args:
            model_config: Mapping (or None) of model hyper-parameters; wrapped
                in an ``EasyDict`` and stored on ``self.model_config``.
            **kwargs: Forwarded unchanged to ``PretrainedConfig.__init__``.
        """
        super().__init__(**kwargs)
        # EasyDict gives attribute-style access to the nested config entries.
        self.model_config = MyEasyDict(model_config)