commit e41e421941 (no commit message)
Author: MATRIX\29620
Date: 2025-03-26 11:26:55 +08:00

1419 changed files with 129620 additions and 0 deletions

8 .idea/.gitignore generated vendored Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml


@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

4 .idea/misc.xml generated Normal file

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11" project-jdk-type="Python SDK" />
</project>

8 .idea/modules.xml generated Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/ultralytics-main.iml" filepath="$PROJECT_DIR$/.idea/ultralytics-main.iml" />
</modules>
</component>
</project>

15 .idea/ultralytics-main.iml generated Normal file

@@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="GOOGLE" />
<option name="myDocStringFormat" value="Google" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="py.test" />
</component>
</module>


@@ -0,0 +1,35 @@
# Python
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.py[cod]
*$py.class
.pytest_cache
.coverage
coverage.xml
.ruff_cache
*.egg-info
dist
build
# Development
.env
.venv
env/
venv/
ENV/
.idea
.vscode
*.swp
*.swo
.DS_Store
# Project specific
*.log
benchmarks.log
runs/
# Dependencies
node_modules/


@@ -0,0 +1,98 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🐛 Bug Report
# title: " "
description: Problems with Ultralytics YOLO
labels: [bug, triage]
type: "bug"
body:
- type: markdown
attributes:
value: |
Thank you for submitting an Ultralytics YOLO 🐛 Bug Report!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics [Docs](https://docs.ultralytics.com/) and [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar bug report already exists.
options:
- label: >
I have searched the Ultralytics YOLO [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report.
required: true
- type: dropdown
attributes:
label: Ultralytics YOLO Component
description: |
Please select the Ultralytics YOLO component where you found the bug.
multiple: true
options:
- "Install"
- "Train"
- "Val"
- "Predict"
- "Export"
- "Multi-GPU"
- "Augmentation"
- "Hyperparameter Tuning"
- "Integrations"
- "Other"
validations:
required: false
- type: textarea
attributes:
label: Bug
description: Please provide as much information as possible. Copy and paste console output and error messages including the _full_ traceback. Use [Markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to format text, code and logs. If necessary, include screenshots for visual elements only. Providing detailed information will help us resolve the issue more efficiently.
placeholder: |
💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots, etc.) to receive the most helpful response.
validations:
required: true
- type: textarea
attributes:
label: Environment
description: Try the latest version (`pip install -U ultralytics`) before reporting a bug. If it's still present, please provide the output of `yolo checks` (CLI) or `ultralytics.utils.checks.collect_system_info()` (Python) command to help us diagnose the problem.
placeholder: |
Paste output of `yolo checks` (CLI) or `ultralytics.utils.checks.collect_system_info()` (Python) command, i.e.:
```
Ultralytics 8.3.2 🚀 Python-3.11.2 torch-2.4.1 CPU (Apple M3)
Setup complete ✅ (8 CPUs, 16.0 GB RAM, 266.5/460.4 GB disk)
OS macOS-13.5.2
Environment Jupyter
Python 3.11.2
Install git
RAM 16.00 GB
CPU Apple M3
CUDA None
```
validations:
required: true
- type: textarea
attributes:
label: Minimal Reproducible Example
description: >
When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimal reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/).
placeholder: |
```
# Code to reproduce your issue here
```
validations:
required: true
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?
- type: checkboxes
attributes:
label: Are you willing to submit a PR?
description: >
(Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve Ultralytics YOLO for everyone, especially if you have a good understanding of how to implement a fix or feature.
See the Ultralytics YOLO [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.
options:
- label: Yes I'd like to help by submitting a PR!
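For reference, the environment details requested in the template above can be gathered with the two commands it names; a minimal sketch, assuming the `ultralytics` package is already installed:

```bash
# CLI form: prints Ultralytics version, Python, torch, OS, RAM, CPU and CUDA info.
yolo checks

# Python form named in the template above.
python -c "from ultralytics.utils.checks import collect_system_info; collect_system_info()"
```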


@@ -0,0 +1,16 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
blank_issues_enabled: true
contact_links:
- name: 📄 Docs
url: https://docs.ultralytics.com/
about: Full Ultralytics YOLO Documentation
- name: 💬 Forum
url: https://community.ultralytics.com/
about: Ask on Ultralytics Community Forum
- name: 🎧 Discord
url: https://ultralytics.com/discord
about: Ask on Ultralytics Discord
- name: ⌨️ Reddit
url: https://reddit.com/r/ultralytics
about: Ask on Ultralytics Subreddit


@@ -0,0 +1,53 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: 🚀 Feature Request
description: Suggest an Ultralytics YOLO idea
# title: " "
labels: [enhancement]
type: "feature"
body:
- type: markdown
attributes:
value: |
Thank you for submitting an Ultralytics 🚀 Feature Request!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics [Docs](https://docs.ultralytics.com/) and [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar feature request already exists.
options:
- label: >
I have searched the Ultralytics [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar feature requests.
required: true
- type: textarea
attributes:
label: Description
description: A short description of your feature.
placeholder: |
What new feature would you like to see in YOLO?
validations:
required: true
- type: textarea
attributes:
label: Use case
description: |
Describe the use case of your feature request. It will help us understand and prioritize the feature request.
placeholder: |
How would this feature be used, and who would use it?
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?
- type: checkboxes
attributes:
label: Are you willing to submit a PR?
description: >
(Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLO for everyone, especially if you have a good understanding of how to implement a fix or feature.
See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started.
options:
- label: Yes I'd like to help by submitting a PR!


@@ -0,0 +1,35 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: ❓ Question
description: Ask an Ultralytics YOLO question
# title: " "
labels: [question]
body:
- type: markdown
attributes:
value: |
Thank you for asking an Ultralytics YOLO ❓ Question!
- type: checkboxes
attributes:
label: Search before asking
description: >
Please search the Ultralytics [Docs](https://docs.ultralytics.com/), [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/orgs/ultralytics/discussions) to see if a similar question already exists.
options:
- label: >
I have searched the Ultralytics YOLO [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/orgs/ultralytics/discussions) and found no similar questions.
required: true
- type: textarea
attributes:
label: Question
description: What is your question? Please provide as much information as possible. Include detailed code examples to reproduce the problem and describe the context in which the issue occurs. Format your text and code using [Markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for clarity and readability. Following these guidelines will help us assist you more effectively.
placeholder: |
💡 ProTip! Include as much information as possible (logs, tracebacks, screenshots etc.) to receive the most helpful response.
validations:
required: true
- type: textarea
attributes:
label: Additional
description: Anything else you would like to share?

28 ultralytics-main/.github/dependabot.yml vendored Normal file

@@ -0,0 +1,28 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Dependabot for package version updates
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: pip
directory: "/"
schedule:
interval: weekly
time: "04:00"
open-pull-requests-limit: 10
reviewers:
- glenn-jocher
labels:
- dependencies
- package-ecosystem: github-actions
directory: "/.github/workflows"
schedule:
interval: weekly
time: "04:00"
open-pull-requests-limit: 5
reviewers:
- glenn-jocher
labels:
- dependencies


@@ -0,0 +1,364 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# YOLO Continuous Integration (CI) GitHub Actions tests
name: Ultralytics CI
on:
push:
branches: [main]
pull_request:
branches: [main]
schedule:
- cron: "0 8 * * *" # runs at 08:00 UTC every day
workflow_dispatch:
inputs:
hub:
description: "Run HUB"
default: false
type: boolean
benchmarks:
description: "Run Benchmarks"
default: false
type: boolean
tests:
description: "Run Tests"
default: false
type: boolean
gpu:
description: "Run GPU"
default: false
type: boolean
raspberrypi:
description: "Run Raspberry Pi"
default: false
type: boolean
conda:
description: "Run Conda"
default: false
type: boolean
jobs:
HUB:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && github.event.inputs.hub == 'true'))
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.11"]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v5
- name: Install requirements
shell: bash # for Windows compatibility
run: |
uv pip install --system . --extra-index-url https://download.pytorch.org/whl/cpu
- name: Check environment
run: |
yolo checks
uv pip list
- name: Test HUB training
shell: python
env:
API_KEY: ${{ secrets.ULTRALYTICS_HUB_API_KEY }}
MODEL_ID: ${{ secrets.ULTRALYTICS_HUB_MODEL_ID }}
run: |
import os
from ultralytics import YOLO, hub
api_key, model_id = os.environ['API_KEY'], os.environ['MODEL_ID']
hub.login(api_key)
hub.reset_model(model_id)
model = YOLO('https://hub.ultralytics.com/models/' + model_id)
model.train()
- name: Test HUB inference API
shell: python
env:
API_KEY: ${{ secrets.ULTRALYTICS_HUB_API_KEY }}
MODEL_ID: ${{ secrets.ULTRALYTICS_HUB_MODEL_ID }}
run: |
import os
import requests
import json
api_key, model_id = os.environ['API_KEY'], os.environ['MODEL_ID']
url = f"https://api.ultralytics.com/v1/predict/{model_id}"
headers = {"x-api-key": api_key}
data = {"size": 320, "confidence": 0.25, "iou": 0.45}
with open("ultralytics/assets/zidane.jpg", "rb") as f:
response = requests.post(url, headers=headers, data=data, files={"image": f})
assert response.status_code == 200, f'Status code {response.status_code}, Reason {response.reason}'
print(json.dumps(response.json(), indent=2))
Benchmarks:
if: github.event_name != 'workflow_dispatch' || github.event.inputs.benchmarks == 'true'
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
# Temporarily disable windows-latest due to https://github.com/ultralytics/ultralytics/actions/runs/13020330819/job/36319338854?pr=18921
os: [ubuntu-latest, macos-15, ubuntu-24.04-arm]
python-version: ["3.11"]
model: [yolo11n]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v5
- name: Install requirements
shell: bash # for Windows compatibility
run: |
uv pip install --system -e ".[export]" "coverage[toml]" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
- name: Check environment
run: |
yolo checks
uv pip list
- name: Benchmark DetectionModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}.pt' imgsz=160 verbose=0.309
- name: Benchmark ClassificationModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.249
- name: Benchmark YOLOWorld DetectionModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov8s-worldv2.pt' imgsz=160 verbose=0.337
- name: Benchmark SegmentationModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.195
- name: Benchmark PoseModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.197
- name: Benchmark OBBModel
shell: bash
run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-obb.pt' imgsz=160 verbose=0.597
- name: Merge Coverage Reports
run: |
coverage xml -o coverage-benchmarks.xml
- name: Upload Coverage Reports to CodeCov
if: github.repository == 'ultralytics/ultralytics'
uses: codecov/codecov-action@v5
with:
flags: Benchmarks
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Prune uv Cache
run: uv cache prune --ci
- name: Benchmark Summary
run: |
cat benchmarks.log
echo '```' >> $GITHUB_STEP_SUMMARY
cat benchmarks.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
Tests:
if: github.event_name != 'workflow_dispatch' || github.event.inputs.tests == 'true'
timeout-minutes: 360
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-15, windows-latest, ubuntu-24.04-arm]
python-version: ["3.11"]
torch: [latest]
include:
- os: ubuntu-latest
python-version: "3.8" # torch 1.8.0 requires python >=3.6, <=3.9
torch: "1.8.0" # min torch version CI https://pypi.org/project/torchvision/
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- uses: astral-sh/setup-uv@v5
- name: Install requirements
shell: bash # for Windows compatibility
run: |
# CoreML must be installed before export due to protobuf error from AutoInstall
slow=""
torch=""
if [ "${{ matrix.torch }}" == "1.8.0" ]; then
torch="torch==1.8.0 torchvision==0.9.0"
fi
if [[ "${{ github.event_name }}" =~ ^(schedule|workflow_dispatch)$ ]]; then
slow="pycocotools mlflow"
fi
uv pip install --system -e ".[export]" $torch $slow pytest-cov --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
- name: Check environment
run: |
yolo checks
uv pip list
- name: Pytest tests
shell: bash # for Windows compatibility
run: |
slow=""
if [[ "${{ github.event_name }}" =~ ^(schedule|workflow_dispatch)$ ]]; then
slow="--slow"
fi
pytest $slow --cov=ultralytics/ --cov-report xml tests/
- name: Upload Coverage Reports to CodeCov
if: github.repository == 'ultralytics/ultralytics' # && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
uses: codecov/codecov-action@v5
with:
flags: Tests
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Prune uv Cache
run: uv cache prune --ci
GPU:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name != 'workflow_dispatch' || github.event.inputs.gpu == 'true')
timeout-minutes: 360
runs-on: gpu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v5
- name: Install requirements
shell: bash # for Windows compatibility
run: uv pip install --system -e . pytest-cov
- name: Check environment
run: |
yolo checks
uv pip list
- name: Pytest tests
run: |
slow=""
if [[ "${{ github.event_name }}" =~ ^(schedule|workflow_dispatch)$ ]]; then
slow="--slow"
fi
pytest $slow --cov=ultralytics/ --cov-report xml tests/test_cuda.py
- name: Upload Coverage Reports to CodeCov
uses: codecov/codecov-action@v5
with:
flags: GPU
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
RaspberryPi:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event.inputs.raspberrypi == 'true')
timeout-minutes: 120
runs-on: raspberry-pi
steps:
- uses: actions/checkout@v4
- name: Activate Virtual Environment
run: |
python3.11 -m venv env
source env/bin/activate
echo PATH=$PATH >> $GITHUB_ENV
- uses: astral-sh/setup-uv@v5
- name: Install requirements
run: |
uv pip install -e ".[export]" pytest mlflow pycocotools --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
- name: Check environment
run: |
yolo checks
uv pip list
- name: Pytest tests
run: pytest --slow tests/
- name: Benchmark DetectionModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n.pt' imgsz=160 verbose=0.309
- name: Benchmark ClassificationModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-cls.pt' imgsz=160 verbose=0.249
- name: Benchmark YOLOWorld DetectionModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolov8s-worldv2.pt' imgsz=160 verbose=0.337
- name: Benchmark SegmentationModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-seg.pt' imgsz=160 verbose=0.195
- name: Benchmark PoseModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-pose.pt' imgsz=160 verbose=0.197
- name: Benchmark OBBModel
run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-obb.pt' imgsz=160 verbose=0.597
- name: Benchmark YOLOv10Model
run: python -m ultralytics.cfg.__init__ benchmark model='yolov10n.pt' imgsz=160 verbose=0.205
- name: Benchmark Summary
run: |
cat benchmarks.log
echo "$(cat benchmarks.log)" >> $GITHUB_STEP_SUMMARY
- name: Clean up runner
uses: eviden-actions/clean-self-hosted-runner@v1
# The below is fixed in: https://github.com/ultralytics/ultralytics/pull/15987
# - name: Reboot # run a reboot command in the background to free resources for next run and not crash main thread
# run: sudo bash -c "sleep 10; reboot" &
Conda:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event.inputs.conda == 'true')
continue-on-error: true
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.11"]
defaults:
run:
shell: bash -el {0}
steps:
- uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
mamba-version: "*"
channels: conda-forge,defaults
channel-priority: true
activate-environment: anaconda-client-env
- name: Cleanup disk space
uses: ultralytics/actions/cleanup-disk@main
- name: Install Linux packages
run: |
# Fix cv2 ImportError: 'libEGL.so.1: cannot open shared object file: No such file or directory'
sudo apt-get update
sudo apt-get install -y libegl1 libopengl0
- name: Install Libmamba
run: |
conda config --set solver libmamba
- name: Install Ultralytics package from conda-forge
run: |
conda install -c pytorch -c conda-forge pytorch torchvision ultralytics openvino
- name: Install pip packages
run: |
# CoreML must be installed before export due to protobuf error from AutoInstall
pip install pytest "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'"
- name: Check environment
run: |
conda list
- name: Test CLI
run: |
yolo predict model=yolo11n.pt imgsz=320
yolo train model=yolo11n.pt data=coco8.yaml epochs=1 imgsz=32
yolo val model=yolo11n.pt data=coco8.yaml imgsz=32
yolo export model=yolo11n.pt format=torchscript imgsz=160
yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 format=onnx
yolo solutions
- name: Test Python
# Note this step must use the updated default bash environment, not a python environment
run: |
python -c "
from ultralytics import YOLO
model = YOLO('yolo11n.pt')
results = model.train(data='coco8.yaml', epochs=3, imgsz=160)
results = model.val(imgsz=160)
results = model.predict(imgsz=160)
results = model.export(format='onnx', imgsz=160)
"
- name: PyTest
run: |
VERSION=$(conda list ultralytics | grep ultralytics | awk '{print $2}')
echo "Ultralytics version: $VERSION"
git clone https://github.com/ultralytics/ultralytics.git
cd ultralytics
git checkout tags/v$VERSION
pytest tests
Summary:
runs-on: ubuntu-latest
needs: [HUB, Benchmarks, Tests, GPU, RaspberryPi, Conda]
if: always()
steps:
- name: Check for failure and notify
if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.GPU.result == 'failure' || needs.RaspberryPi.result == 'failure' || needs.Conda.result == 'failure' ) && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') && github.run_attempt == '1'
uses: slackapi/slack-github-action@v2.0.0
with:
webhook-type: incoming-webhook
webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
payload: |
text: "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"


@@ -0,0 +1,45 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA
# This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged
name: CLA Assistant
on:
issue_comment:
types:
- created
pull_request_target:
types:
- reopened
- opened
- synchronize
permissions:
actions: write
contents: write
pull-requests: write
statuses: write
jobs:
CLA:
if: github.repository == 'ultralytics/ultralytics'
runs-on: ubuntu-latest
steps:
- name: CLA Assistant
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
uses: contributor-assistant/github-action@v2.6.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Must be repository secret PAT
PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
with:
path-to-signatures: "signatures/version1/cla.json"
path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document
# Branch must not be protected
branch: cla-signatures
allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot*
remote-organization-name: ultralytics
remote-repository-name: cla
custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA"
custom-allsigned-prcomment: All Contributors have signed the CLA. ✅


@@ -0,0 +1,259 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Builds ultralytics/ultralytics:latest images on DockerHub https://hub.docker.com/r/ultralytics
name: Publish Docker Images
on:
push:
branches: [main]
paths-ignore:
- "docs/**"
- "mkdocs.yml"
workflow_dispatch:
inputs:
Dockerfile:
type: boolean
description: Use Dockerfile
default: true
Dockerfile-cpu:
type: boolean
description: Use Dockerfile-cpu
default: true
Dockerfile-arm64:
type: boolean
description: Use Dockerfile-arm64
default: true
Dockerfile-jetson-jetpack6:
type: boolean
description: Use Dockerfile-jetson-jetpack6
default: true
Dockerfile-jetson-jetpack5:
type: boolean
description: Use Dockerfile-jetson-jetpack5
default: true
Dockerfile-jetson-jetpack4:
type: boolean
description: Use Dockerfile-jetson-jetpack4
default: true
Dockerfile-python:
type: boolean
description: Use Dockerfile-python
default: true
Dockerfile-conda:
type: boolean
description: Use Dockerfile-conda
default: true
push:
type: boolean
description: Publish all Images
jobs:
docker:
if: github.repository == 'ultralytics/ultralytics'
name: Push
strategy:
fail-fast: false
max-parallel: 10
matrix:
include:
- dockerfile: "Dockerfile"
tags: "latest"
platforms: "linux/amd64"
runs_on: "ubuntu-latest"
- dockerfile: "Dockerfile-cpu"
tags: "latest-cpu"
platforms: "linux/amd64"
runs_on: "ubuntu-latest"
- dockerfile: "Dockerfile-arm64"
tags: "latest-arm64"
platforms: "linux/arm64"
runs_on: "ubuntu-24.04-arm"
- dockerfile: "Dockerfile-jetson-jetpack6"
tags: "latest-jetson-jetpack6"
platforms: "linux/arm64"
runs_on: "ubuntu-24.04-arm"
- dockerfile: "Dockerfile-jetson-jetpack5"
tags: "latest-jetson-jetpack5"
platforms: "linux/arm64"
runs_on: "ubuntu-24.04-arm"
- dockerfile: "Dockerfile-jetson-jetpack4"
tags: "latest-jetson-jetpack4"
platforms: "linux/arm64"
runs_on: "ubuntu-24.04-arm"
- dockerfile: "Dockerfile-python"
tags: "latest-python"
platforms: "linux/amd64"
runs_on: "ubuntu-latest"
# - dockerfile: "Dockerfile-conda"
# tags: "latest-conda"
# platforms: "linux/amd64"
runs-on: ${{ matrix.runs_on }}
outputs:
new_release: ${{ steps.check_tag.outputs.new_release }}
steps:
- name: Cleanup disk space
uses: ultralytics/actions/cleanup-disk@main
- name: Checkout repo
uses: actions/checkout@v4
with:
fetch-depth: 0 # copy full .git directory to access full git history in Docker images
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets._GITHUB_TOKEN }}
- name: Retrieve Ultralytics version
id: get_version
run: |
VERSION=$(grep "^__version__ =" ultralytics/__init__.py | awk -F'"' '{print $2}')
echo "Retrieved Ultralytics version: $VERSION"
echo "version=$VERSION" >> $GITHUB_OUTPUT
VERSION_TAG=$(echo "${{ matrix.tags }}" | sed "s/latest/${VERSION}/")
echo "Intended version tag: $VERSION_TAG"
echo "version_tag=$VERSION_TAG" >> $GITHUB_OUTPUT
- name: Check if version tag exists on DockerHub
id: check_tag
run: |
RESPONSE=$(curl -s https://hub.docker.com/v2/repositories/ultralytics/ultralytics/tags/$VERSION_TAG)
MESSAGE=$(echo $RESPONSE | jq -r '.message')
if [[ "$MESSAGE" == "null" ]]; then
echo "Tag $VERSION_TAG already exists on DockerHub."
echo "new_release=false" >> $GITHUB_OUTPUT
elif [[ "$MESSAGE" == *"404"* ]]; then
echo "Tag $VERSION_TAG does not exist on DockerHub."
echo "new_release=true" >> $GITHUB_OUTPUT
else
echo "Unexpected response from DockerHub. Please check manually."
echo "new_release=false" >> $GITHUB_OUTPUT
fi
env:
VERSION_TAG: ${{ steps.get_version.outputs.version_tag }}
- name: Build Image
if: github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true'
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 120
retry_delay_seconds: 60
retries: 2
run: |
docker build \
--platform ${{ matrix.platforms }} \
--label "org.opencontainers.image.source=https://github.com/ultralytics/ultralytics" \
--label "org.opencontainers.image.description=Ultralytics image" \
--label "org.opencontainers.image.licenses=AGPL-3.0-or-later" \
-f docker/${{ matrix.dockerfile }} \
-t ultralytics/ultralytics:${{ matrix.tags }} \
-t ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} \
-t ghcr.io/ultralytics/ultralytics:${{ matrix.tags }} \
-t ghcr.io/ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} \
.
- name: Check Environment
if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners
run: docker run ultralytics/ultralytics:${{ matrix.tags }} /bin/bash -c "yolo checks && uv pip list"
- name: Run Tests
if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners
run: docker run ultralytics/ultralytics:${{ matrix.tags }} /bin/bash -c "pip install pytest && pytest tests"
- name: Run Benchmarks
# WARNING: Dockerfile (GPU) error on TF.js export 'module 'numpy' has no attribute 'object'.
if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners
run: docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolo11n.pt imgsz=160 verbose=0.309
- name: Push Docker Image with Ultralytics version tag
if: (github.event_name == 'push' || (github.event.inputs[matrix.dockerfile] == 'true' && github.event.inputs.push == 'true')) && steps.check_tag.outputs.new_release == 'true' && matrix.dockerfile != 'Dockerfile-conda'
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 15
retry_delay_seconds: 300
retries: 2
run: |
t="ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }}"
docker push $t
docker push ghcr.io/$t
- name: Build and Push Additional Images (latest-runner and latest-jupyter)
if: github.event_name == 'push' || (github.event.inputs[matrix.dockerfile] == 'true' && github.event.inputs.push == 'true')
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 15
retry_delay_seconds: 300
retries: 2
run: |
t="ultralytics/ultralytics:${{ matrix.tags }}"
docker push $t
docker push ghcr.io/$t
if [[ "${{ matrix.tags }}" == "latest" ]]; then
t="ultralytics/ultralytics:latest-runner"
docker build -f docker/Dockerfile-runner -t $t -t ghcr.io/$t \
--label "org.opencontainers.image.source=https://github.com/ultralytics/ultralytics" \
--label "org.opencontainers.image.description=Ultralytics runner image" \
--label "org.opencontainers.image.licenses=AGPL-3.0-or-later" \
.
docker push $t
docker push ghcr.io/$t
fi
if [[ "${{ matrix.tags }}" == "latest-python" ]]; then
t="ultralytics/ultralytics:latest-jupyter"
v="ultralytics/ultralytics:${{ steps.get_version.outputs.version }}-jupyter"
docker build -f docker/Dockerfile-jupyter -t $t -t ghcr.io/$t -t $v -t ghcr.io/$v \
--label "org.opencontainers.image.source=https://github.com/ultralytics/ultralytics" \
--label "org.opencontainers.image.description=Ultralytics Jupyter image" \
--label "org.opencontainers.image.licenses=AGPL-3.0-or-later" \
.
docker push $t
docker push ghcr.io/$t
if [[ "${{ steps.check_tag_dockerhub.outputs.new_release }}" == "true" ]]; then
docker push $v
docker push ghcr.io/$v
fi
fi
trigger-actions:
runs-on: ubuntu-latest
needs: docker
# Only trigger actions on new Ultralytics releases
if: success() && github.repository == 'ultralytics/ultralytics' && github.event_name == 'push' && needs.docker.outputs.new_release == 'true'
steps:
- name: Trigger Additional GitHub Actions
env:
GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
run: |
sleep 60
gh workflow run deploy_cloud_run.yml \
--repo ultralytics/assistant \
--ref main
notify:
runs-on: ubuntu-latest
needs: [docker, trigger-actions]
if: always()
steps:
- name: Check for failure and notify
if: needs.docker.result == 'failure' && github.repository == 'ultralytics/ultralytics' && github.event_name == 'push' && github.run_attempt == '1'
uses: slackapi/slack-github-action@v2.0.0
with:
webhook-type: incoming-webhook
webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
payload: |
text: "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"


@@ -0,0 +1,117 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Test and publish docs to https://docs.ultralytics.com
# Ignores the following Docs rules to match Google-style docstrings:
# D100: Missing docstring in public module
# D104: Missing docstring in public package
# D203: 1 blank line required before class docstring
# D205: 1 blank line required between summary line and description
# D212: Multi-line docstring summary should start at the first line
# D213: Multi-line docstring summary should start at the second line
# D401: First line of docstring should be in imperative mood
# D406: Section name should end with a newline
# D407: Missing dashed underline after section
# D413: Missing blank line after last section
name: Publish Docs
on:
push:
branches: [main]
pull_request:
branches: [main]
workflow_dispatch:
inputs:
publish_docs:
description: "Publish live to https://docs.ultralytics.com"
default: true
type: boolean
jobs:
Docs:
if: github.repository == 'ultralytics/ultralytics'
runs-on: ubuntu-latest
env:
GITHUB_REF: ${{ github.head_ref || github.ref }}
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
# Fetch depth 0 required to capture full docs author history
repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
ref: ${{ env.GITHUB_REF }}
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: astral-sh/setup-uv@v5
- name: Install Dependencies
run: uv pip install --system ruff black tqdm mkdocs-material "mkdocstrings[python]" mkdocs-redirects mkdocs-ultralytics-plugin mkdocs-macros-plugin
- name: Ruff fixes
continue-on-error: true
run: |
ruff check \
--fix \
--unsafe-fixes \
--extend-select I,D,UP \
--target-version py38 \
--ignore D100,D104,D203,D205,D212,D213,D401,D406,D407,D413 \
.
- name: Update Docs Reference Section and Push Changes
continue-on-error: true
run: |
git config --global user.name "UltralyticsAssistant"
git config --global user.email "web@ultralytics.com"
python docs/build_reference.py
git pull origin "$GITHUB_REF"
git add .
git reset HEAD -- .github/workflows/ # workflow changes are not permitted with default token
if [[ "${{ github.event_name }}" == "pull_request" ]] && ! git diff --staged --quiet; then
git commit -m "Auto-update Ultralytics Docs Reference by https://ultralytics.com/actions"
git push
else
echo "No changes to commit"
fi
- name: Ruff checks
run: |
ruff check \
--extend-select I,D,UP \
--target-version py38 \
--ignore D100,D104,D203,D205,D212,D213,D401,D406,D407,D413 \
.
- name: Build Docs and Check for Warnings
run: |
export JUPYTER_PLATFORM_DIRS=1
python docs/build_docs.py
- name: Commit and Push Docs changes
continue-on-error: true
if: always()
run: |
git pull origin "$GITHUB_REF"
git add --update # only add updated files
git reset HEAD -- .github/workflows/ # workflow changes are not permitted with default token
if [[ "${{ github.event_name }}" == "pull_request" ]] && ! git diff --staged --quiet; then
git commit -m "Auto-update Ultralytics Docs by https://ultralytics.com/actions"
git push
else
echo "No changes to commit"
fi
- name: Publish Docs to https://docs.ultralytics.com
if: github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && github.event.inputs.publish_docs == 'true')
run: |
git clone --depth 1 --branch gh-pages https://github.com/ultralytics/docs.git docs-repo
cd docs-repo
rm -rf *
cp -R ../site/* .
echo "${{ secrets.INDEXNOW_KEY_DOCS }}" > "${{ secrets.INDEXNOW_KEY_DOCS }}.txt"
git add .
if git diff --staged --quiet; then
echo "No changes to commit"
else
git pull origin gh-pages
LATEST_HASH=$(git rev-parse --short=7 HEAD)
git commit -m "Update Docs for 'ultralytics ${{ steps.check_pypi.outputs.version }} - $LATEST_HASH'"
git push https://${{ secrets._GITHUB_TOKEN }}@github.com/ultralytics/docs.git gh-pages
fi


@@ -0,0 +1,62 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Ultralytics Actions https://github.com/ultralytics/actions
# This workflow automatically formats code and documentation in PRs to official Ultralytics standards
name: Ultralytics Actions
on:
issues:
types: [opened, edited]
discussion:
types: [created]
pull_request:
branches: [main]
types: [opened, closed, synchronize, review_requested]
jobs:
format:
runs-on: ubuntu-latest
steps:
- name: Run Ultralytics Formatting
uses: ultralytics/actions@main
with:
token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
labels: true # autolabel issues and PRs
python: true # format Python code and docstrings
prettier: true # format YAML, JSON, Markdown and CSS
spelling: true # check spelling
links: false # check broken links
summary: true # print PR summary with GPT4o (requires 'openai_api_key')
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
first_issue_response: |
👋 Hello @${{ github.actor }}, thank you for your interest in Ultralytics 🚀! We recommend a visit to the [Docs](https://docs.ultralytics.com/) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered.
If this is a 🐛 Bug Report, please provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum-reproducible-example/) to help us debug it.
If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/).
Join the Ultralytics community where it suits you best. For real-time chat, head to [Discord](https://discord.com/invite/ultralytics) 🎧. Prefer in-depth discussions? Check out [Discourse](https://community.ultralytics.com/). Or dive into threads on our [Subreddit](https://reddit.com/r/Ultralytics) to share knowledge with the community.
## Upgrade
Upgrade to the latest `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) to verify your issue is not already resolved in the latest version:
```bash
pip install -U ultralytics
```
## Environments
YOLO may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda-zone)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
- **Notebooks** with free GPU: <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"/></a> <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)
- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)
- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Docker Pulls"></a>
## Status
<a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml?query=event%3Aschedule"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
If this badge is green, all [Ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml?query=event%3Aschedule) tests are currently passing. CI tests verify correct operation of all YOLO [Modes](https://docs.ultralytics.com/modes/) and [Tasks](https://docs.ultralytics.com/tasks/) on macOS, Windows, and Ubuntu every 24 hours and on every commit.


@@ -0,0 +1,93 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee
# Ignores the following status codes to reduce false positives:
# - 401(Vimeo, 'unauthorized')
# - 403(OpenVINO, 'forbidden')
# - 429(Instagram, 'too many requests')
# - 500(Zenodo, 'cached')
# - 502(Zenodo, 'bad gateway')
# - 999(LinkedIn, 'unknown status code')
name: Check Broken links
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *" # runs at 00:00 UTC every day
jobs:
Links:
if: github.repository == 'ultralytics/ultralytics'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Download and install lychee
run: |
LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4)
curl -L $LYCHEE_URL | tar xz -C /usr/local/bin
- name: Test Markdown and HTML links with retry
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 60
retry_delay_seconds: 900
retries: 2
run: |
lychee \
--scheme https \
--timeout 60 \
--insecure \
--accept 401,403,429,500,502,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path docs/zh \
--exclude-path docs/es \
--exclude-path docs/ru \
--exclude-path docs/pt \
--exclude-path docs/fr \
--exclude-path docs/de \
--exclude-path docs/ja \
--exclude-path docs/ko \
--exclude-path docs/hi \
--exclude-path docs/ar \
--github-token ${{ secrets.GITHUB_TOKEN }} \
--header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
'./**/*.md' \
'./**/*.html' | tee -a $GITHUB_STEP_SUMMARY
- name: Test Markdown, HTML, YAML, Python and Notebook links with retry
if: github.event_name == 'workflow_dispatch'
uses: ultralytics/actions/retry@main
with:
timeout_minutes: 60
retry_delay_seconds: 900
retries: 2
run: |
lychee \
--scheme https \
--timeout 60 \
--insecure \
--accept 401,403,429,500,502,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path '**/ci.yml' \
--exclude-path docs/zh \
--exclude-path docs/es \
--exclude-path docs/ru \
--exclude-path docs/pt \
--exclude-path docs/fr \
--exclude-path docs/de \
--exclude-path docs/ja \
--exclude-path docs/ko \
--exclude-path docs/hi \
--exclude-path docs/ar \
--github-token ${{ secrets.GITHUB_TOKEN }} \
--header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
'./**/*.md' \
'./**/*.html' \
'./**/*.yml' \
'./**/*.yaml' \
'./**/*.py' \
'./**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY


@@ -0,0 +1,88 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Automatically merges repository 'main' branch into all open PRs to keep them up-to-date
# Action runs on updates to main branch so when one PR merges to main all others update
name: Merge main into PRs
on:
workflow_dispatch:
# push:
# branches:
# - ${{ github.event.repository.default_branch }}
jobs:
Merge:
if: github.repository == 'ultralytics/ultralytics'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-python@v5
with:
python-version: "3.x"
cache: "pip"
- name: Install requirements
run: |
pip install pygithub
- name: Merge default branch into PRs
shell: python
run: |
from github import Github
import os
import time
g = Github("${{ secrets._GITHUB_TOKEN }}")
repo = g.get_repo("${{ github.repository }}")
# Fetch the default branch name
default_branch_name = repo.default_branch
default_branch = repo.get_branch(default_branch_name)
# Initialize counters
updated_branches = 0
up_to_date_branches = 0
errors = 0
for pr in repo.get_pulls(state='open', sort='created'):
try:
# Label PRs as popular for positive reactions
reactions = pr.as_issue().get_reactions()
if sum([(1 if r.content not in {"-1", "confused"} else 0) for r in reactions]) > 5:
pr.set_labels(*("popular",) + tuple(l.name for l in pr.get_labels()))
# Get full names for repositories and branches
base_repo_name = repo.full_name
head_repo_name = pr.head.repo.full_name
base_branch_name = pr.base.ref
head_branch_name = pr.head.ref
# Check if PR is behind the default branch
comparison = repo.compare(default_branch.commit.sha, pr.head.sha)
if comparison.behind_by > 0:
print(f"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).")
# Attempt to update the branch
try:
success = pr.update_branch()
assert success, "Branch update failed"
print(f"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).")
updated_branches += 1
time.sleep(10) # rate limit merges
except Exception as update_error:
print(f"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}")
errors += 1
else:
print(f"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is already up to date with {default_branch_name}, no merge required.")
up_to_date_branches += 1
except Exception as e:
print(f"❌ Could not process PR #{pr.number}: {e}")
errors += 1
# Print summary
print("\n\nSummary:")
print(f"Branches updated: {updated_branches}")
print(f"Branches already up-to-date: {up_to_date_branches}")
print(f"Total errors: {errors}")


@@ -0,0 +1,125 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Publish pip package to PyPI https://pypi.org/project/ultralytics/
name: Publish to PyPI
on:
push:
branches: [main]
workflow_dispatch:
inputs:
pypi:
type: boolean
description: Publish to PyPI
jobs:
check:
if: github.repository == 'ultralytics/ultralytics' && github.actor == 'glenn-jocher'
runs-on: ubuntu-latest
permissions:
contents: write
outputs:
increment: ${{ steps.check_pypi.outputs.increment }}
current_tag: ${{ steps.check_pypi.outputs.current_tag }}
previous_tag: ${{ steps.check_pypi.outputs.previous_tag }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: astral-sh/setup-uv@v5
- run: uv pip install --system --no-cache ultralytics-actions
- id: check_pypi
shell: python
run: |
import os
from actions.utils import check_pypi_version
local_version, online_version, publish = check_pypi_version()
os.system(f'echo "increment={publish}" >> $GITHUB_OUTPUT')
os.system(f'echo "current_tag=v{local_version}" >> $GITHUB_OUTPUT')
os.system(f'echo "previous_tag=v{online_version}" >> $GITHUB_OUTPUT')
if publish:
print('Ready to publish new version to PyPI ✅.')
- name: Tag and Release
if: steps.check_pypi.outputs.increment == 'True'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CURRENT_TAG: ${{ steps.check_pypi.outputs.current_tag }}
PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_tag }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
git config --global user.name "UltralyticsAssistant"
git config --global user.email "web@ultralytics.com"
git tag -a "$CURRENT_TAG" -m "$(git log -1 --pretty=%B)"
git push origin "$CURRENT_TAG"
ultralytics-actions-summarize-release
uv cache prune --ci
build:
needs: check
if: needs.check.outputs.increment == 'True'
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: astral-sh/setup-uv@v5
- run: uv pip install --system --no-cache build
- run: python -m build
- uses: actions/upload-artifact@v4
with:
name: dist
path: dist/
- run: uv cache prune --ci
publish:
needs: [check, build]
if: needs.check.outputs.increment == 'True'
runs-on: ubuntu-latest
environment: # for GitHub Deployments tab
name: Release - PyPI
url: https://pypi.org/p/ultralytics
permissions:
id-token: write # for PyPI trusted publishing
steps:
- uses: actions/download-artifact@v4
with:
name: dist
path: dist/
- uses: pypa/gh-action-pypi-publish@release/v1
notify:
needs: [check, publish]
if: always() && needs.check.outputs.increment == 'True'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Extract PR Details
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
PR_JSON=$(gh pr list --search "${GITHUB_SHA}" --state merged --json number,title --jq '.[0]')
PR_NUMBER=$(echo "${PR_JSON}" | jq -r '.number')
PR_TITLE=$(echo "${PR_JSON}" | jq -r '.title')
echo "PR_NUMBER=${PR_NUMBER}" >> "${GITHUB_ENV}"
echo "PR_TITLE=${PR_TITLE}" >> "${GITHUB_ENV}"
- name: Notify Success
if: needs.publish.result == 'success' && github.event_name == 'push'
uses: slackapi/slack-github-action@v2.0.0
with:
webhook-type: incoming-webhook
webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
payload: |
text: "<!channel> GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW `${{ github.repository }} ${{ needs.check.outputs.current_tag }}` pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* <https://github.com/${{ github.repository }}/pull/${{ env.PR_NUMBER }}> ${{ env.PR_TITLE }}\n"
- name: Notify Failure
if: needs.publish.result != 'success'
uses: slackapi/slack-github-action@v2.0.0
with:
webhook-type: incoming-webhook
webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
payload: |
text: "<!channel> GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n*Pull Request:* <https://github.com/${{ github.repository }}/pull/${{ env.PR_NUMBER }}> ${{ env.PR_TITLE }}\n"


@@ -0,0 +1,54 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
name: Close stale issues
on:
schedule:
- cron: "0 0 * * *" # Runs at 00:00 UTC every day
permissions:
pull-requests: write
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: |
👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help.
For additional resources and information, please see the links below:
- **Docs**: https://docs.ultralytics.com
- **HUB**: https://hub.ultralytics.com
- **Community**: https://community.ultralytics.com
Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!
Thank you for your contributions to YOLO 🚀 and Vision AI ⭐
stale-pr-message: |
👋 Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap.
We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved.
For additional resources and information, please see the links below:
- **Docs**: https://docs.ultralytics.com
- **HUB**: https://hub.ultralytics.com
- **Community**: https://community.ultralytics.com
Thank you for your contributions to YOLO 🚀 and Vision AI ⭐
ignore-pr-updates: true
remove-pr-stale-when-updated: false
include-only-assigned: true
days-before-issue-stale: 30
days-before-issue-close: 10
days-before-pr-stale: 90
days-before-pr-close: 30
exempt-issue-labels: "documentation,tutorial,TODO"
operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting.

175 ultralytics-main/.gitignore vendored Normal file

@@ -0,0 +1,175 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
requirements.txt
setup.py
ultralytics.egg-info
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other info into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
mlruns/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# Profiling
*.pclprof
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
.idea
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# VSCode project settings
.vscode/
.devcontainer/
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# datasets and projects (ignore /datasets dir at root only to allow for docs/en/datasets dir)
/datasets
runs/
wandb/
.DS_Store
# Neural Network weights -----------------------------------------------------------------------------------------------
weights/
*.weights
*.pt
*.pb
*.onnx
*.engine
*.mlmodel
*.mlpackage
*.torchscript
*.tflite
*.h5
*.mnn
*_saved_model/
*_web_model/
*_openvino_model/
*_paddle_model/
*_ncnn_model/
*_imx_model/
pnnx*
*.rknn
# Autogenerated files for tests
/ultralytics/assets/
# calibration image
calibration_*.npy


@@ -0,0 +1,26 @@
# This CITATION.cff file was generated with https://bit.ly/cffinit
cff-version: 1.2.0
title: Ultralytics YOLO
message: >-
If you use this software, please cite it using the
metadata from this file.
type: software
authors:
- given-names: Glenn
family-names: Jocher
affiliation: Ultralytics
orcid: 'https://orcid.org/0000-0001-5950-6979'
- family-names: Qiu
given-names: Jing
affiliation: Ultralytics
orcid: 'https://orcid.org/0000-0003-3783-7069'
- given-names: Ayush
family-names: Chaurasia
affiliation: Ultralytics
orcid: 'https://orcid.org/0000-0002-7603-6750'
repository-code: 'https://github.com/ultralytics/ultralytics'
url: 'https://ultralytics.com'
license: AGPL-3.0
version: 8.0.0
date-released: '2023-01-10'

View File

@ -0,0 +1,166 @@
---
comments: true
description: Learn how to contribute to Ultralytics YOLO open-source repositories. Follow guidelines for pull requests, code of conduct, and bug reporting.
keywords: Ultralytics, YOLO, open-source, contribution, pull request, code of conduct, bug reporting, GitHub, CLA, Google-style docstrings
---
# Contributing to Ultralytics Open-Source Projects
Welcome! We're thrilled that you're considering contributing to our [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) projects. Your involvement not only helps enhance the quality of our repositories but also benefits the entire community. This guide provides clear guidelines and best practices to help you get started.
<a href="https://github.com/ultralytics/ultralytics/graphs/contributors">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" alt="Ultralytics open-source contributors"></a>
## Table of Contents
1. [Code of Conduct](#code-of-conduct)
2. [Contributing via Pull Requests](#contributing-via-pull-requests)
- [CLA Signing](#cla-signing)
- [Google-Style Docstrings](#google-style-docstrings)
- [GitHub Actions CI Tests](#github-actions-ci-tests)
3. [Reporting Bugs](#reporting-bugs)
4. [License](#license)
5. [Conclusion](#conclusion)
6. [FAQ](#faq)
## Code of Conduct
To ensure a welcoming and inclusive environment for everyone, all contributors must adhere to our [Code of Conduct](https://docs.ultralytics.com/help/code-of-conduct/). Respect, kindness, and professionalism are at the heart of our community.
## Contributing via Pull Requests
We greatly appreciate contributions in the form of pull requests. To make the review process as smooth as possible, please follow these steps:
1. **[Fork the repository](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo):** Start by forking the Ultralytics YOLO repository to your GitHub account.
2. **[Create a branch](https://docs.github.com/en/desktop/making-changes-in-a-branch/managing-branches-in-github-desktop):** Create a new branch in your forked repository with a clear, descriptive name that reflects your changes.
3. **Make your changes:** Ensure your code adheres to the project's style guidelines and does not introduce any new errors or warnings.
4. **[Test your changes](https://github.com/ultralytics/ultralytics/tree/main/tests):** Before submitting, test your changes locally to confirm they work as expected and don't cause any new issues.
5. **[Commit your changes](https://docs.github.com/en/desktop/making-changes-in-a-branch/committing-and-reviewing-changes-to-your-project-in-github-desktop):** Commit your changes with a concise and descriptive commit message. If your changes address a specific issue, include the issue number in your commit message.
6. **[Create a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request):** Submit a pull request from your forked repository to the main Ultralytics YOLO repository. Provide a clear and detailed explanation of your changes and how they improve the project.
### CLA Signing
Before we can merge your pull request, you must sign our [Contributor License Agreement (CLA)](https://docs.ultralytics.com/help/CLA/). This legal agreement ensures that your contributions are properly licensed, allowing the project to continue being distributed under the AGPL-3.0 license.
After submitting your pull request, the CLA bot will guide you through the signing process. To sign the CLA, simply add a comment in your PR stating:
```
I have read the CLA Document and I sign the CLA
```
### Google-Style Docstrings
When adding new functions or classes, please include [Google-style docstrings](https://google.github.io/styleguide/pyguide.html). These docstrings provide clear, standardized documentation that helps other developers understand and maintain your code.
#### Example
This example illustrates a Google-style docstring. Ensure that both input and output `types` are always enclosed in parentheses, e.g., `(bool)`.
```python
def example_function(arg1, arg2=4):
"""
Example function demonstrating Google-style docstrings.
Args:
arg1 (int): The first argument.
arg2 (int): The second argument, with a default value of 4.
Returns:
(bool): True if successful, False otherwise.
Examples:
>>> result = example_function(1, 2) # returns False
"""
if arg1 == arg2:
return True
return False
```
#### Example with type hints
This example includes both a Google-style docstring and type hints for arguments and returns, though using either independently is also acceptable.
```python
def example_function(arg1: int, arg2: int = 4) -> bool:
"""
Example function demonstrating Google-style docstrings.
Args:
arg1: The first argument.
arg2: The second argument, with a default value of 4.
Returns:
True if successful, False otherwise.
Examples:
>>> result = example_function(1, 2) # returns False
"""
if arg1 == arg2:
return True
return False
```
#### Example Single-line
For smaller or simpler functions, a single-line docstring may be sufficient. The docstring must use three double-quotes, be a complete sentence, start with a capital letter, and end with a period.
```python
def example_small_function(arg1: int, arg2: int = 4) -> bool:
"""Example function with a single-line docstring."""
return arg1 == arg2
```
### GitHub Actions CI Tests
All pull requests must pass the GitHub Actions [Continuous Integration](https://docs.ultralytics.com/help/CI/) (CI) tests before they can be merged. These tests include linting, unit tests, and other checks to ensure that your changes meet the project's quality standards. Review the CI output and address any issues that arise.
## Reporting Bugs
We highly value bug reports as they help us maintain the quality of our projects. When reporting a bug, please provide a [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum-reproducible-example/)—a simple, clear code example that consistently reproduces the issue. This allows us to quickly identify and resolve the problem.
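For reference, a minimal reproducible example is often just a few lines that load a model and run the single call that triggers the problem; the model and image below are placeholders, so substitute whatever actually reproduces your issue:
```python
from ultralytics import YOLO

# Placeholder model and image -- swap in whatever reproduces your issue
model = YOLO("yolo11n.pt")
results = model.predict("https://ultralytics.com/images/bus.jpg", imgsz=640)

# Include the exact console output or full traceback from this call in your report
print(results[0].boxes)
```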
## License
Ultralytics uses the [GNU Affero General Public License v3.0 (AGPL-3.0)](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) for its repositories. This license promotes openness, transparency, and collaborative improvement in software development. It ensures that all users have the freedom to use, modify, and share the software, fostering a strong community of collaboration and innovation.
We encourage all contributors to familiarize themselves with the terms of the AGPL-3.0 license to contribute effectively and ethically to the Ultralytics open-source community.
## Conclusion
Thank you for your interest in contributing to [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) YOLO projects. Your participation is essential in shaping the future of our software and building a vibrant community of innovation and collaboration. Whether you're enhancing code, reporting bugs, or suggesting new features, your contributions are invaluable.
We're excited to see your ideas come to life and appreciate your commitment to advancing object detection technology. Together, let's continue to grow and innovate in this exciting open-source journey. Happy coding! 🚀🌟
## FAQ
### Why should I contribute to Ultralytics YOLO open-source repositories?
Contributing to Ultralytics YOLO open-source repositories improves the software, making it more robust and feature-rich for the entire community. Contributions can include code enhancements, bug fixes, documentation improvements, and new feature implementations. Additionally, contributing allows you to collaborate with other skilled developers and experts in the field, enhancing your own skills and reputation. For details on how to get started, refer to the [Contributing via Pull Requests](#contributing-via-pull-requests) section.
### How do I sign the Contributor License Agreement (CLA) for Ultralytics YOLO?
To sign the Contributor License Agreement (CLA), follow the instructions provided by the CLA bot after submitting your pull request. This process ensures that your contributions are properly licensed under the AGPL-3.0 license, maintaining the legal integrity of the open-source project. Add a comment in your pull request stating:
```
I have read the CLA Document and I sign the CLA.
```
For more information, see the [CLA Signing](#cla-signing) section.
### What are Google-style docstrings, and why are they required for Ultralytics YOLO contributions?
Google-style docstrings provide clear, concise documentation for functions and classes, improving code readability and maintainability. These docstrings outline the function's purpose, arguments, and return values with specific formatting rules. When contributing to Ultralytics YOLO, following Google-style docstrings ensures that your additions are well-documented and easily understood. For examples and guidelines, visit the [Google-Style Docstrings](#google-style-docstrings) section.
### How can I ensure my changes pass the GitHub Actions CI tests?
Before your pull request can be merged, it must pass all GitHub Actions Continuous Integration (CI) tests. These tests include linting, unit tests, and other checks to ensure the code meets the project's quality standards. Review the CI output and fix any issues. For detailed information on the CI process and troubleshooting tips, see the [GitHub Actions CI Tests](#github-actions-ci-tests) section.
### How do I report a bug in Ultralytics YOLO repositories?
To report a bug, provide a clear and concise [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum-reproducible-example/) along with your bug report. This helps developers quickly identify and fix the issue. Ensure your example is minimal yet sufficient to replicate the problem. For more detailed steps on reporting bugs, refer to the [Reporting Bugs](#reporting-bugs) section.

View File

@ -0,0 +1,15 @@
# Import the YOLO class from the ultralytics library
from ultralytics import YOLO
# Load the model
model = YOLO(r'D:\ultralytics-main\ultralytics-main\runs\detect\train5\weights\best.pt')  # load a custom-trained model
# Export to ONNX only when this script is run as the main program
if __name__ == '__main__':
    model.export(format='onnx')
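A hedged follow-up to the script above: Ultralytics writes the exported file next to the source weights, so the resulting `best.onnx` can be loaded back with the same `YOLO` class for a quick sanity check (the image path below is a placeholder):
```python
from ultralytics import YOLO

# Load the exported ONNX model (written alongside best.pt by the export call above)
onnx_model = YOLO(r'D:\ultralytics-main\ultralytics-main\runs\detect\train5\weights\best.onnx')

# Run a single inference to confirm the exported model works (placeholder image path)
results = onnx_model('path/to/image.jpg')
results[0].show()
```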

661
ultralytics-main/LICENSE Normal file
View File

@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

283
ultralytics-main/README.md Normal file
View File

@ -0,0 +1,283 @@
<div align="center">
<p>
<a href="https://www.ultralytics.com/events/yolovision" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="YOLO Vision banner"></a>
</p>
[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
<div>
<a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
<a href="https://pepy.tech/projects/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
<a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
<a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open Ultralytics In Kaggle"></a>
<a href="https://mybinder.org/v2/gh/ultralytics/ultralytics/HEAD?labpath=examples%2Ftutorial.ipynb"><img src="https://mybinder.org/badge_logo.svg" alt="Open Ultralytics In Binder"></a>
</div>
<br>
[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLO11 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
We hope that the resources here will help you get the most out of YOLO. Please browse the Ultralytics <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, and become a member of the Ultralytics <a href="https://discord.com/invite/ultralytics">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com/">Forums</a>!
To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<a href="https://docs.ultralytics.com/models/yolo11/" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="YOLO11 performance plots">
</a>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
## <div align="center">Documentation</div>
See below for a quickstart install and usage examples, and see our [Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.
<details open>
<summary>Install</summary>
Pip install the Ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Ultralytics Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
```bash
pip install ultralytics
```
For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics) [![Ultralytics Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
</details>
<details open>
<summary>Usage</summary>
### CLI
YOLO may be used directly in the Command Line Interface (CLI) with a `yolo` command:
```bash
yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
### Python
YOLO may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt")
# Train the model
train_results = model.train(
data="coco8.yaml", # path to dataset YAML
epochs=100, # number of training epochs
imgsz=640, # training image size
device="cpu", # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
)
# Evaluate model performance on the validation set
metrics = model.val()
# Perform object detection on an image
results = model("path/to/image.jpg")
results[0].show()
# Export the model to ONNX format
path = model.export(format="onnx") # return path to exported model
```
See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
</details>
## <div align="center">Models</div>
YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. All [Models](https://docs.ultralytics.com/models/) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
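For instance, tracking follows the same pattern as prediction; a minimal sketch, assuming a local video at the placeholder path `path/to/video.mp4`:
```python
from ultralytics import YOLO

# Any listed checkpoint downloads automatically on first use
model = YOLO("yolo11n.pt")

# Run multi-object tracking on a video and display the annotated frames
results = model.track("path/to/video.mp4", show=True)
```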
<a href="https://docs.ultralytics.com/tasks/" target="_blank">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-tasks-banner.avif" alt="Ultralytics YOLO supported tasks">
</a>
<br>
<br>
<details open><summary>Detection (COCO)</summary>
See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [COCO](https://docs.ultralytics.com/datasets/detect/coco/), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 |
| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 |
| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 |
| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.6 ± 1.4 | 6.2 ± 0.1 | 25.3 | 86.9 |
| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.8 ± 6.7 | 11.3 ± 0.2 | 56.9 | 194.9 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
</details>
<details><summary>Segmentation (COCO)</summary>
See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 |
| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 |
| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 |
| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 |
| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco.yaml batch=1 device=0|cpu`
</details>
<details><summary>Classification (ImageNet)</summary>
See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.
| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 0.5 |
| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 1.6 |
| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 5.0 |
| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 6.2 |
| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 13.7 |
- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
</details>
<details><summary>Pose (COCO)</summary>
See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pre-trained class, person.
| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 |
| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 |
| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 |
| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 |
| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
</details>
<details><summary>OBB (DOTAv1)</summary>
See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| -------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 17.2 |
| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.5 |
| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 183.5 |
| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.5 ± 5.0 | 13.5 ± 0.6 | 26.2 | 232.0 |
| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.6 ± 7.7 | 28.6 ± 1.0 | 58.8 | 520.2 |
- **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
</details>
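Across the five tasks above, predictions are returned as `Results` objects whose task-specific fields differ; the sketch below shows which attribute each task populates (checkpoint and image path are illustrative):

```python
from ultralytics import YOLO

# Any task checkpoint loads the same way; a Segment model is shown here
model = YOLO("yolo11n-seg.pt")
results = model("path/to/image.jpg")  # placeholder image path

for r in results:
    print(r.boxes)  # bounding boxes (Detect, Segment, Pose)
    print(r.masks)  # segmentation masks (Segment models only)
    # r.probs holds class probabilities for Classify models,
    # r.keypoints holds keypoints for Pose models,
    # and r.obb holds oriented boxes for OBB models
```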
## <div align="center">Integrations</div>
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
<a href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations">
</a>
<br>
<br>
<div align="center">
<a href="https://www.ultralytics.com/hub">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics HUB logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.wandb.ai/guides/integrations/ultralytics/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="ClearML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov8-readme-comet">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov5-neuralmagic">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
</div>
| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
| :--------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
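As one example of how these integrations are switched on, experiment logging can be toggled through the Ultralytics settings interface; a minimal sketch, assuming the `wandb` settings key described in the Ultralytics settings documentation:

```python
from ultralytics import settings

# Enable Weights & Biases logging for subsequent runs
# (the "wandb" key follows the Ultralytics settings docs; treat it as
# an assumption if your installed version differs)
settings.update({"wandb": True})
```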
## <div align="center">Ultralytics HUB</div>
Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLO11 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
<a href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>
## <div align="center">Contribute</div>
We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
<a href="https://github.com/ultralytics/ultralytics/graphs/contributors">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" alt="Ultralytics open-source contributors"></a>
## <div align="center">License</div>
Ultralytics offers two licensing options to accommodate diverse use cases:
- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
## <div align="center">Contact</div>
For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
<br>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
View File
@ -0,0 +1,283 @@
<div align="center">
<p>
<a href="https://www.ultralytics.com/events/yolovision" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="YOLO Vision banner"></a>
</p>
[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
<div>
<a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
<a href="https://pepy.tech/projects/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
<a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
<a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open Ultralytics In Kaggle"></a>
<a href="https://mybinder.org/v2/gh/ultralytics/ultralytics/HEAD?labpath=examples%2Ftutorial.ipynb"><img src="https://mybinder.org/badge_logo.svg" alt="Open Ultralytics In Binder"></a>
</div>
<br>
[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds on the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLO11 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification, and pose estimation tasks.
We hope that the resources here will help you get the most out of YOLO. Please browse the Ultralytics <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue or start a discussion on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a>, and become a member of the Ultralytics <a href="https://discord.com/invite/ultralytics">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com/">Forums</a>!
To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
<a href="https://docs.ultralytics.com/models/yolo11/" target="_blank">
<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="YOLO11 performance plots">
</a>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="2%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>
## <div align="center">Documentation</div>
See below for quickstart installation and usage examples, and see our [Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.
<details open>
<summary>Install</summary>
Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Ultralytics Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
```bash
pip install ultralytics
```
For alternative installation methods, including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics) [![Ultralytics Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
</details>
<details open>
<summary>Usage</summary>
### CLI
YOLO may be used directly in the command line interface (CLI) with a `yolo` command:
```bash
yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
### Python
YOLO may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
```python
from ultralytics import YOLO

# Load a model
model = YOLO("yolo11n.pt")

# Train the model
train_results = model.train(
    data="coco8.yaml",  # path to dataset YAML
    epochs=100,  # number of training epochs
    imgsz=640,  # training image size
    device="cpu",  # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
)

# Evaluate model performance on the validation set
metrics = model.val()

# Perform object detection on an image
results = model("path/to/image.jpg")
results[0].show()

# Export the model to ONNX format
path = model.export(format="onnx")  # returns the path to the exported model
```
See the YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
</details>
## <div align="center">Models</div>
YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. All [Models](https://docs.ultralytics.com/models/) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
<a href="https://docs.ultralytics.com/tasks/" target="_blank">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-tasks-banner.avif" alt="Ultralytics YOLO supported tasks">
</a>
<br>
<br>
<details open><summary>Detection (COCO)</summary>
See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [COCO](https://docs.ultralytics.com/datasets/detect/coco/), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 |
| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 |
| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 |
| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.6 ± 1.4 | 6.2 ± 0.1 | 25.3 | 86.9 |
| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.8 ± 6.7 | 11.3 ± 0.2 | 56.9 | 194.9 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
</details>
<details><summary>Segmentation (COCO)</summary>
See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 |
| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 |
| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 |
| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 |
| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco.yaml batch=1 device=0|cpu`
</details>
<details><summary>Classification (ImageNet)</summary>
See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.
| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 0.5 |
| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 1.6 |
| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 5.0 |
| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 6.2 |
| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 13.7 |
- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
</details>
<details><summary>Pose (COCO)</summary>
See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pre-trained class, person.
| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 |
| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 |
| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 |
| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 |
| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 |
- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
</details>
<details><summary>OBB (DOTAv1)</summary>
See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| -------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.56 ± 0.80 | 4.43 ± 0.01 | 2.7 | 17.2 |
| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.41 ± 4.00 | 5.13 ± 0.02 | 9.7 | 57.5 |
| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.81 ± 2.87 | 10.07 ± 0.38 | 20.9 | 183.5 |
| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.49 ± 4.98 | 13.46 ± 0.55 | 26.2 | 232.0 |
| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.63 ± 7.67 | 28.59 ± 0.96 | 58.8 | 520.2 |
- **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
</details>
## <div align="center">Integrations</div>
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
<a href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations">
</a>
<br>
<br>
<div align="center">
<a href="https://www.ultralytics.com/hub">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics HUB logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://docs.wandb.ai/guides/integrations/ultralytics/">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="W&B logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov8-readme-comet">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
<a href="https://bit.ly/yolov5-neuralmagic">
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
</div>
| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
| :--------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
## <div align="center">Ultralytics HUB</div>
Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLO11 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
<a href="https://www.ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>
## <div align="center">Contribute</div>
We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
<a href="https://github.com/ultralytics/ultralytics/graphs/contributors">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" alt="Ultralytics open-source contributors"></a>
## <div align="center">License</div>
Ultralytics offers two licensing options to accommodate diverse use cases:
- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
## <div align="center">Contact</div>
For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
<br>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>
View File
@ -0,0 +1,89 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference
# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch or nvcr.io/nvidia/pytorch:23.03-py3
FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime
# Set environment variables
# Avoid DDP error "MKL_THREADING_LAYER=INTEL is incompatible with libgomp.so.1 library" https://github.com/pytorch/pytorch/issues/37377
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1 \
MKL_THREADING_LAYER=GNU \
OMP_NUM_THREADS=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
# libsm6 required by libqxcb to create QT-based windows for visualization; set 'QT_DEBUG_PLUGINS=1' to test in docker
RUN apt-get update && \
apt-get install -y --no-install-recommends \
gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6 \
&& rm -rf /var/lib/apt/lists/*
# Security updates
# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
RUN apt upgrade --no-install-recommends -y openssl tar
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages
RUN pip install uv
RUN uv pip install --system -e ".[export]" "albumentations>=1.4.6" comet pycocotools
# Run exports to AutoInstall packages
# Edge TPU export fails the first time so is run twice here
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 || yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
# Remove extra build files
RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest && sudo docker build -f docker/Dockerfile -t $t . && sudo docker push $t
# Pull and Run with access to all GPUs
# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
# Pull and Run with access to GPUs 2 and 3 (inside container CUDA devices will appear as 0 and 1)
# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus '"device=2,3"' $t
# Pull and Run with local directory access
# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/shared/datasets:/datasets $t
# Kill all
# sudo docker kill $(sudo docker ps -q)
# Kill all image-based
# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/ultralytics:latest)
# DockerHub tag update
# t=ultralytics/ultralytics:latest tnew=ultralytics/ultralytics:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
# Clean up
# sudo docker system prune -a --volumes
# Update Ubuntu drivers
# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
# DDP test
# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
# GCP VM from Image
# docker.io/ultralytics/ultralytics:latest
View File
@ -0,0 +1,58 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is aarch64-compatible for Apple M1, M2, M3, Raspberry Pi and other ARM architectures
# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu with "FROM arm64v8/ubuntu:22.04" (deprecated)
# Start FROM Debian image for arm64v8 https://hub.docker.com/r/arm64v8/debian (new)
FROM arm64v8/debian:bookworm-slim
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
# pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow'
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip git zip unzip wget curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
&& rm -rf /var/lib/apt/lists/*
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages
RUN pip install uv
RUN uv pip install --system -e ".[export]" --break-system-packages
# Creates a symbolic link to make 'python' point to 'python3'
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Remove extra build files
RUN rm -rf /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-arm64 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-arm64 -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-arm64 && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t
View File
@ -0,0 +1,50 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-conda image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is optimized for Ultralytics Anaconda (https://anaconda.org/conda-forge/ultralytics) installation and usage
# Start FROM miniconda3 image https://hub.docker.com/r/continuumio/miniconda3
FROM continuumio/miniconda3:latest
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libgl1 \
&& rm -rf /var/lib/apt/lists/*
# Copy contents
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install conda packages
# mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory'
RUN conda config --set solver libmamba && \
conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia && \
conda install -c conda-forge ultralytics mkl
# conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=12.1 ultralytics mkl
# Remove extra build files
RUN rm -rf /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-conda && sudo docker build -f docker/Dockerfile-conda -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-conda && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t
View File
@ -0,0 +1,62 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments
# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference)
FROM python:3.11.10-slim-bookworm
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
&& rm -rf /var/lib/apt/lists/*
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages
RUN pip install uv
RUN uv pip install --system -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
# Remove extra build files
RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
# Set default command to bash
CMD ["/bin/bash"]
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-cpu && sudo docker build -f docker/Dockerfile-cpu -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-cpu && sudo docker run -it --ipc=host --name NAME $t
# Pull and Run
# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host --name NAME $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t
View File
@ -0,0 +1,73 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetpack4 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack4.x for YOLO11 on Jetson Nano, TX2, Xavier NX, AGX Xavier
# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-cuda
FROM nvcr.io/nvidia/l4t-cuda:10.2.460-runtime
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Add NVIDIA repositories for TensorRT dependencies
RUN wget -q -O - https://repo.download.nvidia.com/jetson/jetson-ota-public.asc | apt-key add - && \
echo "deb https://repo.download.nvidia.com/jetson/common r32.7 main" > /etc/apt/sources.list.d/nvidia-l4t-apt-source.list && \
echo "deb https://repo.download.nvidia.com/jetson/t194 r32.7 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
# Install dependencies
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc \
&& rm -rf /var/lib/apt/lists/*
# Create symbolic links for python3.8 and pip3
RUN ln -sf /usr/bin/python3.8 /usr/bin/python3
RUN ln -s /usr/bin/pip3 /usr/bin/pip
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Download onnxruntime-gpu 1.8.0 and tensorrt 8.2.0.6
# Other versions can be seen in https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048
ADD https://nvidia.box.com/shared/static/gjqofg7rkg97z3gc8jeyup6t8n9j8xjw.whl onnxruntime_gpu-1.8.0-cp38-cp38-linux_aarch64.whl
ADD https://forums.developer.nvidia.com/uploads/short-url/hASzFOm9YsJx6VVFrDW1g44CMmv.whl tensorrt-8.2.0.6-cp38-none-linux_aarch64.whl
# Replace pyproject.toml TF.js version with 'tensorflowjs>=3.9.0' for JetPack4 compatibility
RUN sed -i 's/^\( *"tensorflowjs\)>=.*\(".*\)/\1>=3.9.0\2/' pyproject.toml
# Install pip packages
RUN python3 -m pip install --upgrade pip
RUN python3 -m pip install uv
RUN uv pip install --system \
onnxruntime_gpu-1.8.0-cp38-cp38-linux_aarch64.whl \
tensorrt-8.2.0.6-cp38-none-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-1.11.0a0+gitbc2c6ed-cp38-cp38-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.12.0a0+9b5a3fe-cp38-cp38-linux_aarch64.whl
RUN uv pip install --system -e ".[export]"
# Remove extra build files
RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack4 -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with NVIDIA runtime
# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
View File
@ -0,0 +1,59 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetpack5 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack5.1.2 for YOLO11 on Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano and Orin NX
# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack
FROM nvcr.io/nvidia/l4t-jetpack:r35.4.1
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install dependencies
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git python3-pip libopenmpi-dev libopenblas-base libomp-dev \
&& rm -rf /var/lib/apt/lists/*
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Replace pyproject.toml TF.js version with 'tensorflowjs>=3.9.0' for JetPack5 compatibility
RUN sed -i 's/^\( *"tensorflowjs\)>=.*\(".*\)/\1>=3.9.0\2/' pyproject.toml
# Pip install onnxruntime-gpu, torch, torchvision and ultralytics
RUN python3 -m pip install --upgrade pip uv
RUN uv pip install --system \
https://github.com/ultralytics/assets/releases/download/v0.0.0/onnxruntime_gpu-1.18.0-cp38-cp38-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.2.0-cp38-cp38-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.17.2+c1d70fe-cp38-cp38-linux_aarch64.whl
RUN uv pip install --system -e ".[export]"
# Remove extra build files
RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack5 -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with NVIDIA runtime
# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
View File
@ -0,0 +1,58 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetpack6 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack6.1 for YOLO11 on Jetson AGX Orin, Orin NX and Orin Nano Series
# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack
FROM nvcr.io/nvidia/l4t-jetpack:r36.4.0
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install dependencies
ADD https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb .
RUN dpkg -i cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
git python3-pip libopenmpi-dev libopenblas-base libomp-dev libcusparselt0 libcusparselt-dev \
&& rm -rf /var/lib/apt/lists/*
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Pip install onnxruntime-gpu, torch, torchvision and ultralytics
RUN python3 -m pip install --upgrade pip uv
RUN uv pip install --system \
https://github.com/ultralytics/assets/releases/download/v0.0.0/onnxruntime_gpu-1.20.0-cp310-cp310-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.5.0a0+872d972e41.nv24.08-cp310-cp310-linux_aarch64.whl \
https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.20.0a0+afc54f7-cp310-cp310-linux_aarch64.whl
RUN uv pip install --system -e ".[export]"
# Remove extra build files
RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack6 -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with NVIDIA runtime
# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t

View File

@ -0,0 +1,33 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-jupyter image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image provides JupyterLab interface for interactive YOLO development and includes tutorial notebooks
# Start from Python-based Ultralytics image for full Python environment
FROM ultralytics/ultralytics:latest-python
# Install JupyterLab for interactive development
RUN uv pip install --system jupyterlab
# Create persistent data directory structure
RUN mkdir /data
# Configure YOLO directories
RUN mkdir /data/{datasets,weights,runs} && \
yolo settings datasets_dir="/data/datasets" weights_dir="/data/weights" runs_dir="/data/runs"
# Start JupyterLab with tutorial notebook
ENTRYPOINT ["/usr/local/bin/jupyter", "lab", "--allow-root", "--ip=*", "/ultralytics/examples/tutorial.ipynb"]
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-jupyter && sudo docker build -f docker/Dockerfile-jupyter -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-jupyter && sudo docker run -it --ipc=host -p 8888:8888 $t
# Pull and Run
# t=ultralytics/ultralytics:latest-jupyter && sudo docker pull $t && sudo docker run -it --ipc=host -p 8888:8888 $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-jupyter && sudo docker pull $t && sudo docker run -it --ipc=host -p 8888:8888 -v "$(pwd)"/datasets:/data/datasets $t
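# After the container starts, open the tokenized URL printed in the container logs (typically http://localhost:8888/lab?token=...)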

View File

@ -0,0 +1,59 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-python image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments
# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference)
FROM python:3.11.10-slim-bookworm
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1
# Downloads to user config dir
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
&& rm -rf /var/lib/apt/lists/*
# Create working directory
WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages
RUN pip install uv
RUN uv pip install --system -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
# Remove extra build files
RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-python && sudo docker build -f docker/Dockerfile-python -t $t . && sudo docker push $t
# Run
# t=ultralytics/ultralytics:latest-python && sudo docker run -it --ipc=host $t
# Pull and Run
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host $t
# Pull and Run with local volume mounted
# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t

View File

@ -0,0 +1,44 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds GitHub actions CI runner image for deployment to DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference tests
# Start FROM Ultralytics GPU image
FROM ultralytics/ultralytics:latest
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_BREAK_SYSTEM_PACKAGES=1 \
RUNNER_ALLOW_RUNASROOT=1 \
DEBIAN_FRONTEND=noninteractive
# Set the working directory
WORKDIR /actions-runner
# Download and unpack the latest runner from https://github.com/actions/runner
RUN FILENAME=actions-runner-linux-x64-2.320.0.tar.gz && \
curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.320.0/$FILENAME && \
tar xzf $FILENAME && \
rm $FILENAME
# Install runner dependencies
RUN uv pip install --system pytest-cov
RUN ./bin/installdependencies.sh && \
apt-get -y install libicu-dev
# Inline ENTRYPOINT command to configure and start runner with default TOKEN and NAME
ENTRYPOINT sh -c './config.sh --url https://github.com/ultralytics/ultralytics \
--token ${GITHUB_RUNNER_TOKEN:-TOKEN} \
--name ${GITHUB_RUNNER_NAME:-NAME} \
--labels gpu-latest \
--replace && \
./run.sh'
# Usage Examples -------------------------------------------------------------------------------------------------------
# Build and Push
# t=ultralytics/ultralytics:latest-runner && sudo docker build -f docker/Dockerfile-runner -t $t . && sudo docker push $t
# Pull and Run in detached mode with access to GPUs 0 and 1
# t=ultralytics/ultralytics:latest-runner && sudo docker run -d -e GITHUB_RUNNER_TOKEN=TOKEN -e GITHUB_RUNNER_NAME=NAME --ipc=host --gpus '"device=0,1"' $t

View File

@ -0,0 +1,146 @@
<br>
<a href="https://www.ultralytics.com/" target="_blank"><img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320" alt="Ultralytics logo"></a>
# 📚 Ultralytics Docs
[Ultralytics](https://www.ultralytics.com/) Docs are the gateway to understanding and utilizing our cutting-edge machine learning tools. These documents are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com/) for your convenience.
[![pages-build-deployment](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment)
[![Check Broken links](https://github.com/ultralytics/docs/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/links.yml)
[![Check Domains](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml)
[![Ultralytics Actions](https://github.com/ultralytics/docs/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/format.yml)
<a href="https://discord.com/invite/ultralytics"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a> <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
## 🛠️ Installation
[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/)
[![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics)
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
To install the Ultralytics package in developer mode, ensure you have Git and Python 3 installed on your system. Then, follow these steps:
1. Clone the ultralytics repository to your local machine using Git:
```bash
git clone https://github.com/ultralytics/ultralytics.git
```
2. Navigate to the cloned repository's root directory:
```bash
cd ultralytics
```
3. Install the package in developer mode using pip (or pip3 for Python 3):
```bash
pip install -e '.[dev]'
```
- This command installs the Ultralytics package along with all development dependencies, allowing you to modify the package code and have the changes immediately reflected in your Python environment.
## 🚀 Building and Serving Locally
The `mkdocs serve` command builds and serves a local version of your MkDocs documentation, ideal for development and testing:
```bash
mkdocs serve
```
- #### Command Breakdown:
- `mkdocs` is the main MkDocs command-line interface.
- `serve` is the subcommand to build and locally serve your documentation.
- 🧐 Note:
- See changes to the docs in real time, as `mkdocs serve` supports live reloading.
- To stop the local server, press `CTRL+C`.
## 🌍 Building and Serving Multi-Language
Supporting multi-language documentation? Follow these steps:
1. Stage all new language \*.md files with Git:
```bash
git add docs/**/*.md -f
```
2. Build all languages to the `/site` folder, ensuring relevant root-level files are present:
```bash
# Clear existing /site directory
rm -rf site
# Loop through each language config file and build
mkdocs build -f docs/mkdocs.yml
for file in docs/mkdocs_*.yml; do
echo "Building MkDocs site with $file"
mkdocs build -f "$file"
done
```
3. To preview your site, initiate a simple HTTP server:
```bash
cd site
python -m http.server
# Open in your preferred browser
```
- 🖥️ Access the live site at `http://localhost:8000`.
## 📤 Deploying Your Documentation Site
Choose a hosting provider and deployment method for your MkDocs documentation:
- Configure `mkdocs.yml` with deployment settings.
- Use `mkdocs gh-deploy` (or your hosting provider's equivalent) to build and deploy your site.
* ### GitHub Pages Deployment Example:
```bash
mkdocs gh-deploy
```
- Update the "Custom domain" in your repository's settings for a personalized URL.
![MkDocs deployment example](https://github.com/ultralytics/docs/releases/download/0/mkdocs-deployment-example.avif)
- For detailed deployment guidance, consult the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/).
## 💡 Contribute
We cherish the community's input as it drives Ultralytics open-source initiatives. Dive into the [Contributing Guide](https://docs.ultralytics.com/help/contributing/) and share your thoughts via our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A heartfelt thank you 🙏 to each contributor!
![Ultralytics open-source contributors](https://github.com/ultralytics/docs/releases/download/0/ultralytics-open-source-contributors.avif)
## 📜 License
Ultralytics Docs presents two licensing options:
- **AGPL-3.0 License**: Perfect for academia and open collaboration. Details are in the [LICENSE](https://github.com/ultralytics/docs/blob/main/LICENSE) file.
- **Enterprise License**: Tailored for commercial usage, offering a seamless blend of Ultralytics technology in your products. Learn more at [Ultralytics Licensing](https://www.ultralytics.com/license).
## ✉️ Contact
For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
<br>
<div align="center">
<a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://youtube.com/ultralytics?sub_confirmation=1"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
<a href="https://discord.com/invite/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>

View File

@ -0,0 +1,358 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Automates building and post-processing of MkDocs documentation, especially for multilingual projects.
This script streamlines generating localized documentation and updating HTML links for correct formatting.
Key Features:
- Automated building of MkDocs documentation: Compiles main documentation and localized versions from separate
MkDocs configuration files.
- Post-processing of generated HTML files: Updates HTML files to remove '.md' from internal links, ensuring
correct navigation in web-based documentation.
Usage:
- Run from the root directory of your MkDocs project.
- Ensure MkDocs is installed and configuration files (main and localized) are present.
- The script builds documentation using MkDocs, then scans HTML files in 'site' to update links.
- Ideal for projects with Markdown documentation served as a static website.
Note:
- Requires Python and MkDocs to be installed and configured.
"""
import json
import os
import re
import shutil
import subprocess
from pathlib import Path
from bs4 import BeautifulSoup
from tqdm import tqdm
os.environ["JUPYTER_PLATFORM_DIRS"] = "1" # fix DeprecationWarning: Jupyter is migrating to use standard platformdirs
DOCS = Path(__file__).parent.resolve()
SITE = DOCS.parent / "site"
LINK_PATTERN = re.compile(r"(https?://[^\s()<>]*[^\s()<>.,:;!?\'\"])")
def create_vercel_config():
"""Create vercel.json in the site directory with customized configuration settings."""
config = {"trailingSlash": True}
with open(SITE / "vercel.json", "w") as f:
json.dump(config, f, indent=2)
def prepare_docs_markdown(clone_repos: bool = True):
"""Build docs using mkdocs."""
print("Removing existing build artifacts")
shutil.rmtree(SITE, ignore_errors=True)
shutil.rmtree(DOCS / "repos", ignore_errors=True)
if clone_repos:
# Get hub-sdk repo
repo = "https://github.com/ultralytics/hub-sdk"
local_dir = DOCS / "repos" / Path(repo).name
os.system(f"git clone {repo} {local_dir} --depth 1 --single-branch --branch main")
shutil.rmtree(DOCS / "en/hub/sdk", ignore_errors=True) # delete if exists
shutil.copytree(local_dir / "docs", DOCS / "en/hub/sdk") # for docs
shutil.rmtree(DOCS.parent / "hub_sdk", ignore_errors=True) # delete if exists
shutil.copytree(local_dir / "hub_sdk", DOCS.parent / "hub_sdk") # for mkdocstrings
print(f"Cloned/Updated {repo} in {local_dir}")
# Get docs repo
repo = "https://github.com/ultralytics/docs"
local_dir = DOCS / "repos" / Path(repo).name
os.system(f"git clone {repo} {local_dir} --depth 1 --single-branch --branch main")
shutil.rmtree(DOCS / "en/compare", ignore_errors=True) # delete if exists
shutil.copytree(local_dir / "docs/en/compare", DOCS / "en/compare") # for docs
print(f"Cloned/Updated {repo} in {local_dir}")
# Add frontmatter
for file in tqdm((DOCS / "en").rglob("*.md"), desc="Adding frontmatter"):
update_markdown_files(file)
def update_page_title(file_path: Path, new_title: str):
"""Update the title of an HTML file."""
with open(file_path, encoding="utf-8") as file:
content = file.read()
# Replace the existing title with the new title
updated_content = re.sub(r"<title>.*?</title>", f"<title>{new_title}</title>", content)
# Write the updated content back to the file
with open(file_path, "w", encoding="utf-8") as file:
file.write(updated_content)
def update_html_head(script: str = ""):
"""Update the HTML head section of each file."""
html_files = Path(SITE).rglob("*.html")
for html_file in tqdm(html_files, desc="Processing HTML files"):
with html_file.open("r", encoding="utf-8") as file:
html_content = file.read()
if script in html_content: # script already in HTML file
return
head_end_index = html_content.lower().rfind("</head>")
if head_end_index != -1:
# Add the specified JavaScript to the HTML file just before the end of the head tag.
new_html_content = html_content[:head_end_index] + script + html_content[head_end_index:]
with html_file.open("w", encoding="utf-8") as file:
file.write(new_html_content)
def update_subdir_edit_links(subdir: str = "", docs_url: str = ""):
"""Update the HTML head section of each file."""
if str(subdir[0]) == "/":
subdir = str(subdir[0])[1:]
html_files = (SITE / subdir).rglob("*.html")
for html_file in tqdm(html_files, desc="Processing subdir files", mininterval=1.0):
with html_file.open("r", encoding="utf-8") as file:
soup = BeautifulSoup(file, "html.parser")
# Find the anchor tag and update its href attribute
a_tag = soup.find("a", {"class": "md-content__button md-icon"})
if a_tag and a_tag["title"] == "Edit this page":
a_tag["href"] = f"{docs_url}{a_tag['href'].split(subdir)[-1]}"
# Write the updated HTML back to the file
with open(html_file, "w", encoding="utf-8") as file:
file.write(str(soup))
def update_markdown_files(md_filepath: Path):
"""Create or update a Markdown file, ensuring frontmatter is present."""
if md_filepath.exists():
content = md_filepath.read_text().strip()
# Replace apostrophes
content = content.replace("‘", "'").replace("’", "'")
# Add frontmatter if missing
if not content.strip().startswith("---\n") and "macros" not in md_filepath.parts: # skip macros directory
header = "---\ncomments: true\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
content = header + content
# Ensure MkDocs admonitions "=== " lines are preceded and followed by empty newlines
lines = content.split("\n")
new_lines = []
for i, line in enumerate(lines):
stripped_line = line.strip()
if stripped_line.startswith("=== "):
if i > 0 and new_lines[-1] != "":
new_lines.append("")
new_lines.append(line)
if i < len(lines) - 1 and lines[i + 1].strip() != "":
new_lines.append("")
else:
new_lines.append(line)
content = "\n".join(new_lines)
# Add EOF newline if missing
if not content.endswith("\n"):
content += "\n"
# Save page
md_filepath.write_text(content)
return
def update_docs_html():
"""Update titles, edit links, head sections, and convert plaintext links in HTML documentation."""
# Update 404 titles
update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
# Update edit button links
for subdir, docs_url in (
("hub/sdk/", "https://github.com/ultralytics/hub-sdk/tree/main/docs/"), # do not use leading slash
("compare/", "https://github.com/ultralytics/docs/tree/main/docs/en/compare/"),
):
update_subdir_edit_links(subdir=subdir, docs_url=docs_url)
# Convert plaintext links to HTML hyperlinks
files_modified = 0
for html_file in tqdm(SITE.rglob("*.html"), desc="Converting plaintext links", mininterval=1.0):
with open(html_file, encoding="utf-8") as file:
content = file.read()
updated_content = convert_plaintext_links_to_html(content)
if updated_content != content:
with open(html_file, "w", encoding="utf-8") as file:
file.write(updated_content)
files_modified += 1
print(f"Modified plaintext links in {files_modified} files.")
# Update HTML file head section
script = ""
if any(script):
update_html_head(script)
# Delete the /macros directory from the built site
macros_dir = SITE / "macros"
if macros_dir.exists():
print(f"Removing /macros directory from site: {macros_dir}")
shutil.rmtree(macros_dir)
def convert_plaintext_links_to_html(content: str) -> str:
"""Convert plaintext links to HTML hyperlinks in the main content area only."""
soup = BeautifulSoup(content, "html.parser")
# Find the main content area (adjust this selector based on your HTML structure)
main_content = soup.find("main") or soup.find("div", class_="md-content")
if not main_content:
return content # Return original content if main content area not found
modified = False
for paragraph in main_content.find_all(["p", "li"]): # Focus on paragraphs and list items
for text_node in paragraph.find_all(string=True, recursive=False):
if text_node.parent.name not in {"a", "code"}: # Ignore links and code blocks
new_text = LINK_PATTERN.sub(r'<a href="\1">\1</a>', str(text_node))
if "<a href=" in new_text:
# Parse the new text with BeautifulSoup to handle HTML properly
new_soup = BeautifulSoup(new_text, "html.parser")
text_node.replace_with(new_soup)
modified = True
return str(soup) if modified else content
def remove_macros():
"""Remove the /macros directory and related entries in sitemap.xml from the built site."""
shutil.rmtree(SITE / "macros", ignore_errors=True)
(SITE / "sitemap.xml.gz").unlink(missing_ok=True)
# Process sitemap.xml
sitemap = SITE / "sitemap.xml"
lines = sitemap.read_text(encoding="utf-8").splitlines(keepends=True)
# Find indices of '/macros/' lines
macros_indices = [i for i, line in enumerate(lines) if "/macros/" in line]
# Create a set of indices to remove (including lines before and after)
indices_to_remove = set()
for i in macros_indices:
indices_to_remove.update(range(i - 1, i + 3))  # removes lines i-1, i, i+1 and i+2
# Create new list of lines, excluding the ones to remove
new_lines = [line for i, line in enumerate(lines) if i not in indices_to_remove]
# Write the cleaned content back to the file
sitemap.write_text("".join(new_lines), encoding="utf-8")
print(f"Removed {len(macros_indices)} URLs containing '/macros/' from {sitemap}")
def remove_comments_and_empty_lines(content: str, file_type: str) -> str:
"""
Remove comments and empty lines from a string of code, preserving newlines and URLs.
Typical reductions for Ultralytics Docs are:
- Total HTML reduction: 2.83% (1301.56 KB saved)
- Total CSS reduction: 1.75% (2.61 KB saved)
- Total JS reduction: 13.72% (100.88 KB saved)
"""
if file_type == "html":
# Remove HTML comments
content = re.sub(r"<!--[\s\S]*?-->", "", content)
# Only remove empty lines for HTML, preserve indentation
content = re.sub(r"^\s*$\n", "", content, flags=re.MULTILINE)
elif file_type == "css":
# Remove CSS comments
content = re.sub(r"/\*[\s\S]*?\*/", "", content)
# Remove whitespace around specific characters
content = re.sub(r"\s*([{}:;,])\s*", r"\1", content)
# Remove empty lines
content = re.sub(r"^\s*\n", "", content, flags=re.MULTILINE)
# Collapse multiple spaces to single space
content = re.sub(r"\s{2,}", " ", content)
# Remove all newlines
content = re.sub(r"\n", "", content)
elif file_type == "js":
# Handle JS single-line comments (preserving http:// and https://)
lines = content.split("\n")
processed_lines = []
for line in lines:
# Only remove comments if they're not part of a URL
if "//" in line and "http://" not in line and "https://" not in line:
processed_lines.append(line.split("//")[0])
else:
processed_lines.append(line)
content = "\n".join(processed_lines)
# Remove JS multi-line comments
content = re.sub(r"/\*[\s\S]*?\*/", "", content)
# Remove empty lines
content = re.sub(r"^\s*\n", "", content, flags=re.MULTILINE)
# Collapse multiple spaces to single space
content = re.sub(r"\s{2,}", " ", content)
# Remove spaces around JS specific characters
content = re.sub(r"\s*([+\-*/=:;,(){}\[\]])\s*", r"\1", content)
return content
def minify_files(html: bool = True, css: bool = True, js: bool = True):
"""Minify HTML, CSS, and JS files and print total reduction stats."""
minify, compress, jsmin = None, None, None
try:
if html:
from minify_html import minify
if css:
from csscompressor import compress
if js:
import jsmin
except ImportError as e:
print(f"Missing required package: {str(e)}")
return
stats = {}
for ext, minifier in {
"html": (lambda x: minify(x, keep_closing_tags=True, minify_css=True, minify_js=True)) if html else None,
"css": compress if css else None,
"js": jsmin.jsmin if js else None,
}.items():
stats[ext] = {"original": 0, "minified": 0}
directory = "" # "stylesheets" if ext == css else "javascript" if ext == "js" else ""
for f in tqdm((SITE / directory).rglob(f"*.{ext}"), desc=f"Minifying {ext.upper()}", mininterval=1.0):
content = f.read_text(encoding="utf-8")
minified = minifier(content) if minifier else remove_comments_and_empty_lines(content, ext)
stats[ext]["original"] += len(content)
stats[ext]["minified"] += len(minified)
f.write_text(minified, encoding="utf-8")
for ext, data in stats.items():
if data["original"]:
r = data["original"] - data["minified"] # reduction
print(f"Total {ext.upper()} reduction: {(r / data['original']) * 100:.2f}% ({r / 1024:.2f} KB saved)")
def main():
"""Build docs, update titles and edit links, minify HTML, and print local server command."""
prepare_docs_markdown()
# Build the main documentation
print(f"Building docs from {DOCS}")
subprocess.run(f"mkdocs build -f {DOCS.parent}/mkdocs.yml --strict", check=True, shell=True)
remove_macros()
create_vercel_config()
print(f"Site built at {SITE}")
# Update docs HTML pages
update_docs_html()
# Minify files
minify_files(html=False, css=False, js=False)
# Cleanup
shutil.rmtree(DOCS.parent / "hub_sdk", ignore_errors=True)
shutil.rmtree(DOCS / "repos", ignore_errors=True)
# Show command to serve built website
print('Docs built correctly ✅\nServe site at http://localhost:8000 with "python -m http.server --directory site"')
if __name__ == "__main__":
main()

View File

@ -0,0 +1,149 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Helper file to build Ultralytics Docs reference section.
This script recursively walks through the ultralytics directory and builds an MkDocs reference section of *.md files
composed of classes and functions, and also creates a navigation menu for use in mkdocs.yaml.
Note: Must be run from repository root directory. Do not run from docs directory.
"""
import re
import subprocess
from collections import defaultdict
from pathlib import Path
# Constants
hub_sdk = False
if hub_sdk:
PACKAGE_DIR = Path("/Users/glennjocher/PycharmProjects/hub-sdk/hub_sdk")
REFERENCE_DIR = PACKAGE_DIR.parent / "docs/reference"
GITHUB_REPO = "ultralytics/hub-sdk"
else:
FILE = Path(__file__).resolve()
PACKAGE_DIR = FILE.parents[1] / "ultralytics" # i.e. /Users/glennjocher/PycharmProjects/ultralytics/ultralytics
REFERENCE_DIR = PACKAGE_DIR.parent / "docs/en/reference"
GITHUB_REPO = "ultralytics/ultralytics"
def extract_classes_and_functions(filepath: Path) -> tuple:
"""Extracts class and function names from a given Python file."""
content = filepath.read_text()
class_pattern = r"(?:^|\n)class\s(\w+)(?:\(|:)"
func_pattern = r"(?:^|\n)def\s(\w+)\("
classes = re.findall(class_pattern, content)
functions = re.findall(func_pattern, content)
return classes, functions
def create_markdown(py_filepath: Path, module_path: str, classes: list, functions: list) -> Path:
"""Creates a Markdown file containing the API reference for the given Python module."""
md_filepath = py_filepath.with_suffix(".md")
exists = md_filepath.exists()
# Read existing content and retain header metadata if available
header_content = ""
if exists:
existing_content = md_filepath.read_text()
header_parts = existing_content.split("---")
for part in header_parts:
if "description:" in part or "comments:" in part:
header_content += f"---{part}---\n\n"
if not any(header_content):
header_content = "---\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
module_name = module_path.replace(".__init__", "")
module_path = module_path.replace(".", "/")
url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path}.py"
edit = f"https://github.com/{GITHUB_REPO}/edit/main/{module_path}.py"
pretty = url.replace("__init__.py", "\\_\\_init\\_\\_.py") # Properly display __init__.py filenames
title_content = (
f"# Reference for `{module_path}.py`\n\n"
f"!!! note\n\n"
f" This file is available at [{pretty}]({url}). If you spot a problem please help fix it by [contributing]"
f"(https://docs.ultralytics.com/help/contributing/) a [Pull Request]({edit}) 🛠️. Thank you 🙏!\n\n"
)
md_content = ["<br>\n"] + [f"## ::: {module_name}.{class_name}\n\n<br><br><hr><br>\n" for class_name in classes]
md_content.extend(f"## ::: {module_name}.{func_name}\n\n<br><br><hr><br>\n" for func_name in functions)
md_content[-1] = md_content[-1].replace("<hr><br>", "") # Remove last horizontal rule from final entry
md_content = header_content + title_content + "\n".join(md_content)
if not md_content.endswith("\n"):
md_content += "\n"
md_filepath.parent.mkdir(parents=True, exist_ok=True)
md_filepath.write_text(md_content)
if not exists:
# Add new Markdown file to the Git staging area
print(f"Created new file '{md_filepath}'")
subprocess.run(["git", "add", "-f", str(md_filepath)], check=True, cwd=PACKAGE_DIR)
return md_filepath.relative_to(PACKAGE_DIR.parent)
def nested_dict() -> defaultdict:
"""Creates and returns a nested defaultdict."""
return defaultdict(nested_dict)
def sort_nested_dict(d: dict) -> dict:
"""Sorts a nested dictionary recursively."""
return {key: sort_nested_dict(value) if isinstance(value, dict) else value for key, value in sorted(d.items())}
def create_nav_menu_yaml(nav_items: list, save: bool = False) -> None:
"""Creates a YAML file for the navigation menu based on the provided list of items."""
nav_tree = nested_dict()
for item_str in nav_items:
item = Path(item_str)
parts = item.parts
current_level = nav_tree["reference"]
for part in parts[2:-1]: # Skip the first two parts (docs and reference) and the filename
current_level = current_level[part]
md_file_name = parts[-1].replace(".md", "")
current_level[md_file_name] = item
nav_tree_sorted = sort_nested_dict(nav_tree)
def _dict_to_yaml(d, level=0):
"""Converts a nested dictionary to a YAML-formatted string with indentation."""
yaml_str = ""
indent = " " * level
for k, v in d.items():
if isinstance(v, dict):
yaml_str += f"{indent}- {k}:\n{_dict_to_yaml(v, level + 1)}"
else:
yaml_str += f"{indent}- {k}: {str(v).replace('docs/en/', '')}\n"
return yaml_str
# Print updated YAML reference section
print("Scan complete, new mkdocs.yaml reference section is:\n\n", _dict_to_yaml(nav_tree_sorted))
# Save new YAML reference section to file if 'save' is True
if save:
(PACKAGE_DIR.parent / "nav_menu_updated.yml").write_text(_dict_to_yaml(nav_tree_sorted))
def main():
"""Extract class and function names, create Markdown files, and generate a YAML navigation menu."""
nav_items = []
for py_filepath in PACKAGE_DIR.rglob("*.py"):
classes, functions = extract_classes_and_functions(py_filepath)
if classes or functions:
py_filepath_rel = py_filepath.relative_to(PACKAGE_DIR)
md_filepath = REFERENCE_DIR / py_filepath_rel
module_path = f"{PACKAGE_DIR.name}.{py_filepath_rel.with_suffix('').as_posix().replace('/', '.')}"
md_rel_filepath = create_markdown(md_filepath, module_path, classes, functions)
nav_items.append(str(md_rel_filepath))
create_nav_menu_yaml(nav_items)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,34 @@
---
description: Discover what's next for Ultralytics with our under-construction page, previewing new, groundbreaking AI and ML features coming soon.
keywords: Ultralytics, coming soon, under construction, new features, AI updates, ML advancements, YOLO, technology preview
---
# Under Construction 🏗️🌟
Welcome to the [Ultralytics](https://www.ultralytics.com/) "Under Construction" page! Here, we're hard at work developing the next generation of AI and ML innovations. This page serves as a teaser for the exciting updates and new features we're eager to share with you!
## Exciting New Features on the Way 🎉
- **Innovative Breakthroughs:** Get ready for advanced features and services that will transform your AI and ML experience.
- **New Horizons:** Anticipate novel products that redefine AI and ML capabilities.
- **Enhanced Services:** We're upgrading our services for greater efficiency and user-friendliness.
## Stay Updated 🚧
This placeholder page is your first stop for upcoming developments. Keep an eye out for:
- **Newsletter:** Subscribe [here](https://www.ultralytics.com/#newsletter) for the latest news.
- **Social Media:** Follow us [here](https://www.linkedin.com/company/ultralytics) for updates and teasers.
- **Blog:** Visit our [blog](https://www.ultralytics.com/blog) for detailed insights.
## We Value Your Input 🗣️
Your feedback shapes our future releases. Share your thoughts and suggestions [here](https://www.ultralytics.com/survey).
## Thank You, Community! 🌍
Your [contributions](https://docs.ultralytics.com/help/contributing/) inspire our continuous [innovation](https://github.com/ultralytics/ultralytics). Stay tuned for the big reveal of what's next in AI and ML at Ultralytics!
---
Excited for what's coming? Bookmark this page and get ready for a transformative AI and ML journey with Ultralytics! 🛠️🤖

View File

@ -0,0 +1 @@
docs.ultralytics.com

View File

@ -0,0 +1,152 @@
---
comments: true
description: Explore the widely-used Caltech-101 dataset with 9,000 images across 101 categories. Ideal for object recognition tasks in machine learning and computer vision.
keywords: Caltech-101, dataset, object recognition, machine learning, computer vision, YOLO, deep learning, research, AI
---
# Caltech-101 Dataset
The [Caltech-101](https://data.caltech.edu/records/mzrjq-6wc02) dataset is a widely used dataset for object recognition tasks, containing around 9,000 images from 101 object categories. The categories were chosen to reflect a variety of real-world objects, and the images themselves were carefully selected and annotated to provide a challenging benchmark for object recognition algorithms.
## Key Features
- The Caltech-101 dataset comprises around 9,000 color images divided into 101 categories.
- The categories encompass a wide variety of objects, including animals, vehicles, household items, and people.
- The number of images per category varies, with about 40 to 800 images in each category.
- Images are of variable sizes, with most images being medium resolution.
- Caltech-101 is widely used for training and testing in the field of machine learning, particularly for object recognition tasks.
## Dataset Structure
Unlike many other datasets, the Caltech-101 dataset is not formally split into training and testing sets. Users typically create their own splits based on their specific needs. However, a common practice is to use a random subset of images for training (e.g., 30 images per category) and the remaining images for testing.
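One possible way to script such a split (an illustrative sketch only, not part of the dataset or the Ultralytics tooling) assumes the images are unpacked into one folder per category under `caltech-101/` and copies 30 randomly chosen images per class into a training folder, with the remainder used for testing:
```python
import random
import shutil
from pathlib import Path

SRC = Path("caltech-101")  # assumed layout: one sub-folder per object category
DST = Path("caltech-101-split")
PER_CLASS_TRAIN = 30  # images per category reserved for training

random.seed(0)
for category in sorted(d for d in SRC.iterdir() if d.is_dir()):
    images = sorted(category.glob("*.jpg"))
    random.shuffle(images)
    for split, subset in (("train", images[:PER_CLASS_TRAIN]), ("test", images[PER_CLASS_TRAIN:])):
        out_dir = DST / split / category.name
        out_dir.mkdir(parents=True, exist_ok=True)
        for img in subset:
            shutil.copy(img, out_dir / img.name)
```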
## Applications
The Caltech-101 dataset is extensively used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object recognition tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. Its wide variety of categories and high-quality images make it an excellent dataset for research and development in the field of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
## Usage
To train a YOLO model on the Caltech-101 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch), you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="caltech101", epochs=100, imgsz=416)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=caltech101 model=yolo11n-cls.pt epochs=100 imgsz=416
```
## Sample Images and Annotations
The Caltech-101 dataset contains high-quality color images of various objects, providing a well-structured dataset for [image classification](https://www.ultralytics.com/glossary/image-classification) tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/caltech101-sample-image.avif)
The example showcases the variety and complexity of the objects in the Caltech-101 dataset, emphasizing the significance of a diverse dataset for training robust object recognition models.
## Citations and Acknowledgments
If you use the Caltech-101 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{fei2007learning,
title={Learning generative visual models from few training examples: An incremental Bayesian approach tested on 101 object categories},
author={Fei-Fei, Li and Fergus, Rob and Perona, Pietro},
journal={Computer vision and Image understanding},
volume={106},
number={1},
pages={59--70},
year={2007},
publisher={Elsevier}
}
```
We would like to acknowledge Li Fei-Fei, Rob Fergus, and Pietro Perona for creating and maintaining the Caltech-101 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the Caltech-101 dataset and its creators, visit the [Caltech-101 dataset website](https://data.caltech.edu/records/mzrjq-6wc02).
## FAQ
### What is the Caltech-101 dataset used for in machine learning?
The [Caltech-101](https://data.caltech.edu/records/mzrjq-6wc02) dataset is widely used in machine learning for object recognition tasks. It contains around 9,000 images across 101 categories, providing a challenging benchmark for evaluating object recognition algorithms. Researchers leverage it to train and test models, especially Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs) and Support Vector Machines (SVMs), in computer vision.
### How can I train an Ultralytics YOLO model on the Caltech-101 dataset?
To train an Ultralytics YOLO model on the Caltech-101 dataset, you can use the provided code snippets. For example, to train for 100 epochs:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="caltech101", epochs=100, imgsz=416)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=caltech101 model=yolo11n-cls.pt epochs=100 imgsz=416
```
For more detailed arguments and options, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the Caltech-101 dataset?
The Caltech-101 dataset includes:
- Around 9,000 color images across 101 categories.
- Categories covering a diverse range of objects, including animals, vehicles, and household items.
- Variable number of images per category, typically between 40 and 800.
- Variable image sizes, with most being medium resolution.
These features make it an excellent choice for training and evaluating object recognition models in machine learning and computer vision.
### Why should I cite the Caltech-101 dataset in my research?
Citing the Caltech-101 dataset in your research acknowledges the creators' contributions and provides a reference for others who might use the dataset. The recommended citation is:
!!! quote ""
=== "BibTeX"
```bibtex
@article{fei2007learning,
title={Learning generative visual models from few training examples: An incremental Bayesian approach tested on 101 object categories},
author={Fei-Fei, Li and Fergus, Rob and Perona, Pietro},
journal={Computer vision and Image understanding},
volume={106},
number={1},
pages={59--70},
year={2007},
publisher={Elsevier}
}
```
Citing helps in maintaining the integrity of academic work and assists peers in locating the original resource.
### Can I use Ultralytics HUB for training models on the Caltech-101 dataset?
Yes, you can use [Ultralytics HUB](https://www.ultralytics.com/hub) for training models on the Caltech-101 dataset. Ultralytics HUB provides an intuitive platform for managing datasets, training models, and deploying them without extensive coding. For a detailed guide, refer to the [how to train your custom models with Ultralytics HUB](https://www.ultralytics.com/blog/how-to-train-your-custom-models-with-ultralytics-hub) blog post.

View File

@ -0,0 +1,144 @@
---
comments: true
description: Explore the Caltech-256 dataset, featuring 30,000 images across 257 categories, ideal for training and testing object recognition algorithms.
keywords: Caltech-256 dataset, object classification, image dataset, machine learning, computer vision, deep learning, YOLO, training dataset
---
# Caltech-256 Dataset
The [Caltech-256](https://data.caltech.edu/records/nyy15-4j048) dataset is an extensive collection of images used for object classification tasks. It contains around 30,000 images divided into 257 categories (256 object categories and 1 background category). The images are carefully curated and annotated to provide a challenging and diverse benchmark for object recognition algorithms.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/isc06_9qnM0"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train <a href="https://www.ultralytics.com/glossary/image-classification">Image Classification</a> Model using Caltech-256 Dataset with Ultralytics HUB
</p>
## Key Features
- The Caltech-256 dataset comprises around 30,000 color images divided into 257 categories.
- Each category contains a minimum of 80 images.
- The categories encompass a wide variety of real-world objects, including animals, vehicles, household items, and people.
- Images are of variable sizes and resolutions.
- Caltech-256 is widely used for training and testing in the field of machine learning, particularly for object recognition tasks.
## Dataset Structure
Like [Caltech-101](../classify/caltech101.md), the Caltech-256 dataset does not have a formal split between training and testing sets. Users typically create their own splits according to their specific needs. A common practice is to use a random subset of images for training and the remaining images for testing.
## Applications
The Caltech-256 dataset is extensively used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object recognition tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. Its diverse set of categories and high-quality images make it an invaluable dataset for research and development in the field of machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
## Usage
To train a YOLO model on the Caltech-256 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch), you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="caltech256", epochs=100, imgsz=416)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=caltech256 model=yolo11n-cls.pt epochs=100 imgsz=416
```
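Once training completes, the resulting checkpoint can be used for inference. Below is a minimal sketch; the checkpoint path is the default Ultralytics output location and the image path is a placeholder:
```python
from ultralytics import YOLO

# Load the trained classifier (default save location; adjust to your run directory)
model = YOLO("runs/classify/train/weights/best.pt")
# Classify a single image (replace with your own image path)
results = model("path/to/image.jpg")
probs = results[0].probs
print(model.names[probs.top1], float(probs.top1conf))  # predicted class name and confidence
```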
## Sample Images and Annotations
The Caltech-256 dataset contains high-quality color images of various objects, providing a comprehensive dataset for object recognition tasks. Here are some examples of images from the dataset ([credit](https://ml4a.github.io/demos/tsne_viewer.html)):
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/caltech256-sample-image.avif)
The example showcases the diversity and complexity of the objects in the Caltech-256 dataset, emphasizing the importance of a varied dataset for training robust object recognition models.
## Citations and Acknowledgments
If you use the Caltech-256 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{griffin2007caltech,
title={Caltech-256 object category dataset},
author={Griffin, Gregory and Holub, Alex and Perona, Pietro},
year={2007}
}
```
We would like to acknowledge Gregory Griffin, Alex Holub, and Pietro Perona for creating and maintaining the Caltech-256 dataset as a valuable resource for the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision research community. For more information about the Caltech-256 dataset and its creators, visit the [Caltech-256 dataset website](https://data.caltech.edu/records/nyy15-4j048).
## FAQ
### What is the Caltech-256 dataset and why is it important for machine learning?
The [Caltech-256](https://data.caltech.edu/records/nyy15-4j048) dataset is a large image dataset used primarily for object classification tasks in machine learning and computer vision. It consists of around 30,000 color images divided into 257 categories, covering a wide range of real-world objects. The dataset's diverse and high-quality images make it an excellent benchmark for evaluating object recognition algorithms, which is crucial for developing robust machine learning models.
### How can I train a YOLO model on the Caltech-256 dataset using Python or CLI?
To train a YOLO model on the Caltech-256 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch), you can use the following code snippets. Refer to the model [Training](../../modes/train.md) page for additional options.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model
# Train the model
results = model.train(data="caltech256", epochs=100, imgsz=416)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=caltech256 model=yolo11n-cls.pt epochs=100 imgsz=416
```
### What are the most common use cases for the Caltech-256 dataset?
The Caltech-256 dataset is widely used for various object recognition tasks such as:
- Training Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs)
- Evaluating the performance of Support Vector Machines (SVMs)
- Benchmarking new deep learning algorithms
- Developing [object detection](https://www.ultralytics.com/glossary/object-detection) models using frameworks like Ultralytics YOLO
Its diversity and comprehensive annotations make it ideal for research and development in machine learning and computer vision.
### How is the Caltech-256 dataset structured and split for training and testing?
The Caltech-256 dataset does not come with a predefined split for training and testing. Users typically create their own splits according to their specific needs. A common approach is to randomly select a subset of images for training and use the remaining images for testing. This flexibility allows users to tailor the dataset to their specific project requirements and experimental setups.
### Why should I use Ultralytics YOLO for training models on the Caltech-256 dataset?
Ultralytics YOLO models offer several advantages for training on the Caltech-256 dataset:
- **High Accuracy**: YOLO models are known for their state-of-the-art performance in object detection tasks.
- **Speed**: They provide real-time inference capabilities, making them suitable for applications requiring quick predictions.
- **Ease of Use**: With [Ultralytics HUB](https://www.ultralytics.com/hub), users can train, validate, and deploy models without extensive coding.
- **Pretrained Models**: Starting from pretrained models, like `yolo11n-cls.pt`, can significantly reduce training time and improve model [accuracy](https://www.ultralytics.com/glossary/accuracy).
For more details, explore our [comprehensive training guide](../../modes/train.md) and learn about [image classification](../../tasks/classify.md) with Ultralytics YOLO.

View File

@ -0,0 +1,173 @@
---
comments: true
description: Explore the CIFAR-10 dataset, featuring 60,000 color images in 10 classes. Learn about its structure, applications, and how to train models using YOLO.
keywords: CIFAR-10, dataset, machine learning, computer vision, image classification, YOLO, deep learning, neural networks
---
# CIFAR-10 Dataset
The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) (Canadian Institute For Advanced Research) dataset is a collection of images used widely for [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision algorithms. It was developed by researchers at the CIFAR institute and consists of 60,000 32x32 color images in 10 different classes.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/fLBbyhPbWzY"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train an <a href="https://www.ultralytics.com/glossary/image-classification">Image Classification</a> Model with CIFAR-10 Dataset using Ultralytics YOLO11
</p>
## Key Features
- The CIFAR-10 dataset consists of 60,000 images, divided into 10 classes.
- Each class contains 6,000 images, split into 5,000 for training and 1,000 for testing.
- The images are colored and of size 32x32 pixels.
- The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
- CIFAR-10 is commonly used for training and testing in the field of machine learning and computer vision.
## Dataset Structure
The CIFAR-10 dataset is split into two subsets:
1. **Training Set**: This subset contains 50,000 images used for training machine learning models.
2. **Testing Set**: This subset consists of 10,000 images used for testing and benchmarking the trained models.
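If you want to inspect these splits directly, the dataset can also be loaded through `torchvision` (installed as a dependency of the `ultralytics` package); a small sketch:
```python
from torchvision.datasets import CIFAR10

# Download (if needed) and load both splits; "data" is an arbitrary local cache directory
train_set = CIFAR10(root="data", train=True, download=True)
test_set = CIFAR10(root="data", train=False, download=True)
print(len(train_set), len(test_set))  # 50000 10000
print(train_set.classes)  # ['airplane', 'automobile', 'bird', ...]
```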
## Applications
The CIFAR-10 dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in image classification tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. The diversity of the dataset in terms of classes and the presence of color images make it a well-rounded dataset for research and development in the field of machine learning and computer vision.
## Usage
To train a YOLO model on the CIFAR-10 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 32x32, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar10", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32
```
## Sample Images and Annotations
The CIFAR-10 dataset contains color images of various objects, providing a well-structured dataset for image classification tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/cifar10-sample-image.avif)
The example showcases the variety and complexity of the objects in the CIFAR-10 dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## Citations and Acknowledgments
If you use the CIFAR-10 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@TECHREPORT{Krizhevsky09learningmultiple,
author={Alex Krizhevsky},
title={Learning multiple layers of features from tiny images},
institution={},
year={2009}
}
```
We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-10 dataset as a valuable resource for the machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) research community. For more information about the CIFAR-10 dataset and its creator, visit the [CIFAR-10 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
## FAQ
### How can I train a YOLO model on the CIFAR-10 dataset?
To train a YOLO model on the CIFAR-10 dataset using Ultralytics, you can follow the examples provided for both Python and CLI. Here is a basic example to train your model for 100 epochs with an image size of 32x32 pixels:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar10", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32
```
For more details, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the CIFAR-10 dataset?
The CIFAR-10 dataset consists of 60,000 color images divided into 10 classes. Each class contains 6,000 images, with 5,000 for training and 1,000 for testing. The images are 32x32 pixels in size and vary across the following categories:
- Airplanes
- Cars
- Birds
- Cats
- Deer
- Dogs
- Frogs
- Horses
- Ships
- Trucks
This diverse dataset is essential for training image classification models in fields such as machine learning and computer vision. For more information, visit the CIFAR-10 sections on [dataset structure](#dataset-structure) and [applications](#applications).
### Why use the CIFAR-10 dataset for image classification tasks?
The CIFAR-10 dataset is an excellent benchmark for image classification due to its diversity and structure. It contains a balanced mix of 60,000 labeled images across 10 different categories, which helps in training robust and generalized models. It is widely used for evaluating deep learning models, including Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs) and other machine learning algorithms. The dataset is relatively small, making it suitable for quick experimentation and algorithm development. Explore its numerous applications in the [applications](#applications) section.
### How is the CIFAR-10 dataset structured?
The CIFAR-10 dataset is structured into two main subsets:
1. **Training Set**: Contains 50,000 images used for training machine learning models.
2. **Testing Set**: Consists of 10,000 images for testing and benchmarking the trained models.
Each subset comprises images categorized into 10 classes, with their annotations readily available for model training and evaluation. For more detailed information, refer to the [dataset structure](#dataset-structure) section.
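Once a model has been fine-tuned on CIFAR-10, the test split can be used to benchmark it with the `val` mode. The snippet below is a minimal sketch; the checkpoint path is a placeholder for a model you have already trained on CIFAR-10:
```python
from ultralytics import YOLO

# Load a model previously fine-tuned on CIFAR-10 (placeholder path)
model = YOLO("path/to/cifar10-best.pt")

# Evaluate on the CIFAR-10 test split and report top-1/top-5 accuracy
metrics = model.val(data="cifar10", imgsz=32)
print(metrics.top1, metrics.top5)
```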
### How can I cite the CIFAR-10 dataset in my research?
If you use the CIFAR-10 dataset in your research or development projects, make sure to cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@TECHREPORT{Krizhevsky09learningmultiple,
author={Alex Krizhevsky},
title={Learning multiple layers of features from tiny images},
institution={},
year={2009}
}
```
Acknowledging the dataset's creators helps support continued research and development in the field. For more details, see the [citations and acknowledgments](#citations-and-acknowledgments) section.
### What are some practical examples of using the CIFAR-10 dataset?
The CIFAR-10 dataset is often used for training image classification models, such as Convolutional Neural Networks (CNNs) and Support Vector Machines (SVMs). These models can be employed in various computer vision tasks including [object detection](https://www.ultralytics.com/glossary/object-detection), [image recognition](https://www.ultralytics.com/glossary/image-recognition), and automated tagging. To see some practical examples, check the code snippets in the [usage](#usage) section.
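As one illustrative sketch of such a workflow (the image path is a placeholder, and the checkpoint is assumed to be a classifier fine-tuned on CIFAR-10), a trained model can be queried for its top prediction and used for automated tagging:
```python
from ultralytics import YOLO

# Load a classification model (pretrained or fine-tuned on CIFAR-10)
model = YOLO("yolo11n-cls.pt")

# Classify a single image (placeholder path) and read the predicted class
results = model("path/to/image.jpg")
probs = results[0].probs
print(results[0].names[probs.top1], float(probs.top1conf))
```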
View File
@ -0,0 +1,141 @@
---
comments: true
description: Explore the CIFAR-100 dataset, consisting of 60,000 32x32 color images across 100 classes. Ideal for machine learning and computer vision tasks.
keywords: CIFAR-100, dataset, machine learning, computer vision, image classification, deep learning, YOLO, training, testing, Alex Krizhevsky
---
# CIFAR-100 Dataset
The [CIFAR-100](https://www.cs.toronto.edu/~kriz/cifar.html) (Canadian Institute For Advanced Research) dataset is a significant extension of the CIFAR-10 dataset, composed of 60,000 32x32 color images in 100 different classes. It was developed by researchers at the CIFAR institute, offering a more challenging dataset for more complex machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/6bZeCs0xwO4"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train Ultralytics YOLO11 on CIFAR-100 | Step-by-Step Image Classification Tutorial 🚀
</p>
## Key Features
- The CIFAR-100 dataset consists of 60,000 images, divided into 100 classes.
- Each class contains 600 images, split into 500 for training and 100 for testing.
- The images are colored and of size 32x32 pixels.
- The 100 different classes are grouped into 20 coarse categories for higher-level classification.
- CIFAR-100 is commonly used for training and testing in the field of machine learning and computer vision.
## Dataset Structure
The CIFAR-100 dataset is split into two subsets:
1. **Training Set**: This subset contains 50,000 images used for training machine learning models.
2. **Testing Set**: This subset consists of 10,000 images used for testing and benchmarking the trained models.
## Applications
The CIFAR-100 dataset is extensively used for training and evaluating deep learning models in [image classification](https://www.ultralytics.com/glossary/image-classification) tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. The diversity of the dataset in terms of classes and the presence of color images make it a more challenging and comprehensive dataset for research and development in the field of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision.
## Usage
To train a YOLO model on the CIFAR-100 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 32x32, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar100", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar100 model=yolo11n-cls.pt epochs=100 imgsz=32
```
## Sample Images and Annotations
The CIFAR-100 dataset contains color images of various objects, providing a well-structured dataset for image classification tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/cifar100-sample-image.avif)
The example showcases the variety and complexity of the objects in the CIFAR-100 dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## Citations and Acknowledgments
If you use the CIFAR-100 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@TECHREPORT{Krizhevsky09learningmultiple,
author={Alex Krizhevsky},
title={Learning multiple layers of features from tiny images},
institution={},
year={2009}
}
```
We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-100 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the CIFAR-100 dataset and its creator, visit the [CIFAR-100 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
## FAQ
### What is the CIFAR-100 dataset and why is it significant?
The [CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html) is a large collection of 60,000 32x32 color images classified into 100 classes. Developed by the Canadian Institute For Advanced Research (CIFAR), it provides a challenging dataset ideal for complex machine learning and computer vision tasks. Its significance lies in the diversity of classes and the small size of the images, making it a valuable resource for training and testing [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models, like Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs), using frameworks such as [Ultralytics YOLO](https://docs.ultralytics.com/models/yolo11/).
### How do I train a YOLO model on the CIFAR-100 dataset?
You can train a YOLO model on the CIFAR-100 dataset using either Python or CLI commands. Here's how:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar100", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar100 model=yolo11n-cls.pt epochs=100 imgsz=32
```
For a comprehensive list of available arguments, please refer to the model [Training](../../modes/train.md) page.
### What are the primary applications of the CIFAR-100 dataset?
The CIFAR-100 dataset is extensively used in training and evaluating deep learning models for image classification. Its diverse set of 100 classes, grouped into 20 coarse categories, provides a challenging environment for testing algorithms such as Convolutional Neural Networks (CNNs), Support Vector Machines (SVMs), and various other machine learning approaches. This dataset is a key resource in research and development within machine learning and computer vision fields, particularly for [object recognition](https://docs.ultralytics.com/tasks/classify/) and classification tasks.
### How is the CIFAR-100 dataset structured?
The CIFAR-100 dataset is split into two main subsets:
1. **Training Set**: Contains 50,000 images used for training machine learning models.
2. **Testing Set**: Consists of 10,000 images used for testing and benchmarking the trained models.
Each of the 100 classes contains 600 images, with 500 images for training and 100 for testing, making it well suited for rigorous academic and industrial research.
### Where can I find sample images and annotations from the CIFAR-100 dataset?
The CIFAR-100 dataset includes a variety of color images of various objects, making it a structured dataset for image classification tasks. You can refer to the documentation page to see [sample images and annotations](#sample-images-and-annotations). These examples highlight the dataset's diversity and complexity, important for training robust image classification models. For more datasets suitable for classification tasks, check out [Ultralytics' classification datasets overview](https://docs.ultralytics.com/datasets/classify/).
View File
@ -0,0 +1,141 @@
---
comments: true
description: Explore the Fashion-MNIST dataset, a modern replacement for MNIST with 70,000 Zalando article images. Ideal for benchmarking machine learning models.
keywords: Fashion-MNIST, image classification, Zalando dataset, machine learning, deep learning, CNN, dataset overview
---
# Fashion-MNIST Dataset
The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is a database of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. Fashion-MNIST is intended to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) algorithms.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/eX5ad6udQ9Q"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to do <a href="https://www.ultralytics.com/glossary/image-classification">Image Classification</a> on Fashion MNIST Dataset using Ultralytics YOLO11
</p>
## Key Features
- Fashion-MNIST contains 60,000 training images and 10,000 testing images of Zalando's fashion articles.
- The dataset comprises grayscale images of size 28x28 pixels.
- Each pixel has a single pixel-value associated with it, indicating the lightness or darkness of that pixel, with higher numbers meaning darker. This pixel-value is an integer between 0 and 255.
- Fashion-MNIST is widely used for training and testing in the field of machine learning, especially for image classification tasks.
## Dataset Structure
The Fashion-MNIST dataset is split into two subsets:
1. **Training Set**: This subset contains 60,000 images used for training machine learning models.
2. **Testing Set**: This subset consists of 10,000 images used for testing and benchmarking the trained models.
## Labels
Each training and test example is assigned to one of the following labels:
```
0. T-shirt/top
1. Trouser
2. Pullover
3. Dress
4. Coat
5. Sandal
6. Shirt
7. Sneaker
8. Bag
9. Ankle boot
```
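When running inference with a model fine-tuned on Fashion-MNIST, the predicted class index maps back to these labels. A minimal sketch (the checkpoint and image paths are placeholders, and the class names shown are those recorded during training, typically the dataset folder names):
```python
from ultralytics import YOLO

# Load a model fine-tuned on Fashion-MNIST (placeholder path)
model = YOLO("path/to/fashion-mnist-best.pt")

# Classify a 28x28 grayscale article image (placeholder path)
results = model("path/to/sample.png")

# Map the top-1 class index to its label name and print the confidence
probs = results[0].probs
print(f"Predicted: {results[0].names[probs.top1]} ({float(probs.top1conf):.2f})")
```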
## Applications
The Fashion-MNIST dataset is widely used for training and evaluating deep learning models in image classification tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. The dataset's simple and well-structured format makes it an essential resource for researchers and practitioners in the field of machine learning and computer vision.
## Usage
To train a CNN model on the Fashion-MNIST dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 28x28, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="fashion-mnist", epochs=100, imgsz=28)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=fashion-mnist model=yolo11n-cls.pt epochs=100 imgsz=28
```
## Sample Images and Annotations
The Fashion-MNIST dataset contains grayscale images of Zalando's article images, providing a well-structured dataset for image classification tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/fashion-mnist-sample.avif)
The example showcases the variety and complexity of the images in the Fashion-MNIST dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## Acknowledgments
If you use the Fashion-MNIST dataset in your research or development work, please acknowledge the dataset by linking to the [GitHub repository](https://github.com/zalandoresearch/fashion-mnist). This dataset was made available by Zalando Research.
## FAQ
### What is the Fashion-MNIST dataset and how is it different from MNIST?
The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is a collection of 70,000 grayscale images of Zalando's article images, intended as a modern replacement for the original MNIST dataset. It serves as a benchmark for machine learning models in the context of image classification tasks. Unlike MNIST, which contains handwritten digits, Fashion-MNIST consists of 28x28-pixel images categorized into 10 fashion-related classes, such as T-shirt/top, trouser, and ankle boot.
### How can I train a YOLO model on the Fashion-MNIST dataset?
To train an Ultralytics YOLO model on the Fashion-MNIST dataset, you can use both Python and CLI commands. Here's a quick example to get you started:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n-cls.pt")
# Train the model on Fashion-MNIST
results = model.train(data="fashion-mnist", epochs=100, imgsz=28)
```
=== "CLI"
```bash
yolo classify train data=fashion-mnist model=yolo11n-cls.pt epochs=100 imgsz=28
```
For more detailed training parameters, refer to the [Training page](../../modes/train.md).
### Why should I use the Fashion-MNIST dataset for benchmarking my machine learning models?
The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is widely recognized in the [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) community as a robust alternative to MNIST. It offers a more complex and varied set of images, making it an excellent choice for benchmarking image classification models. The dataset's structure, comprising 60,000 training images and 10,000 testing images, each labeled with one of 10 classes, makes it ideal for evaluating the performance of different machine learning algorithms in a more challenging context.
### Can I use Ultralytics YOLO for image classification tasks like Fashion-MNIST?
Yes, Ultralytics YOLO models can be used for image classification tasks, including those involving the Fashion-MNIST dataset. YOLO11, for example, supports various vision tasks such as detection, segmentation, and classification. To get started with image classification tasks, refer to the [Classification page](https://docs.ultralytics.com/tasks/classify/).
### What are the key features and structure of the Fashion-MNIST dataset?
The Fashion-MNIST dataset is divided into two main subsets: 60,000 training images and 10,000 testing images. Each image is a 28x28-pixel grayscale picture representing one of 10 fashion-related classes. The simplicity and well-structured format make it ideal for training and evaluating models in machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks. For more details on the dataset structure, see the [Dataset Structure section](#dataset-structure).
### How can I acknowledge the use of the Fashion-MNIST dataset in my research?
If you utilize the Fashion-MNIST dataset in your research or development projects, it's important to acknowledge it by linking to the [GitHub repository](https://github.com/zalandoresearch/fashion-mnist). This helps in attributing the data to Zalando Research, who made the dataset available for public use.
View File
@ -0,0 +1,132 @@
---
comments: true
description: Explore the extensive ImageNet dataset and discover its role in advancing deep learning in computer vision. Access pretrained models and training examples.
keywords: ImageNet, deep learning, visual recognition, computer vision, pretrained models, YOLO, dataset, object detection, image classification
---
# ImageNet Dataset
[ImageNet](https://www.image-net.org/) is a large-scale database of annotated images designed for use in visual object recognition research. It contains over 14 million images, with each image annotated using WordNet synsets, making it one of the most extensive resources available for training [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
## ImageNet Pretrained Models
{% include "macros/yolo-cls-perf.md" %}
## Key Features
- ImageNet contains over 14 million high-resolution images spanning thousands of object categories.
- The dataset is organized according to the WordNet hierarchy, with each synset representing a category.
- ImageNet is widely used for training and benchmarking in the field of computer vision, particularly for [image classification](https://www.ultralytics.com/glossary/image-classification) and [object detection](https://www.ultralytics.com/glossary/object-detection) tasks.
- The annual ImageNet Large Scale Visual Recognition Challenge (ILSVRC) has been instrumental in advancing computer vision research.
## Dataset Structure
The ImageNet dataset is organized using the WordNet hierarchy. Each node in the hierarchy represents a category, and each category is described by a synset (a collection of synonymous terms). The images in ImageNet are annotated with one or more synsets, providing a rich resource for training models to recognize various objects and their relationships.
## ImageNet Large Scale Visual Recognition Challenge (ILSVRC)
The annual [ImageNet Large Scale Visual Recognition Challenge (ILSVRC)](https://image-net.org/challenges/LSVRC/) has been an important event in the field of computer vision. It has provided a platform for researchers and developers to evaluate their algorithms and models on a large-scale dataset with standardized evaluation metrics. The ILSVRC has led to significant advancements in the development of deep learning models for image classification, object detection, and other computer vision tasks.
## Applications
The ImageNet dataset is widely used for training and evaluating deep learning models in various computer vision tasks, such as image classification, object detection, and object localization. Some popular deep learning architectures, such as [AlexNet](https://en.wikipedia.org/wiki/AlexNet), [VGG](https://arxiv.org/abs/1409.1556), and [ResNet](https://arxiv.org/abs/1512.03385), were developed and benchmarked using the ImageNet dataset.
## Usage
To train a deep learning model on the ImageNet dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 224x224, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet model=yolo11n-cls.pt epochs=100 imgsz=224
```
## Sample Images and Annotations
The ImageNet dataset contains high-resolution images spanning thousands of object categories, providing a diverse and extensive dataset for training and evaluating computer vision models. Here are some examples of images from the dataset:
![Dataset sample images](https://github.com/ultralytics/docs/releases/download/0/imagenet-sample-images.avif)
The example showcases the variety and complexity of the images in the ImageNet dataset, highlighting the importance of a diverse dataset for training robust computer vision models.
## Citations and Acknowledgments
If you use the ImageNet dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{ILSVRC15,
author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
title={ImageNet Large Scale Visual Recognition Challenge},
year={2015},
journal={International Journal of Computer Vision (IJCV)},
volume={115},
number={3},
pages={211-252}
}
```
We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset as a valuable resource for the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
## FAQ
### What is the ImageNet dataset and how is it used in computer vision?
The [ImageNet dataset](https://www.image-net.org/) is a large-scale database consisting of over 14 million high-resolution images categorized using WordNet synsets. It is extensively used in visual object recognition research, including image classification and object detection. The dataset's annotations and sheer volume provide a rich resource for training deep learning models. Notably, models like AlexNet, VGG, and ResNet have been trained and benchmarked using ImageNet, showcasing its role in advancing computer vision.
### How can I use a pretrained YOLO model for image classification on the ImageNet dataset?
To use a pretrained Ultralytics YOLO model for image classification on the ImageNet dataset, follow these steps:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet model=yolo11n-cls.pt epochs=100 imgsz=224
```
For more in-depth training instruction, refer to our [Training page](../../modes/train.md).
### Why should I use the Ultralytics YOLO11 pretrained models for my ImageNet dataset projects?
Ultralytics YOLO11 pretrained models offer state-of-the-art performance in terms of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for various computer vision tasks. For example, the YOLO11n-cls model, with a top-1 accuracy of 70.0% and a top-5 accuracy of 89.4%, is optimized for real-time applications. Pretrained models reduce the computational resources required for training from scratch and accelerate development cycles. Learn more about the performance metrics of YOLO11 models in the [ImageNet Pretrained Models section](#imagenet-pretrained-models).
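Because these weights are already trained on ImageNet, they can also be used for inference without any further training. A minimal sketch (the image path is a placeholder):
```python
from ultralytics import YOLO

# Load an ImageNet-pretrained classifier; no additional training is required
model = YOLO("yolo11n-cls.pt")

# Classify an image (placeholder path) at the ImageNet training resolution
results = model("path/to/image.jpg", imgsz=224)

# Print the five most likely ImageNet classes with their probabilities
probs = results[0].probs
for i in probs.top5:
    print(results[0].names[i], float(probs.data[i]))
```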
### How is the ImageNet dataset structured, and why is it important?
The ImageNet dataset is organized using the WordNet hierarchy, where each node in the hierarchy represents a category described by a synset (a collection of synonymous terms). This structure allows for detailed annotations, making it ideal for training models to recognize a wide variety of objects. The diversity and annotation richness of ImageNet make it a valuable dataset for developing robust and generalizable deep learning models. More about this organization can be found in the [Dataset Structure](#dataset-structure) section.
### What role does the ImageNet Large Scale Visual Recognition Challenge (ILSVRC) play in computer vision?
The annual [ImageNet Large Scale Visual Recognition Challenge (ILSVRC)](https://image-net.org/challenges/LSVRC/) has been pivotal in driving advancements in computer vision by providing a competitive platform for evaluating algorithms on a large-scale, standardized dataset. It offers standardized evaluation metrics, fostering innovation and development in areas such as image classification, object detection, and [image segmentation](https://www.ultralytics.com/glossary/image-segmentation). The challenge has continuously pushed the boundaries of what is possible with deep learning and computer vision technologies.
View File
@ -0,0 +1,129 @@
---
comments: true
description: Discover ImageNet10 a compact version of ImageNet for rapid model testing and CI checks. Perfect for quick evaluations in computer vision tasks.
keywords: ImageNet10, ImageNet, Ultralytics, CI tests, sanity checks, training pipelines, computer vision, deep learning, dataset
---
# ImageNet10 Dataset
The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a small-scale subset of the [ImageNet](https://www.image-net.org/) database, developed by [Ultralytics](https://www.ultralytics.com/) and designed for CI tests, sanity checks, and fast testing of training pipelines. This dataset is composed of the first image in the training set and the first image from the validation set of the first 10 classes in ImageNet. Although significantly smaller, it retains the structure and diversity of the original ImageNet dataset.
## Key Features
- ImageNet10 is a compact version of ImageNet, with 20 images representing the first 10 classes of the original dataset.
- The dataset is organized according to the WordNet hierarchy, mirroring the structure of the full ImageNet dataset.
- It is ideally suited for CI tests, sanity checks, and rapid testing of training pipelines in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
- Although not designed for model benchmarking, it can provide a quick indication of a model's basic functionality and correctness.
## Dataset Structure
The ImageNet10 dataset, like the original [ImageNet](../classify/imagenet.md), is organized using the WordNet hierarchy. Each of the 10 classes in ImageNet10 is described by a synset (a collection of synonymous terms). The images in ImageNet10 are annotated with one or more synsets, providing a compact resource for testing models to recognize various objects and their relationships.
## Applications
The ImageNet10 dataset is useful for quickly testing and debugging computer vision models and pipelines. Its small size allows for rapid iteration, making it ideal for [continuous integration](../../help/CI.md) tests and sanity checks. It can also be used for fast preliminary testing of new models or changes to existing models before moving on to full-scale testing with the complete [ImageNet dataset](../classify/imagenet.md).
## Usage
To test a deep learning model on the ImageNet10 dataset with an image size of 224x224, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Test Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet10", epochs=5, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224
```
## Sample Images and Annotations
The ImageNet10 dataset contains a subset of images from the original ImageNet dataset. These images are chosen to represent the first 10 classes in the dataset, providing a diverse yet compact dataset for quick testing and evaluation.
![Dataset sample images](https://github.com/ultralytics/docs/releases/download/0/imagenet10-sample-images.avif)
The example showcases the variety and complexity of the images in the ImageNet10 dataset, highlighting its usefulness for sanity checks and quick testing of computer vision models.
## Citations and Acknowledgments
If you use the ImageNet10 dataset in your research or development work, please cite the original ImageNet paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{ILSVRC15,
author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
title={ImageNet Large Scale Visual Recognition Challenge},
year={2015},
journal={International Journal of Computer Vision (IJCV)},
volume={115},
number={3},
pages={211-252}
}
```
We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset. The ImageNet10 dataset, while a compact subset, is a valuable resource for quick testing and debugging in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
## FAQ
### What is the ImageNet10 dataset and how is it different from the full ImageNet dataset?
The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a compact subset of the original [ImageNet](https://www.image-net.org/) database, created by Ultralytics for rapid CI tests, sanity checks, and training pipeline evaluations. ImageNet10 comprises only 20 images, representing the first image in the training and validation sets of the first 10 classes in ImageNet. Despite its small size, it maintains the structure and diversity of the full dataset, making it ideal for quick testing but not for benchmarking models.
### How can I use the ImageNet10 dataset to test my deep learning model?
To test your deep learning model on the ImageNet10 dataset with an image size of 224x224, use the following code snippets.
!!! example "Test Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet10", epochs=5, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224
```
Refer to the [Training](../../modes/train.md) page for a comprehensive list of available arguments.
### Why should I use the ImageNet10 dataset for CI tests and sanity checks?
The ImageNet10 dataset is designed specifically for CI tests, sanity checks, and quick evaluations in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) pipelines. Its small size allows for rapid iteration and testing, making it perfect for continuous integration processes where speed is crucial. By maintaining the structural complexity and diversity of the original ImageNet dataset, ImageNet10 provides a reliable indication of a model's basic functionality and correctness without the overhead of processing a large dataset.
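For example, a hypothetical pytest-style smoke test could train for a single epoch on ImageNet10 to verify that the classification pipeline runs end to end (the function name and assertion are illustrative only):
```python
from ultralytics import YOLO


def test_classify_pipeline_smoke():
    """Train one epoch on ImageNet10 as a quick end-to-end sanity check."""
    model = YOLO("yolo11n-cls.pt")
    results = model.train(data="imagenet10", epochs=1, imgsz=224)
    assert results is not None
```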
### What are the main features of the ImageNet10 dataset?
The ImageNet10 dataset has several key features:
- **Compact Size**: With only 20 images, it allows for rapid testing and debugging.
- **Structured Organization**: Follows the WordNet hierarchy, similar to the full ImageNet dataset.
- **CI and Sanity Checks**: Ideally suited for continuous integration tests and sanity checks.
- **Not for Benchmarking**: While useful for quick model evaluations, it is not designed for extensive benchmarking.
### How does ImageNet10 compare to other small datasets like ImageNette?
While both [ImageNet10](imagenet10.md) and [ImageNette](imagenette.md) are subsets of ImageNet, they serve different purposes. ImageNet10 contains just 20 images (2 per class) from the first 10 classes of ImageNet, making it extremely lightweight for CI testing and quick sanity checks. In contrast, ImageNette contains thousands of images across 10 easily distinguishable classes, making it more suitable for actual model training and development. ImageNet10 is designed for verification of pipeline functionality, while ImageNette is better for meaningful but faster-than-full-ImageNet training experiments.
View File
@ -0,0 +1,193 @@
---
comments: true
description: Explore the ImageNette dataset, a subset of ImageNet with 10 classes for efficient training and evaluation of image classification models. Ideal for ML and CV projects.
keywords: ImageNette dataset, ImageNet subset, image classification, machine learning, deep learning, YOLO, Convolutional Neural Networks, ML dataset, education, training
---
# ImageNette Dataset
The [ImageNette](https://github.com/fastai/imagenette) dataset is a subset of the larger [ImageNet](https://www.image-net.org/) dataset, but it only includes 10 easily distinguishable classes. It was created to provide a quicker, easier-to-use version of ImageNet for software development and education.
## Key Features
- ImageNette contains images from 10 different classes: tench, English springer, cassette player, chain saw, church, French horn, garbage truck, gas pump, golf ball, and parachute.
- The dataset comprises colored images of varying dimensions.
- ImageNette is widely used for training and testing in the field of machine learning, especially for image classification tasks.
## Dataset Structure
The ImageNette dataset is split into two subsets:
1. **Training Set**: This subset contains several thousand images used for training machine learning models. The exact number varies per class.
2. **Validation Set**: This subset consists of several hundred images used for validating and benchmarking the trained models. Again, the exact number varies per class.
## Applications
The ImageNette dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in image classification tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), and various other machine learning algorithms. The dataset's straightforward format and well-chosen classes make it a handy resource for both beginner and experienced practitioners in the field of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
## Usage
To train a model on the ImageNette dataset for 100 epochs with a standard image size of 224x224, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenette", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenette model=yolo11n-cls.pt epochs=100 imgsz=224
```
## Sample Images and Annotations
The ImageNette dataset contains colored images of various objects and scenes, providing a diverse dataset for [image classification](https://www.ultralytics.com/glossary/image-classification) tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/imagenette-sample-image.avif)
The example showcases the variety and complexity of the images in the ImageNette dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## ImageNette160 and ImageNette320
For faster prototyping and training, the ImageNette dataset is also available in two reduced sizes: [ImageNette160](https://github.com/fastai/imagenette) and [ImageNette320](https://github.com/fastai/imagenette). These datasets maintain the same classes and structure as the full ImageNette dataset, but the images are resized to a smaller dimension. As such, these versions of the dataset are particularly useful for preliminary model testing, or when computational resources are limited.
To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imagenette320' in the training command. The following code snippets illustrate this:
!!! example "Train Example with ImageNette160"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model with ImageNette160
results = model.train(data="imagenette160", epochs=100, imgsz=160)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model with ImageNette160
yolo classify train data=imagenette160 model=yolo11n-cls.pt epochs=100 imgsz=160
```
!!! example "Train Example with ImageNette320"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model with ImageNette320
results = model.train(data="imagenette320", epochs=100, imgsz=320)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model with ImageNette320
yolo classify train data=imagenette320 model=yolo11n-cls.pt epochs=100 imgsz=320
```
These smaller versions of the dataset allow for rapid iterations during the development process while still providing valuable and realistic image classification tasks.
## Citations and Acknowledgments
If you use the ImageNette dataset in your research or development work, please acknowledge it appropriately. For more information about the ImageNette dataset, visit the [ImageNette dataset GitHub page](https://github.com/fastai/imagenette).
## FAQ
### What is the ImageNette dataset?
The [ImageNette dataset](https://github.com/fastai/imagenette) is a simplified subset of the larger [ImageNet dataset](https://www.image-net.org/), featuring only 10 easily distinguishable classes such as tench, English springer, and French horn. It was created to offer a more manageable dataset for efficient training and evaluation of image classification models. This dataset is particularly useful for quick software development and educational purposes in [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision.
### How can I use the ImageNette dataset for training a YOLO model?
To train a YOLO model on the ImageNette dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch), you can use the following commands. Make sure to have the Ultralytics YOLO environment set up.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenette", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenette model=yolo11n-cls.pt epochs=100 imgsz=224
```
For more details, see the [Training](../../modes/train.md) documentation page.
### Why should I use ImageNette for image classification tasks?
The ImageNette dataset is advantageous for several reasons:
- **Quick and Simple**: It contains only 10 classes, making it less complex and time-consuming compared to larger datasets.
- **Educational Use**: Ideal for learning and teaching the basics of image classification since it requires less computational power and time.
- **Versatility**: Widely used to train and benchmark various machine learning models, especially in image classification.
For more details on model training and dataset management, explore the [Dataset Structure](#dataset-structure) section.
### Can the ImageNette dataset be used with different image sizes?
Yes, the ImageNette dataset is also available in two resized versions: ImageNette160 and ImageNette320. These versions help in faster prototyping and are especially useful when computational resources are limited.
!!! example "Train Example with ImageNette160"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt")
# Train the model with ImageNette160
results = model.train(data="imagenette160", epochs=100, imgsz=160)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model with ImageNette160
yolo classify train data=imagenette160 model=yolo11n-cls.pt epochs=100 imgsz=160
```
For more information, refer to [Training with ImageNette160 and ImageNette320](#imagenette160-and-imagenette320).
### What are some practical applications of the ImageNette dataset?
The ImageNette dataset is extensively used in:
- **Educational Settings**: To educate beginners in machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
- **Software Development**: For rapid prototyping and development of image classification models.
- **Deep Learning Research**: To evaluate and benchmark the performance of various deep learning models, especially Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs).
Explore the [Applications](#applications) section for detailed use cases.
View File
@ -0,0 +1,153 @@
---
comments: true
description: Explore the ImageWoof dataset, a challenging subset of ImageNet focusing on 10 dog breeds, designed to enhance image classification models. Learn more on Ultralytics Docs.
keywords: ImageWoof dataset, ImageNet subset, dog breeds, image classification, deep learning, machine learning, Ultralytics, training dataset, noisy labels
---
# ImageWoof Dataset
The [ImageWoof](https://github.com/fastai/imagenette) dataset is a subset of the [ImageNet](imagenet.md) dataset consisting of 10 classes that are challenging to classify, since they're all dog breeds. It was created as a more difficult task for [image classification](https://www.ultralytics.com/glossary/image-classification) algorithms to solve, aiming to encourage the development of more advanced models.
## Key Features
- ImageWoof contains images of 10 different dog breeds: Australian terrier, Border terrier, Samoyed, Beagle, Shih-Tzu, English foxhound, Rhodesian ridgeback, Dingo, Golden retriever, and Old English sheepdog.
- The dataset provides images at various resolutions (full size, 320px, 160px), accommodating different computational capabilities and research needs.
- It also includes a version with noisy labels, providing a more realistic scenario where labels might not always be reliable.
## Dataset Structure
The ImageWoof dataset structure is based on the dog breed classes, with each breed having its own directory of images. Similar to other classification datasets, it follows a split-directory format with separate folders for training and validation sets.
## Applications
The ImageWoof dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in image classification tasks, especially when it comes to more complex and similar classes. The dataset's challenge lies in the subtle differences between the dog breeds, pushing the limits of model performance and generalization. It's particularly valuable for:
- Benchmarking classification model performance on fine-grained categories
- Testing model robustness against similar-looking classes
- Developing algorithms that can distinguish subtle visual differences
- Evaluating transfer learning capabilities from general to specific domains
## Usage
To train a CNN model on the ImageWoof dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 224x224, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagewoof", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagewoof model=yolo11n-cls.pt epochs=100 imgsz=224
```
## Dataset Variants
The ImageWoof dataset comes in three different sizes to accommodate various research needs and computational capabilities:
1. **Full Size (imagewoof)**: This is the original version of the ImageWoof dataset. It contains full-sized images and is ideal for final training and performance benchmarking.
2. **Medium Size (imagewoof320)**: This version contains images resized to have a maximum edge length of 320 pixels. It's suitable for faster training without significantly sacrificing model performance.
3. **Small Size (imagewoof160)**: This version contains images resized to have a maximum edge length of 160 pixels. It's designed for rapid prototyping and experimentation where training speed is a priority.
To use these variants in your training, simply replace 'imagewoof' in the dataset argument with 'imagewoof320' or 'imagewoof160'. For example:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# For medium-sized dataset
model.train(data="imagewoof320", epochs=100, imgsz=224)
# For small-sized dataset
model.train(data="imagewoof160", epochs=100, imgsz=224)
```
=== "CLI"
```bash
# Load a pretrained model and train on the medium-sized dataset
yolo classify train model=yolo11n-cls.pt data=imagewoof320 epochs=100 imgsz=224
```
It's important to note that using smaller images will likely yield lower performance in terms of classification accuracy. However, it's an excellent way to iterate quickly in the early stages of model development and prototyping.
## Sample Images and Annotations
The ImageWoof dataset contains colorful images of various dog breeds, providing a challenging dataset for image classification tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/imagewoof-dataset-sample.avif)
The example showcases the subtle differences and similarities among the different dog breeds in the ImageWoof dataset, highlighting the complexity and difficulty of the classification task.
## Citations and Acknowledgments
If you use the ImageWoof dataset in your research or development work, please make sure to acknowledge the creators of the dataset by linking to the [official dataset repository](https://github.com/fastai/imagenette).
We would like to acknowledge the [FastAI](https://www.fast.ai/) team for creating and maintaining the ImageWoof dataset as a valuable resource for the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) research community. For more information about the ImageWoof dataset, visit the [ImageWoof dataset repository](https://github.com/fastai/imagenette).
## FAQ
### What is the ImageWoof dataset in Ultralytics?
The [ImageWoof](https://github.com/fastai/imagenette) dataset is a challenging subset of ImageNet focusing on 10 specific dog breeds. Created to push the limits of image classification models, it features breeds like Beagle, Shih-Tzu, and Golden Retriever. The dataset includes images at various resolutions (full size, 320px, 160px) and even noisy labels for more realistic training scenarios. This complexity makes ImageWoof ideal for developing more advanced deep learning models.
### How can I train a model using the ImageWoof dataset with Ultralytics YOLO?
To train a [Convolutional Neural Network](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNN) model on the ImageWoof dataset using Ultralytics YOLO for 100 epochs at an image size of 224x224, you can use the following code:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
model = YOLO("yolo11n-cls.pt") # Load a pretrained model
results = model.train(data="imagewoof", epochs=100, imgsz=224)
```
=== "CLI"
```bash
yolo classify train data=imagewoof model=yolo11n-cls.pt epochs=100 imgsz=224
```
For more details on available training arguments, refer to the [Training](../../modes/train.md) page.
### What versions of the ImageWoof dataset are available?
The ImageWoof dataset comes in three sizes:
1. **Full Size (imagewoof)**: Ideal for final training and benchmarking, containing full-sized images.
2. **Medium Size (imagewoof320)**: Resized images with a maximum edge length of 320 pixels, suited for faster training.
3. **Small Size (imagewoof160)**: Resized images with a maximum edge length of 160 pixels, perfect for rapid prototyping.
Use these versions by replacing 'imagewoof' in the dataset argument accordingly. Note, however, that smaller images may yield lower classification [accuracy](https://www.ultralytics.com/glossary/accuracy) but can be useful for quicker iterations.
### How do noisy labels in the ImageWoof dataset benefit training?
Noisy labels in the ImageWoof dataset simulate real-world conditions where labels might not always be accurate. Training models with this data helps develop robustness and generalization in image classification tasks. This prepares the models to handle ambiguous or mislabeled data effectively, which is often encountered in practical applications.
### What are the key challenges of using the ImageWoof dataset?
The primary challenge of the ImageWoof dataset lies in the subtle differences among the dog breeds it includes. Since it focuses on 10 closely related breeds, distinguishing between them requires more advanced and fine-tuned image classification models. This makes ImageWoof an excellent benchmark to test the capabilities and improvements of [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models.
View File
@ -0,0 +1,206 @@
---
comments: true
description: Learn how to structure datasets for YOLO classification tasks. Detailed folder structure and usage examples for effective training.
keywords: YOLO, image classification, dataset structure, CIFAR-10, Ultralytics, machine learning, training data, model evaluation
---
# Image Classification Datasets Overview
## Dataset Structure for YOLO Classification Tasks
For [Ultralytics](https://www.ultralytics.com/) YOLO classification tasks, the dataset must be organized in a specific split-directory structure under the `root` directory to facilitate proper training, testing, and optional validation processes. This structure includes separate directories for training (`train`) and testing (`test`) phases, with an optional directory for validation (`val`).
Each of these directories should contain one subdirectory for each class in the dataset. The subdirectories are named after the corresponding class and contain all the images for that class. Ensure that each image file is named uniquely and stored in a common format such as JPEG or PNG.
### Folder Structure Example
Consider the [CIFAR-10](cifar10.md) dataset as an example. The folder structure should look like this:
```
cifar-10-/
|
|-- train/
| |-- airplane/
| | |-- 10008_airplane.png
| | |-- 10009_airplane.png
| | |-- ...
| |
| |-- automobile/
| | |-- 1000_automobile.png
| | |-- 1001_automobile.png
| | |-- ...
| |
| |-- bird/
| | |-- 10014_bird.png
| | |-- 10015_bird.png
| | |-- ...
| |
| |-- ...
|
|-- test/
| |-- airplane/
| | |-- 10_airplane.png
| | |-- 11_airplane.png
| | |-- ...
| |
| |-- automobile/
| | |-- 100_automobile.png
| | |-- 101_automobile.png
| | |-- ...
| |
| |-- bird/
| | |-- 1000_bird.png
| | |-- 1001_bird.png
| | |-- ...
| |
| |-- ...
|
|-- val/ (optional)
| |-- airplane/
| | |-- 105_airplane.png
| | |-- 106_airplane.png
| | |-- ...
| |
| |-- automobile/
| | |-- 102_automobile.png
| | |-- 103_automobile.png
| | |-- ...
| |
| |-- bird/
| | |-- 1045_bird.png
| | |-- 1046_bird.png
| | |-- ...
| |
| |-- ...
```
This structured approach ensures that the model can effectively learn from well-organized classes during the training phase and accurately evaluate performance during testing and validation phases.
## Usage
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="path/to/dataset", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=path/to/data model=yolo11n-cls.pt epochs=100 imgsz=640
```
## Supported Datasets
Ultralytics supports the following datasets with automatic download:
- [Caltech 101](caltech101.md): A dataset containing images of 101 object categories for [image classification](https://www.ultralytics.com/glossary/image-classification) tasks.
- [Caltech 256](caltech256.md): An extended version of Caltech 101 with 256 object categories and more challenging images.
- [CIFAR-10](cifar10.md): A dataset of 60K 32x32 color images in 10 classes, with 6K images per class.
- [CIFAR-100](cifar100.md): An extended version of CIFAR-10 with 100 object categories and 600 images per class.
- [Fashion-MNIST](fashion-mnist.md): A dataset consisting of 70,000 grayscale images of 10 fashion categories for image classification tasks.
- [ImageNet](imagenet.md): A large-scale dataset for [object detection](https://www.ultralytics.com/glossary/object-detection) and image classification with over 14 million images and 20,000 categories.
- [ImageNet-10](imagenet10.md): A smaller subset of ImageNet with 10 categories for faster experimentation and testing.
- [Imagenette](imagenette.md): A smaller subset of ImageNet that contains 10 easily distinguishable classes for quicker training and testing.
- [Imagewoof](imagewoof.md): A more challenging subset of ImageNet containing 10 dog breed categories for image classification tasks.
- [MNIST](mnist.md): A dataset of 70,000 grayscale images of handwritten digits for image classification tasks.
- [MNIST160](mnist.md): A compact subset of the MNIST dataset made up of the first 8 images from each MNIST category, containing 160 images in total.
### Adding your own dataset
If you have your own dataset and would like to use it for training classification models with Ultralytics YOLO, ensure that it follows the format specified above under "Dataset Structure" and then point your `data` argument to the dataset directory when initializing your training script.
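Before launching training, it can help to confirm that the directory layout matches the expected structure. A small, purely illustrative check (the dataset path is a placeholder):
```python
from pathlib import Path

# Placeholder path to your custom classification dataset root
root = Path("path/to/your/dataset")

# Print the class subdirectories found in each split
for split in ("train", "test", "val"):
    split_dir = root / split
    if split_dir.is_dir():
        classes = sorted(d.name for d in split_dir.iterdir() if d.is_dir())
        print(f"{split}: {len(classes)} classes, e.g. {classes[:3]}")
```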
## FAQ
### How do I structure my dataset for YOLO classification tasks?
To structure your dataset for Ultralytics YOLO classification tasks, you should follow a specific split-directory format. Organize your dataset into separate directories for `train`, `test`, and optionally `val`. Each of these directories should contain subdirectories named after each class, with the corresponding images inside. This facilitates smooth training and evaluation processes. For an example, consider the [CIFAR-10](cifar10.md) dataset format:
```
cifar-10-/
|-- train/
| |-- airplane/
| |-- automobile/
| |-- bird/
| ...
|-- test/
| |-- airplane/
| |-- automobile/
| |-- bird/
| ...
|-- val/ (optional)
| |-- airplane/
| |-- automobile/
| |-- bird/
| ...
```
For more details, visit the [Dataset Structure for YOLO Classification Tasks](#dataset-structure-for-yolo-classification-tasks) section.
### What datasets are supported by Ultralytics YOLO for image classification?
Ultralytics YOLO supports automatic downloading of several datasets for image classification, including [Caltech 101](caltech101.md), [Caltech 256](caltech256.md), [CIFAR-10](cifar10.md), [CIFAR-100](cifar100.md), [Fashion-MNIST](fashion-mnist.md), [ImageNet](imagenet.md), [ImageNet-10](imagenet10.md), [Imagenette](imagenette.md), [Imagewoof](imagewoof.md), and [MNIST](mnist.md). These datasets are structured in a way that makes them easy to use with YOLO. Each dataset's page provides further details about its structure and applications.
### How do I add my own dataset for YOLO image classification?
To use your own dataset with Ultralytics YOLO, ensure it follows the specified directory format required for the classification task, with separate `train`, `test`, and optionally `val` directories, and subdirectories for each class containing the respective images. Once your dataset is structured correctly, point the `data` argument to your dataset's root directory when initializing the training script. Here's an example in Python:
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="path/to/your/dataset", epochs=100, imgsz=640)
```
More details can be found in the [Adding your own dataset](#adding-your-own-dataset) section.
### Why should I use Ultralytics YOLO for image classification?
Ultralytics YOLO offers several benefits for image classification, including:
- **Pretrained Models**: Load pretrained models like `yolo11n-cls.pt` to jump-start your training process.
- **Ease of Use**: Simple API and CLI commands for training and evaluation.
- **High Performance**: State-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed, ideal for real-time applications.
- **Support for Multiple Datasets**: Seamless integration with various popular datasets like [CIFAR-10](cifar10.md), [ImageNet](imagenet.md), and more.
- **Community and Support**: Access to extensive documentation and an active community for troubleshooting and improvements.
For additional insights and real-world applications, you can explore [Ultralytics YOLO](https://www.ultralytics.com/yolo).
### How can I train a model using Ultralytics YOLO?
Training a model using Ultralytics YOLO can be done easily in both Python and CLI. Here's an example:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model
# Train the model
results = model.train(data="path/to/dataset", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=path/to/data model=yolo11n-cls.pt epochs=100 imgsz=640
```
These examples demonstrate the straightforward process of training a YOLO model using either approach. For more information, visit the [Usage](#usage) section and the [Train](https://docs.ultralytics.com/tasks/classify/#train) page for classification tasks.

View File

@ -0,0 +1,138 @@
---
comments: true
description: Explore the MNIST dataset, a cornerstone in machine learning for handwritten digit recognition. Learn about its structure, features, and applications.
keywords: MNIST, dataset, handwritten digits, image classification, deep learning, machine learning, training set, testing set, NIST
---
# MNIST Dataset
The [MNIST](https://en.wikipedia.org/wiki/MNIST_database) (Modified National Institute of Standards and Technology) dataset is a large database of handwritten digits that is commonly used for training various image processing systems and machine learning models. It was created by "re-mixing" the samples from NIST's original datasets and has become a benchmark for evaluating the performance of [image classification](https://www.ultralytics.com/glossary/image-classification) algorithms.
## Key Features
- MNIST contains 60,000 training images and 10,000 testing images of handwritten digits.
- The dataset comprises grayscale images of size 28×28 pixels.
- The images are normalized to fit into a 28×28 pixel [bounding box](https://www.ultralytics.com/glossary/bounding-box) and anti-aliased, introducing grayscale levels.
- MNIST is widely used for training and testing in the field of machine learning, especially for image classification tasks.
## Dataset Structure
The MNIST dataset is split into two subsets:
1. **Training Set**: This subset contains 60,000 images of handwritten digits used for training machine learning models.
2. **Testing Set**: This subset consists of 10,000 images used for testing and benchmarking the trained models.
Each image in the dataset is labeled with the corresponding digit (0-9), making it a supervised learning dataset ideal for classification tasks.
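If you want to inspect the raw data before training, the sketch below uses `torchvision` to download the training split and look at a single sample; `torchvision` is only an illustrative choice here and is not required by Ultralytics YOLO, which fetches `mnist` automatically.
```python
from torchvision import datasets
# Download the raw MNIST training split and inspect one sample
mnist_train = datasets.MNIST(root="data", train=True, download=True)
image, label = mnist_train[0]  # PIL image and its integer label
print(len(mnist_train))  # 60000 training samples
print(image.size, label)  # (28, 28) and a digit in 0-9
```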
## Extended MNIST (EMNIST)
Extended MNIST (EMNIST) is a newer dataset developed and released by NIST to be the successor to MNIST. While MNIST included images only of handwritten digits, EMNIST includes all the images from NIST Special Database 19, which is a large database of handwritten uppercase and lowercase letters as well as digits. The images in EMNIST were converted into the same 28×28 pixel format, by the same process, as were the MNIST images. Accordingly, tools that work with the older, smaller MNIST dataset will likely work unmodified with EMNIST.
## Applications
The MNIST dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in image classification tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs), and various other machine learning algorithms. The dataset's simple and well-structured format makes it an essential resource for researchers and practitioners in the field of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
Some common applications include:
- Benchmarking new classification algorithms
- Educational purposes for teaching machine learning concepts
- Prototyping image recognition systems
- Testing model optimization techniques
## Usage
To train a CNN model on the MNIST dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 32×32, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="mnist", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=mnist model=yolo11n-cls.pt epochs=100 imgsz=32
```
## Sample Images and Annotations
The MNIST dataset contains grayscale images of handwritten digits, providing a well-structured dataset for image classification tasks. Here are some examples of images from the dataset:
![Dataset sample image](https://upload.wikimedia.org/wikipedia/commons/2/27/MnistExamples.png)
The example showcases the variety and complexity of the handwritten digits in the MNIST dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## Citations and Acknowledgments
If you use the MNIST dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{lecun2010mnist,
title={MNIST handwritten digit database},
author={LeCun, Yann and Cortes, Corinna and Burges, CJ},
journal={ATT Labs [Online]. Available: http://yann.lecun.com/exdb/mnist},
volume={2},
year={2010}
}
```
We would like to acknowledge Yann LeCun, Corinna Cortes, and Christopher J.C. Burges for creating and maintaining the MNIST dataset as a valuable resource for the machine learning and computer vision research community. For more information about the MNIST dataset and its creators, visit the [MNIST dataset website](https://en.wikipedia.org/wiki/MNIST_database).
## FAQ
### What is the MNIST dataset, and why is it important in machine learning?
The [MNIST](https://en.wikipedia.org/wiki/MNIST_database) dataset, or Modified National Institute of Standards and Technology dataset, is a widely-used collection of handwritten digits designed for training and testing image classification systems. It includes 60,000 training images and 10,000 testing images, all of which are grayscale and 28×28 pixels in size. The dataset's importance lies in its role as a standard benchmark for evaluating image classification algorithms, helping researchers and engineers to compare methods and track progress in the field.
### How can I use Ultralytics YOLO to train a model on the MNIST dataset?
To train a model on the MNIST dataset using Ultralytics YOLO, you can follow these steps:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="mnist", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=mnist model=yolo11n-cls.pt epochs=100 imgsz=32
```
For a detailed list of available training arguments, refer to the [Training](../../modes/train.md) page.
### What is the difference between the MNIST and EMNIST datasets?
The MNIST dataset contains only handwritten digits, whereas the Extended MNIST (EMNIST) dataset includes both digits and uppercase and lowercase letters. EMNIST was developed as a successor to MNIST and utilizes the same 28×28 pixel format for the images, making it compatible with tools and models designed for the original MNIST dataset. This broader range of characters in EMNIST makes it useful for a wider variety of machine learning applications.
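As a quick illustration of that compatibility, the sketch below loads an EMNIST split with `torchvision` using the same pattern as for MNIST; the choice of `torchvision` and of the `letters` split are assumptions made for this example.
```python
from torchvision import datasets
# EMNIST provides several splits, e.g. "letters", "digits", "balanced", "byclass"
emnist = datasets.EMNIST(root="data", split="letters", train=True, download=True)
image, label = emnist[0]  # same 28x28 grayscale format as MNIST
print(len(emnist), image.size, label)
```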
### Can I use Ultralytics HUB to train models on custom datasets like MNIST?
Yes, you can use [Ultralytics HUB](https://docs.ultralytics.com/hub/) to train models on custom datasets like MNIST. Ultralytics HUB offers a user-friendly interface for uploading datasets, training models, and managing projects without needing extensive coding knowledge. For more details on how to get started, check out the [Ultralytics HUB Quickstart](https://docs.ultralytics.com/hub/quickstart/) page.
### How does MNIST compare to other image classification datasets?
MNIST is simpler than many modern datasets like [CIFAR-10](../classify/cifar10.md) or [ImageNet](../classify/imagenet.md), making it ideal for beginners and quick experimentation. While more complex datasets offer greater challenges with color images and diverse object categories, MNIST remains valuable for its simplicity, small file size, and historical significance in the development of machine learning algorithms. For more advanced classification tasks, consider using [Fashion-MNIST](../classify/fashion-mnist.md), which maintains the same structure but features clothing items instead of digits.

View File

@ -0,0 +1,147 @@
---
comments: true
description: Explore our African Wildlife Dataset featuring images of buffalo, elephant, rhino, and zebra for training computer vision models. Ideal for research and conservation.
keywords: African Wildlife Dataset, South African animals, object detection, computer vision, YOLO11, wildlife research, conservation, dataset
---
# African Wildlife Dataset
This dataset showcases four common animal classes typically found in South African nature reserves. It includes images of African wildlife such as buffalo, elephant, rhino, and zebra, providing valuable insights into their characteristics. Essential for training [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) algorithms, this dataset aids in identifying animals in various habitats, from zoos to forests, and supports wildlife research.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/biIW5Z6GYl0"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> African Wildlife Animals Detection using Ultralytics YOLO11
</p>
## Dataset Structure
The African wildlife object detection dataset is split into three subsets:
- **Training set**: Contains 1052 images, each with corresponding annotations.
- **Validation set**: Includes 225 images, each with paired annotations.
- **Testing set**: Comprises 227 images, each with paired annotations.
## Applications
This dataset can be applied in various computer vision tasks such as [object detection](https://www.ultralytics.com/glossary/object-detection), object tracking, and research. Specifically, it can be used to train and evaluate models for identifying African wildlife objects in images, which can have applications in wildlife conservation, ecological research, and monitoring efforts in natural reserves and protected areas. Additionally, it can serve as a valuable resource for educational purposes, enabling students and researchers to study and understand the characteristics and behaviors of different animal species.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file defines the dataset configuration, including paths, classes, and other pertinent details. For the African wildlife dataset, the `african-wildlife.yaml` file is located at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/african-wildlife.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/african-wildlife.yaml).
!!! example "ultralytics/cfg/datasets/african-wildlife.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/african-wildlife.yaml"
```
## Usage
To train a YOLO11n model on the African wildlife dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="african-wildlife.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=african-wildlife.yaml model=yolo11n.pt epochs=100 imgsz=640
```
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load an African wildlife fine-tuned model
# Inference using the model
results = model.predict("https://ultralytics.com/assets/african-wildlife-sample.jpg")
```
=== "CLI"
```bash
# Start prediction with a finetuned *.pt model
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/african-wildlife-sample.jpg"
```
## Sample Images and Annotations
The African wildlife dataset comprises a wide variety of images showcasing diverse animal species and their natural habitats. Below are examples of images from the dataset, each accompanied by its corresponding annotations.
![African wildlife dataset sample image](https://github.com/ultralytics/docs/releases/download/0/african-wildlife-dataset-sample.avif)
- **Mosaiced Image**: Here, we present a training batch consisting of mosaiced dataset images. Mosaicing, a training technique, combines multiple images into one, enriching batch diversity. This method helps enhance the model's ability to generalize across different object sizes, aspect ratios, and contexts.
This example illustrates the variety and complexity of images in the African wildlife dataset, emphasizing the benefits of including mosaicing during the training process.
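Mosaic behavior can also be adjusted directly from the training call. The sketch below uses the `mosaic` and `close_mosaic` training hyperparameters; the values shown mirror the usual defaults and are included only to make the knobs visible.
```python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n.pt")
# Train with mosaic applied to every batch, then disabled for the final 10 epochs
results = model.train(data="african-wildlife.yaml", epochs=100, imgsz=640, mosaic=1.0, close_mosaic=10)
```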
## Citations and Acknowledgments
The dataset has been released under the [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE).
## FAQ
### What is the African Wildlife Dataset, and how can it be used in computer vision projects?
The African Wildlife Dataset includes images of four common animal species found in South African nature reserves: buffalo, elephant, rhino, and zebra. It is a valuable resource for training computer vision algorithms in object detection and animal identification. The dataset supports various tasks like object tracking, research, and conservation efforts. For more information on its structure and applications, refer to the [Dataset Structure](#dataset-structure) section and [Applications](#applications) of the dataset.
### How do I train a YOLO11 model using the African Wildlife Dataset?
You can train a YOLO11 model on the African Wildlife Dataset by using the `african-wildlife.yaml` configuration file. Below is an example of how to train the YOLO11n model for 100 epochs with an image size of 640:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="african-wildlife.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=african-wildlife.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For additional training parameters and options, refer to the [Training](../../modes/train.md) documentation.
### Where can I find the YAML configuration file for the African Wildlife Dataset?
The YAML configuration file for the African Wildlife Dataset, named `african-wildlife.yaml`, can be found at [this GitHub link](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/african-wildlife.yaml). This file defines the dataset configuration, including paths, classes, and other details crucial for training [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models. See the [Dataset YAML](#dataset-yaml) section for more details.
### Can I see sample images and annotations from the African Wildlife Dataset?
Yes, the African Wildlife Dataset includes a wide variety of images showcasing diverse animal species in their natural habitats. You can view sample images and their corresponding annotations in the [Sample Images and Annotations](#sample-images-and-annotations) section. This section also illustrates the use of mosaicing technique to combine multiple images into one for enriched batch diversity, enhancing the model's generalization ability.
### How can the African Wildlife Dataset be used to support wildlife conservation and research?
The African Wildlife Dataset is ideal for supporting wildlife conservation and research by enabling the training and evaluation of models to identify African wildlife in different habitats. These models can assist in [monitoring animal populations](https://docs.ultralytics.com/solutions/), studying their behavior, and recognizing conservation needs. Additionally, the dataset can be utilized for educational purposes, helping students and researchers understand the characteristics and behaviors of different animal species. More details can be found in the [Applications](#applications) section.

View File

@ -0,0 +1,153 @@
---
comments: true
description: Explore the comprehensive Argoverse dataset by Argo AI for 3D tracking, motion forecasting, and stereo depth estimation in autonomous driving research.
keywords: Argoverse dataset, autonomous driving, 3D tracking, motion forecasting, stereo depth estimation, Argo AI, LiDAR point clouds, high-resolution images, HD maps
---
# Argoverse Dataset
The [Argoverse](https://www.argoverse.org/) dataset is a collection of data designed to support research in autonomous driving tasks, such as 3D tracking, motion forecasting, and stereo depth estimation. Developed by Argo AI, the dataset provides a wide range of high-quality sensor data, including high-resolution images, LiDAR point clouds, and map data.
!!! note
The Argoverse dataset `*.zip` file required for training was removed from Amazon S3 after the shutdown of Argo AI by Ford, but we have made it available for manual download on [Google Drive](https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link).
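If you prefer to script the manual download, one option is the third-party `gdown` package, sketched below; `gdown` is not an Ultralytics dependency, and the `datasets` extraction folder is an assumption that should be adjusted to match the dataset path expected by `Argoverse.yaml`.
```python
import zipfile
import gdown  # third-party Google Drive downloader (assumption; any download method works)
# File ID taken from the Google Drive link above
url = "https://drive.google.com/uc?id=1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi"
zip_path = gdown.download(url, "Argoverse.zip", quiet=False)
# Unpack next to your other Ultralytics datasets
with zipfile.ZipFile(zip_path) as zf:
    zf.extractall("datasets")
```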
## Key Features
- Argoverse contains over 290K labeled 3D object tracks and 5 million object instances across 1,263 distinct scenes.
- The dataset includes high-resolution camera images, LiDAR point clouds, and richly annotated HD maps.
- Annotations include 3D bounding boxes for objects, object tracks, and trajectory information.
- Argoverse provides multiple subsets for different tasks, such as 3D tracking, motion forecasting, and stereo depth estimation.
## Dataset Structure
The Argoverse dataset is organized into three main subsets:
1. **Argoverse 3D Tracking**: This subset contains 113 scenes with over 290K labeled 3D object tracks, focusing on 3D object tracking tasks. It includes LiDAR point clouds, camera images, and sensor calibration information.
2. **Argoverse Motion Forecasting**: This subset consists of 324K vehicle trajectories collected from 60 hours of driving data, suitable for motion forecasting tasks.
3. **Argoverse Stereo Depth Estimation**: This subset is designed for stereo depth estimation tasks and includes over 10K stereo image pairs with corresponding LiDAR point clouds for ground truth depth estimation.
## Applications
The Argoverse dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in autonomous driving tasks such as 3D object tracking, motion forecasting, and stereo depth estimation. The dataset's diverse set of sensor data, object annotations, and map information make it a valuable resource for researchers and practitioners in the field of autonomous driving.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Argoverse dataset, the `Argoverse.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Argoverse.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Argoverse.yaml).
!!! example "ultralytics/cfg/datasets/Argoverse.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/Argoverse.yaml"
```
## Usage
To train a YOLO11n model on the Argoverse dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="Argoverse.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=Argoverse.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Argoverse dataset contains a diverse set of sensor data, including camera images, LiDAR point clouds, and HD map information, providing rich context for autonomous driving tasks. Here are some examples of data from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/argoverse-3d-tracking-sample.avif)
- **Argoverse 3D Tracking**: This image demonstrates an example of 3D object tracking, where objects are annotated with 3D bounding boxes. The dataset provides LiDAR point clouds and camera images to facilitate the development of models for this task.
The example showcases the variety and complexity of the data in the Argoverse dataset and highlights the importance of high-quality sensor data for autonomous driving tasks.
## Citations and Acknowledgments
If you use the Argoverse dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{chang2019argoverse,
title={Argoverse: 3D Tracking and Forecasting with Rich Maps},
author={Chang, Ming-Fang and Lambert, John and Sangkloy, Patsorn and Singh, Jagjeet and Bak, Slawomir and Hartnett, Andrew and Wang, Dequan and Carr, Peter and Lucey, Simon and Ramanan, Deva and others},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={8748--8757},
year={2019}
}
```
We would like to acknowledge Argo AI for creating and maintaining the Argoverse dataset as a valuable resource for the autonomous driving research community. For more information about the Argoverse dataset and its creators, visit the [Argoverse dataset website](https://www.argoverse.org/).
## FAQ
### What is the Argoverse dataset and its key features?
The [Argoverse](https://www.argoverse.org/) dataset, developed by Argo AI, supports autonomous driving research. It includes over 290K labeled 3D object tracks and 5 million object instances across 1,263 distinct scenes. The dataset provides high-resolution camera images, LiDAR point clouds, and annotated HD maps, making it valuable for tasks like 3D tracking, motion forecasting, and stereo depth estimation.
### How can I train an Ultralytics YOLO model using the Argoverse dataset?
To train a YOLO11 model with the Argoverse dataset, use the provided YAML configuration file and the following code:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="Argoverse.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=Argoverse.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For a detailed explanation of the arguments, refer to the model [Training](../../modes/train.md) page.
### What types of data and annotations are available in the Argoverse dataset?
The Argoverse dataset includes various sensor data types such as high-resolution camera images, LiDAR point clouds, and HD map data. Annotations include 3D bounding boxes, object tracks, and trajectory information. These comprehensive annotations are essential for accurate model training in tasks like 3D object tracking, motion forecasting, and stereo depth estimation.
### How is the Argoverse dataset structured?
The dataset is divided into three main subsets:
1. **Argoverse 3D Tracking**: Contains 113 scenes with over 290K labeled 3D object tracks, focusing on 3D object tracking tasks. It includes LiDAR point clouds, camera images, and sensor calibration information.
2. **Argoverse Motion Forecasting**: Consists of 324K vehicle trajectories collected from 60 hours of driving data, suitable for motion forecasting tasks.
3. **Argoverse Stereo Depth Estimation**: Includes over 10K stereo image pairs with corresponding LiDAR point clouds for ground truth depth estimation.
### Where can I download the Argoverse dataset now that it has been removed from Amazon S3?
The Argoverse dataset `*.zip` file, previously available on Amazon S3, can now be manually downloaded from [Google Drive](https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link).
### What is the YAML configuration file used for with the Argoverse dataset?
A YAML file contains the dataset's paths, classes, and other essential information. For the Argoverse dataset, the configuration file, `Argoverse.yaml`, can be found at the following link: [Argoverse.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Argoverse.yaml).
For more information about YAML configurations, see our [datasets](../index.md) guide.

View File

@ -0,0 +1,198 @@
---
comments: true
description: Explore the brain tumor detection dataset with MRI/CT images. Essential for training AI models for early diagnosis and treatment planning.
keywords: brain tumor dataset, MRI scans, CT scans, brain tumor detection, medical imaging, AI in healthcare, computer vision, early diagnosis, treatment planning
---
# Brain Tumor Dataset
<a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-brain-tumor-detection-dataset.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Brain Tumor Dataset In Colab"></a>
A brain tumor detection dataset consists of medical images from MRI or CT scans, containing information about brain tumor presence, location, and characteristics. This dataset is essential for training [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) algorithms to automate brain tumor identification, aiding in early diagnosis and treatment planning in [healthcare applications](https://www.ultralytics.com/solutions/ai-in-healthcare).
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/ogTBBD8McRk"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Brain Tumor Detection using Ultralytics HUB
</p>
## Dataset Structure
The brain tumor dataset is divided into two subsets:
- **Training set**: Consisting of 893 images, each accompanied by corresponding annotations.
- **Testing set**: Comprising 223 images, with annotations paired for each one.
The dataset contains two classes:
- **Negative**: Images without brain tumors
- **Positive**: Images with brain tumors
## Applications
The application of brain tumor detection using computer vision enables [early diagnosis](https://www.ultralytics.com/blog/ai-and-radiology-a-new-era-of-precision-and-efficiency), treatment planning, and monitoring of tumor progression. By analyzing medical imaging data like MRI or CT scans, [computer vision systems](https://docs.ultralytics.com/tasks/detect/) assist in accurately identifying brain tumors, aiding in timely medical intervention and personalized treatment strategies.
Medical professionals can leverage this technology to:
- Reduce diagnostic time and improve accuracy
- Assist in surgical planning by precisely locating tumors
- Monitor treatment effectiveness over time
- Support research in oncology and neurology
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the brain tumor dataset, the `brain-tumor.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/brain-tumor.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/brain-tumor.yaml).
!!! example "ultralytics/cfg/datasets/brain-tumor.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/brain-tumor.yaml"
```
## Usage
To train a [YOLO11](https://docs.ultralytics.com/models/yolo11/) model on the brain tumor dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, utilize the provided code snippets. For a detailed list of available arguments, consult the model's [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="brain-tumor.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=brain-tumor.yaml model=yolo11n.pt epochs=100 imgsz=640
```
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a brain-tumor fine-tuned model
# Inference using the model
results = model.predict("https://ultralytics.com/assets/brain-tumor-sample.jpg")
```
=== "CLI"
```bash
# Start prediction with a finetuned *.pt model
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/brain-tumor-sample.jpg"
```
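Each prediction returns a `Results` object whose boxes can be mapped back to the two dataset classes. A minimal sketch of reading them is shown below; the checkpoint path is a placeholder for your own fine-tuned weights.
```python
from ultralytics import YOLO
# Load a fine-tuned model (placeholder path)
model = YOLO("path/to/best.pt")
results = model.predict("https://ultralytics.com/assets/brain-tumor-sample.jpg")
for result in results:
    for box in result.boxes:
        class_name = result.names[int(box.cls)]  # "negative" or "positive" per brain-tumor.yaml
        print(f"{class_name}: {float(box.conf):.2f}, xyxy={box.xyxy.tolist()}")
```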
## Sample Images and Annotations
The brain tumor dataset encompasses a wide array of medical images featuring brain scans with and without tumors. Presented below are examples of images from the dataset, accompanied by their respective annotations.
![Brain tumor dataset sample image](https://github.com/ultralytics/docs/releases/download/0/brain-tumor-dataset-sample-image.avif)
- **Mosaiced Image**: Displayed here is a training batch comprising mosaiced dataset images. Mosaicing, a training technique, consolidates multiple images into one, enhancing batch diversity. This approach aids in improving the model's capacity to generalize across various tumor sizes, shapes, and locations within brain scans.
This example highlights the diversity and intricacy of images within the brain tumor dataset, underscoring the advantages of incorporating mosaicing during the training phase for [medical image analysis](https://www.ultralytics.com/blog/using-yolo11-for-tumor-detection-in-medical-imaging).
## Citations and Acknowledgments
The dataset has been made available under the [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE).
If you use this dataset in your research or development work, please cite it appropriately:
!!! quote ""
=== "BibTeX"
```bibtex
@dataset{Ultralytics_Brain_Tumor_Dataset_2023,
author = {Ultralytics},
title = {Brain Tumor Detection Dataset},
year = {2023},
publisher = {Ultralytics},
url = {https://docs.ultralytics.com/datasets/detect/brain-tumor/}
}
```
## FAQ
### What is the structure of the brain tumor dataset available in Ultralytics documentation?
The brain tumor dataset is divided into two subsets: the **training set** consists of 893 images with corresponding annotations, while the **testing set** comprises 223 images with paired annotations. This structured division aids in developing robust and accurate computer vision models for detecting brain tumors. For more information on the dataset structure, visit the [Dataset Structure](#dataset-structure) section.
### How can I train a YOLO11 model on the brain tumor dataset using Ultralytics?
You can train a YOLO11 model on the brain tumor dataset for 100 epochs with an image size of 640px using both Python and CLI methods. Below are the examples for both:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="brain-tumor.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=brain-tumor.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For a detailed list of available arguments, refer to the [Training](../../modes/train.md) page.
### What are the benefits of using the brain tumor dataset for AI in healthcare?
Using the brain tumor dataset in AI projects enables early diagnosis and treatment planning for brain tumors. It helps in automating brain tumor identification through computer vision, facilitating accurate and timely medical interventions, and supporting personalized treatment strategies. This application holds significant potential in improving patient outcomes and medical efficiencies. For more insights on AI applications in healthcare, see [Ultralytics' healthcare solutions](https://www.ultralytics.com/solutions/ai-in-healthcare).
### How do I perform inference using a fine-tuned YOLO11 model on the brain tumor dataset?
Inference using a fine-tuned YOLO11 model can be performed with either Python or CLI approaches. Here are the examples:
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a brain-tumor fine-tuned model
# Inference using the model
results = model.predict("https://ultralytics.com/assets/brain-tumor-sample.jpg")
```
=== "CLI"
```bash
# Start prediction with a finetuned *.pt model
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/brain-tumor-sample.jpg"
```
### Where can I find the YAML configuration for the brain tumor dataset?
The YAML configuration file for the brain tumor dataset can be found at [brain-tumor.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/brain-tumor.yaml). This file includes paths, classes, and additional relevant information necessary for training and evaluating models on this dataset.

View File

@ -0,0 +1,173 @@
---
comments: true
description: Explore the COCO dataset for object detection and segmentation. Learn about its structure, usage, pretrained models, and key features.
keywords: COCO dataset, object detection, segmentation, benchmarking, computer vision, pose estimation, YOLO models, COCO annotations
---
# COCO Dataset
The [COCO](https://cocodataset.org/#home) (Common Objects in Context) dataset is a large-scale object detection, segmentation, and captioning dataset. It is designed to encourage research on a wide variety of object categories and is commonly used for benchmarking [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models. It is an essential dataset for researchers and developers working on object detection, segmentation, and pose estimation tasks.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/uDrn9QZJ2lk"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics COCO Dataset Overview
</p>
## COCO Pretrained Models
{% include "macros/yolo-det-perf.md" %}
## Key Features
- COCO contains 330K images, with 200K images having annotations for object detection, segmentation, and captioning tasks.
- The dataset comprises 80 object categories, including common objects like cars, bicycles, and animals, as well as more specific categories such as umbrellas, handbags, and sports equipment.
- Annotations include object bounding boxes, segmentation masks, and captions for each image.
- COCO provides standardized evaluation metrics like [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) for object detection, and mean Average [Recall](https://www.ultralytics.com/glossary/recall) (mAR) for segmentation tasks, making it suitable for comparing model performance.
## Dataset Structure
The COCO dataset is split into three subsets:
1. **Train2017**: This subset contains 118K images for training object detection, segmentation, and captioning models.
2. **Val2017**: This subset has 5K images used for validation purposes during model training.
3. **Test2017**: This subset consists of 20K images used for testing and benchmarking the trained models. Ground truth annotations for this subset are not publicly available, and the results are submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7384) for performance evaluation.
## Applications
The COCO dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection (such as [Ultralytics YOLO](../../models/yolo11.md), [Faster R-CNN](https://arxiv.org/abs/1506.01497), and [SSD](https://arxiv.org/abs/1512.02325)), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) (such as [Mask R-CNN](https://arxiv.org/abs/1703.06870)), and keypoint detection (such as [OpenPose](https://arxiv.org/abs/1812.08008)). The dataset's diverse set of object categories, large number of annotated images, and standardized evaluation metrics make it an essential resource for computer vision researchers and practitioners.
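Because COCO's metrics are standardized, a common workflow is to validate a trained or pretrained checkpoint against the val2017 split and read off its mAP; a brief sketch is shown below (the dataset is downloaded automatically the first time `coco.yaml` is used).
```python
from ultralytics import YOLO
# Evaluate a COCO-pretrained checkpoint on the val2017 split
model = YOLO("yolo11n.pt")
metrics = model.val(data="coco.yaml")
print(metrics.box.map)  # mAP50-95
print(metrics.box.map50)  # mAP50
print(metrics.box.map75)  # mAP75
```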
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
!!! example "ultralytics/cfg/datasets/coco.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco.yaml"
```
## Usage
To train a YOLO11n model on the COCO dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
The COCO dataset contains a diverse set of images with various object categories and complex scenes. Here are some examples of images from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/mosaiced-coco-dataset-sample.avif)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the COCO dataset and why is it important for computer vision?
The [COCO dataset](https://cocodataset.org/#home) (Common Objects in Context) is a large-scale dataset used for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and captioning. It contains 330K images with detailed annotations for 80 object categories, making it essential for benchmarking and training computer vision models. Researchers use COCO due to its diverse categories and standardized evaluation metrics like mean Average [Precision](https://www.ultralytics.com/glossary/precision) (mAP).
### How can I train a YOLO model using the COCO dataset?
To train a YOLO11 model using the COCO dataset, you can use the following code snippets:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco.yaml model=yolo11n.pt epochs=100 imgsz=640
```
Refer to the [Training page](../../modes/train.md) for more details on available arguments.
### What are the key features of the COCO dataset?
The COCO dataset includes:
- 330K images, with 200K annotated for object detection, segmentation, and captioning.
- 80 object categories ranging from common items like cars and animals to specific ones like handbags and sports equipment.
- Standardized evaluation metrics for object detection (mAP) and segmentation (mean Average Recall, mAR).
- **Mosaicing** technique in training batches to enhance model generalization across various object sizes and contexts.
### Where can I find pretrained YOLO11 models trained on the COCO dataset?
Pretrained YOLO11 models on the COCO dataset can be downloaded from the links provided in the documentation. Examples include:
- [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt)
- [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt)
- [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt)
- [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt)
- [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt)
These models vary in size, mAP, and inference speed, providing options for different performance and resource requirements.
### How is the COCO dataset structured and how do I use it?
The COCO dataset is split into three subsets:
1. **Train2017**: 118K images for training.
2. **Val2017**: 5K images for validation during training.
3. **Test2017**: 20K images for benchmarking trained models. Results need to be submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7384) for performance evaluation.
The dataset's YAML configuration file is available at [coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml), which defines paths, classes, and dataset details.

View File

@ -0,0 +1,152 @@
---
comments: true
description: Explore the Ultralytics COCO128 dataset, a versatile and manageable set of 128 images perfect for testing object detection models and training pipelines.
keywords: COCO128, Ultralytics, dataset, object detection, YOLO11, training, validation, machine learning, computer vision
---
# COCO128 Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) COCO128 is a small, but versatile [object detection](https://www.ultralytics.com/glossary/object-detection) dataset composed of the first 128 images of the COCO train 2017 set. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 128 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/uDrn9QZJ2lk"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics COCO Dataset Overview
</p>
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO128 dataset, the `coco128.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco128.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco128.yaml).
!!! example "ultralytics/cfg/datasets/coco128.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco128.yaml"
```
## Usage
To train a YOLO11n model on the COCO128 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco128.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco128.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the COCO128 dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-1.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO128 dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the Ultralytics COCO128 dataset used for?
The Ultralytics COCO128 dataset is a compact subset containing the first 128 images from the COCO train 2017 dataset. It's primarily used for testing and debugging [object detection](https://www.ultralytics.com/glossary/object-detection) models, experimenting with new detection approaches, and validating training pipelines before scaling to larger datasets. Its manageable size makes it perfect for quick iterations while still providing enough diversity to be a meaningful test case.
### How do I train a YOLO11 model using the COCO128 dataset?
To train a YOLO11 model on the COCO128 dataset, you can use either Python or CLI commands. Here's how:
=== "Python"
````python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n.pt")
# Train the model
results = model.train(data="coco128.yaml", epochs=100, imgsz=640)
```
=== "CLI"
`bash
yolo detect train data=coco128.yaml model=yolo11n.pt epochs=100 imgsz=640
`
For more training options and parameters, refer to the [Training](../../modes/train.md) documentation.
### What are the benefits of using mosaic augmentation with COCO128?
Mosaic augmentation, as shown in the sample images, combines multiple training images into a single composite image. This technique offers several benefits when training with COCO128:
- Increases the variety of objects and contexts within each training batch
- Improves model generalization across different object sizes and aspect ratios
- Enhances detection performance for objects at various scales
- Maximizes the utility of a small dataset by creating more diverse training samples
This technique is particularly valuable for smaller datasets like COCO128, helping models learn more robust features from limited data.
### How does COCO128 compare to other COCO dataset variants?
COCO128 (128 images) sits between [COCO8](../detect/coco8.md) (8 images) and the full [COCO](../detect/coco.md) dataset (118K+ images) in terms of size:
- **COCO8**: Contains just 8 images (4 train, 4 val) - ideal for quick tests and debugging
- **COCO128**: Contains 128 images - balanced between size and diversity
- **Full COCO**: Contains 118K+ training images - comprehensive but resource-intensive
COCO128 provides a good middle ground, offering more diversity than COCO8 while remaining much more manageable than the full COCO dataset for experimentation and initial model development.
### Can I use COCO128 for tasks other than object detection?
While COCO128 is primarily designed for object detection, the dataset's annotations can be adapted for other computer vision tasks:
- **Instance segmentation**: Using the segmentation masks provided in the annotations
- **Keypoint detection**: For images containing people with keypoint annotations
- **Transfer learning**: As a starting point for fine-tuning models for custom tasks
For specialized tasks like [segmentation](../../tasks/segment.md), consider using purpose-built variants like [COCO8-seg](../segment/coco8-seg.md) which include the appropriate annotations.

View File

@ -0,0 +1,135 @@
---
comments: true
description: Explore the Ultralytics COCO8 dataset, a versatile and manageable set of 8 images perfect for testing object detection models and training pipelines.
keywords: COCO8, Ultralytics, dataset, object detection, YOLO11, training, validation, machine learning, computer vision
---
# COCO8 Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) COCO8 is a small, but versatile [object detection](https://www.ultralytics.com/glossary/object-detection) dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/uDrn9QZJ2lk"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics COCO Dataset Overview
</p>
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8 dataset, the `coco8.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml).
!!! example "ultralytics/cfg/datasets/coco8.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco8.yaml"
```
## Usage
To train a YOLO11n model on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the COCO8 dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-1.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO8 dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the Ultralytics COCO8 dataset used for?
The Ultralytics COCO8 dataset is a compact yet versatile object detection dataset consisting of the first 8 images from the COCO train 2017 set, with 4 images for training and 4 for validation. It is designed for testing and debugging object detection models and experimentation with new detection approaches. Despite its small size, COCO8 offers enough diversity to act as a sanity check for your training pipelines before scaling up to larger datasets. For more details, view the [COCO8 dataset configuration](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml).
### How do I train a YOLO11 model using the COCO8 dataset?
To train a YOLO11 model using the COCO8 dataset, you can employ either Python or CLI commands. Here's how you can start:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
### Why should I use Ultralytics HUB for managing my COCO8 training?
Ultralytics HUB is an all-in-one web tool designed to simplify the training and deployment of YOLO models, including the Ultralytics YOLO11 models on the COCO8 dataset. It offers cloud training, real-time tracking, and seamless dataset management. HUB allows you to start training with a single click and avoids the complexities of manual setups. Discover more about [Ultralytics HUB](https://hub.ultralytics.com/) and its benefits.
### What are the benefits of using mosaic augmentation in training with the COCO8 dataset?
Mosaic augmentation, demonstrated in the COCO8 dataset, combines multiple images into a single image during training. This technique increases the variety of objects and scenes in each training batch, improving the model's ability to generalize across different object sizes, aspect ratios, and contexts. This results in a more robust object detection model. For more details, refer to the [training guide](#usage).
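As a rough sketch, mosaic strength is typically controlled through training arguments; the `mosaic` and `close_mosaic` values below are illustrative assumptions rather than tuned recommendations:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# Keep mosaic enabled for most of training, then turn it off for the final epochs
# (argument values are illustrative, not tuned recommendations)
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, mosaic=1.0, close_mosaic=10)
```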
### How can I validate my YOLO11 model trained on the COCO8 dataset?
Validation of your YOLO11 model trained on the COCO8 dataset can be performed using the model's validation commands. You can invoke the validation mode via CLI or Python script to evaluate the model's performance using precise metrics. For detailed instructions, visit the [Validation](../../modes/val.md) page.
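For instance, a minimal Python sketch (the checkpoint path is an assumption about where your training run saved its best weights):

```python
from ultralytics import YOLO

# Load the fine-tuned checkpoint (path is an assumption; adjust to your run directory)
model = YOLO("runs/detect/train/weights/best.pt")

# Evaluate on the COCO8 validation split
metrics = model.val(data="coco8.yaml", imgsz=640)
print(metrics.box.map)  # mAP50-95
```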
---
comments: true
description: Explore the Global Wheat Head Dataset to develop accurate wheat head detection models. Includes training images, annotations, and usage for crop management.
keywords: Global Wheat Head Dataset, wheat head detection, wheat phenotyping, crop management, deep learning, object detection, training datasets
---
# Global Wheat Head Dataset
The [Global Wheat Head Dataset](https://www.global-wheat.com/) is a collection of images designed to support the development of accurate wheat head detection models for applications in wheat phenotyping and crop management. Wheat heads, also known as spikes, are the grain-bearing parts of the wheat plant. Accurate estimation of wheat head density and size is essential for assessing crop health, maturity, and yield potential. The dataset, created by a collaboration of nine research institutes from seven countries, covers multiple growing regions to ensure models generalize well across different environments.
## Key Features
- The dataset contains over 3,000 training images from Europe (France, UK, Switzerland) and North America (Canada).
- It includes approximately 1,000 test images from Australia, Japan, and China.
- Images are outdoor field images, capturing the natural variability in wheat head appearances.
- Annotations include wheat head bounding boxes to support [object detection](https://docs.ultralytics.com/tasks/detect/) tasks.
## Dataset Structure
The Global Wheat Head Dataset is organized into two main subsets:
1. **Training Set**: This subset contains over 3,000 images from Europe and North America. The images are labeled with wheat head bounding boxes, providing ground truth for training object detection models.
2. **Test Set**: This subset consists of approximately 1,000 images from Australia, Japan, and China. These images are used for evaluating the performance of trained models on unseen genotypes, environments, and observational conditions.
## Applications
The Global Wheat Head Dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in wheat head detection tasks. The dataset's diverse set of images, capturing a wide range of appearances, environments, and conditions, makes it a valuable resource for researchers and practitioners in the field of [plant phenotyping](https://www.ultralytics.com/blog/computer-vision-in-agriculture-transforming-fruit-detection-and-precision-farming) and crop management.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the Global Wheat Head Dataset, the `GlobalWheat2020.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/GlobalWheat2020.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/GlobalWheat2020.yaml).
!!! example "ultralytics/cfg/datasets/GlobalWheat2020.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/GlobalWheat2020.yaml"
```
## Usage
To train a YOLO11n model on the Global Wheat Head Dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="GlobalWheat2020.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=GlobalWheat2020.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Global Wheat Head Dataset contains a diverse set of outdoor field images, capturing the natural variability in wheat head appearances, environments, and conditions. Here are some examples of data from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/wheat-head-detection-sample.avif)
- **Wheat Head Detection**: This image demonstrates an example of wheat head detection, where wheat heads are annotated with bounding boxes. The dataset provides a variety of images to facilitate the development of models for this task.
The example showcases the variety and complexity of the data in the Global Wheat Head Dataset and highlights the importance of accurate wheat head detection for applications in wheat phenotyping and crop management.
## Citations and Acknowledgments
If you use the Global Wheat Head Dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{david2020global,
title={Global Wheat Head Detection (GWHD) Dataset: A Large and Diverse Dataset of High-Resolution RGB-Labelled Images to Develop and Benchmark Wheat Head Detection Methods},
author={David, Etienne and Madec, Simon and Sadeghi-Tehran, Pouria and Aasen, Helge and Zheng, Bangyou and Liu, Shouyang and Kirchgessner, Norbert and Ishikawa, Goro and Nagasawa, Koichi and Badhon, Minhajul and others},
journal={arXiv preprint arXiv:2005.02162},
year={2020}
}
```
We would like to acknowledge the researchers and institutions that contributed to the creation and maintenance of the Global Wheat Head Dataset as a valuable resource for the plant phenotyping and crop management research community. For more information about the dataset and its creators, visit the [Global Wheat Head Dataset website](https://www.global-wheat.com/).
## FAQ
### What is the Global Wheat Head Dataset used for?
The Global Wheat Head Dataset is primarily used for developing and training deep learning models aimed at wheat head detection. This is crucial for applications in [wheat phenotyping](https://www.ultralytics.com/blog/from-farm-to-table-how-ai-drives-innovation-in-agriculture) and crop management, allowing for more accurate estimations of wheat head density, size, and overall crop yield potential. Accurate detection methods help in assessing crop health and maturity, essential for efficient crop management.
### How do I train a YOLO11n model on the Global Wheat Head Dataset?
To train a YOLO11n model on the Global Wheat Head Dataset, you can use the following code snippets. Make sure you have the `GlobalWheat2020.yaml` configuration file specifying dataset paths and classes:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a pre-trained model (recommended for training)
model = YOLO("yolo11n.pt")
# Train the model
results = model.train(data="GlobalWheat2020.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=GlobalWheat2020.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the Global Wheat Head Dataset?
Key features of the Global Wheat Head Dataset include:
- Over 3,000 training images from Europe (France, UK, Switzerland) and North America (Canada).
- Approximately 1,000 test images from Australia, Japan, and China.
- High variability in wheat head appearances due to different growing environments.
- Detailed annotations with wheat head bounding boxes to aid [object detection](https://www.ultralytics.com/glossary/object-detection) models.
These features facilitate the development of robust models capable of generalization across multiple regions.
### Where can I find the configuration YAML file for the Global Wheat Head Dataset?
The configuration YAML file for the Global Wheat Head Dataset, named `GlobalWheat2020.yaml`, is available on GitHub. You can access it at this [link](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/GlobalWheat2020.yaml). This file contains necessary information about dataset paths, classes, and other configuration details needed for model training in [Ultralytics YOLO](https://docs.ultralytics.com/models/yolo11/).
### Why is wheat head detection important in crop management?
Wheat head detection is critical in crop management because it enables accurate estimation of wheat head density and size, which are essential for evaluating crop health, maturity, and yield potential. By leveraging [deep learning models](https://docs.ultralytics.com/models/) trained on datasets like the Global Wheat Head Dataset, farmers and researchers can better monitor and manage crops, leading to improved productivity and optimized resource use in agricultural practices. This technological advancement supports [sustainable agriculture](https://www.ultralytics.com/blog/real-time-crop-health-monitoring-with-ultralytics-yolo11) and food security initiatives.
For more information on applications of AI in agriculture, visit [AI in Agriculture](https://www.ultralytics.com/solutions/ai-in-agriculture).
---
comments: true
description: Learn about dataset formats compatible with Ultralytics YOLO for robust object detection. Explore supported datasets and learn how to convert formats.
keywords: Ultralytics, YOLO, object detection datasets, dataset formats, COCO, dataset conversion, training datasets
---
# Object Detection Datasets Overview
Training a robust and accurate [object detection](https://www.ultralytics.com/glossary/object-detection) model requires a comprehensive dataset. This guide introduces various formats of datasets that are compatible with the Ultralytics YOLO model and provides insights into their structure, usage, and how to convert between different formats.
## Supported Dataset Formats
### Ultralytics YOLO format
The Ultralytics YOLO format is a dataset configuration format that allows you to define the dataset root directory, the relative paths to training/validation/testing image directories or `*.txt` files containing image paths, and a dictionary of class names. Here is an example:
```yaml
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco8 # dataset root dir (absolute or relative; if relative, it's relative to default datasets_dir)
train: images/train # train images (relative to 'path') 4 images
val: images/val # val images (relative to 'path') 4 images
test: # test images (optional)
# Classes (80 COCO classes)
names:
0: person
1: bicycle
2: car
# ...
77: teddy bear
78: hair drier
79: toothbrush
```
Labels for this format should be exported to YOLO format with one `*.txt` file per image. If there are no objects in an image, no `*.txt` file is required. The `*.txt` file should be formatted with one row per object in `class x_center y_center width height` format. Box coordinates must be in **normalized xywh** format (from 0 to 1). If your boxes are in pixels, you should divide `x_center` and `width` by image width, and `y_center` and `height` by image height. Class numbers should be zero-indexed (start with 0).
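As a quick illustration of this normalization step, here is a minimal Python sketch (the box values and image size are hypothetical) that turns a pixel-space corner box into a single YOLO label line:

```python
def to_yolo_line(cls_id: int, x1: float, y1: float, x2: float, y2: float, img_w: int, img_h: int) -> str:
    """Convert a pixel-space corner box (x1, y1, x2, y2) to a normalized YOLO label line."""
    x_center = (x1 + x2) / 2 / img_w
    y_center = (y1 + y2) / 2 / img_h
    width = (x2 - x1) / img_w
    height = (y2 - y1) / img_h
    return f"{cls_id} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f}"


# Hypothetical 'person' (class 0) box in a 640x480 image
print(to_yolo_line(0, 98, 345, 420, 462, 640, 480))
```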
<p align="center"><img width="750" src="https://github.com/ultralytics/docs/releases/download/0/two-persons-tie.avif" alt="Example labelled image"></p>
The label file corresponding to the above image contains 2 persons (class `0`) and a tie (class `27`):
<p align="center"><img width="428" src="https://github.com/ultralytics/docs/releases/download/0/two-persons-tie-1.avif" alt="Example label file"></p>
When using the Ultralytics YOLO format, organize your training and validation images and labels as shown in the [COCO8 dataset](coco8.md) example below.
<p align="center"><img width="800" src="https://github.com/ultralytics/docs/releases/download/0/two-persons-tie-2.avif" alt="Example dataset directory structure"></p>
## Usage
Here's how you can use these formats to train your model:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Supported Datasets
Here is a list of the supported datasets and a brief description for each:
- [Argoverse](argoverse.md): A dataset containing 3D tracking and motion forecasting data from urban environments with rich annotations.
- [COCO](coco.md): Common Objects in Context (COCO) is a large-scale [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and captioning dataset with 80 object categories.
- [LVIS](lvis.md): A large-scale object detection, segmentation, and captioning dataset with 1203 object categories.
- [COCO8](coco8.md): A small subset of the first 8 images from the COCO train 2017 set (4 for training, 4 for validation), suitable for quick tests.
- [COCO128](coco128.md): A small subset of the first 128 images from the COCO train 2017 set, suitable for quick tests.
- [Global Wheat 2020](globalwheat2020.md): A dataset containing images of wheat heads for the Global Wheat Challenge 2020.
- [Objects365](objects365.md): A high-quality, large-scale dataset for object detection with 365 object categories and over 600K annotated images.
- [OpenImagesV7](open-images-v7.md): A comprehensive dataset by Google with 1.7M train images and 42k validation images.
- [SKU-110K](sku-110k.md): A dataset featuring dense object detection in retail environments with over 11K images and 1.7 million [bounding boxes](https://www.ultralytics.com/glossary/bounding-box).
- [VisDrone](visdrone.md): A dataset containing object detection and multi-object tracking data from drone-captured imagery with over 10K images and video sequences.
- [VOC](voc.md): The Pascal Visual Object Classes (VOC) dataset for object detection and segmentation with 20 object classes and over 11K images.
- [xView](xview.md): A dataset for object detection in overhead imagery with 60 object categories and over 1 million annotated objects.
- [Roboflow 100](roboflow-100.md): A diverse object detection benchmark with 100 datasets spanning seven imagery domains for comprehensive model evaluation.
- [Brain-tumor](brain-tumor.md): A dataset for brain tumor detection that includes MRI or CT scan images with details on tumor presence, location, and characteristics.
- [African-wildlife](african-wildlife.md): A dataset featuring images of African wildlife, including buffalo, elephant, rhino, and zebras.
- [Signature](signature.md): A dataset featuring images of various documents with annotated signatures, supporting document verification and fraud detection research.
- [Medical-pills](medical-pills.md): A dataset featuring images of medical-pills, annotated for applications such as pharmaceutical quality assurance, pill sorting, and regulatory compliance.
### Adding your own dataset
If you have your own dataset and would like to use it for training detection models with Ultralytics YOLO format, ensure that it follows the format specified above under "Ultralytics YOLO format". Convert your annotations to the required format and specify the paths, number of classes, and class names in the YAML configuration file.
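As a minimal sketch of this workflow (the dataset root, file name, and class names below are assumptions for illustration), you can write such a configuration programmatically and point training at it:

```python
from pathlib import Path

import yaml

from ultralytics import YOLO

# Hypothetical two-class dataset laid out in the Ultralytics YOLO format
config = {
    "path": "../datasets/my_dataset",  # dataset root (assumption)
    "train": "images/train",
    "val": "images/val",
    "names": {0: "cat", 1: "dog"},
}

yaml_path = Path("my_dataset.yaml")
yaml_path.write_text(yaml.safe_dump(config, sort_keys=False))

# Train on the custom dataset
model = YOLO("yolo11n.pt")
results = model.train(data=str(yaml_path), epochs=100, imgsz=640)
```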
## Port or Convert Label Formats
### COCO Dataset Format to YOLO Format
You can easily convert labels from the popular [COCO dataset](coco.md) format to the YOLO format using the following code snippet:
!!! example
=== "Python"
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/")
```
This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format. The process transforms the JSON-based COCO annotations into the simpler text-based YOLO format, making it compatible with [Ultralytics YOLO models](../../models/yolo11.md).
Remember to double-check if the dataset you want to use is compatible with your model and follows the necessary format conventions. Properly formatted datasets are crucial for training successful object detection models.
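As a hedged sketch, the converter also accepts an output directory and a segmentation toggle; the `save_dir` and `use_segments` argument names below are assumptions based on recent releases and should be verified against your installed version:

```python
from ultralytics.data.converter import convert_coco

# Convert COCO JSON annotations to YOLO *.txt labels in a chosen output directory
# (argument names assume the current converter API; verify against your installed version)
convert_coco(
    labels_dir="path/to/coco/annotations/",
    save_dir="path/to/yolo_labels/",
    use_segments=False,  # set True to also export segmentation polygons
)
```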
## FAQ
### What is the Ultralytics YOLO dataset format and how do I structure it?
The Ultralytics YOLO format is a structured configuration for defining datasets in your training projects. It involves setting paths to your training, validation, and testing images and corresponding labels. For example:
```yaml
path: ../datasets/coco8 # dataset root directory
train: images/train # training images (relative to 'path')
val: images/val # validation images (relative to 'path')
test: # optional test images
names:
0: person
1: bicycle
2: car
# ...
```
Labels are saved in `*.txt` files with one file per image, formatted as `class x_center y_center width height` with normalized coordinates. For a detailed guide, see the [COCO8 dataset example](coco8.md).
### How do I convert a COCO dataset to the YOLO format?
You can convert a COCO dataset to the YOLO format using the [Ultralytics conversion tools](../../reference/data/converter.md). Here's a quick method:
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/")
```
This code will convert your COCO annotations to YOLO format, enabling seamless integration with Ultralytics YOLO models. For additional details, visit the [Port or Convert Label Formats](#port-or-convert-label-formats) section.
### Which datasets are supported by Ultralytics YOLO for object detection?
Ultralytics YOLO supports a wide range of datasets, including:
- [Argoverse](argoverse.md)
- [COCO](coco.md)
- [LVIS](lvis.md)
- [COCO8](coco8.md)
- [Global Wheat 2020](globalwheat2020.md)
- [Objects365](objects365.md)
- [OpenImagesV7](open-images-v7.md)
Each dataset page provides detailed information on the structure and usage tailored for efficient YOLO11 training. Explore the full list in the [Supported Datasets](#supported-datasets) section.
### How do I start training a YOLO11 model using my dataset?
To start training a YOLO11 model, ensure your dataset is formatted correctly and the paths are defined in a YAML file. Use the following script to begin training:
!!! example
=== "Python"
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt") # Load a pretrained model
results = model.train(data="path/to/your_dataset.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
yolo detect train data=path/to/your_dataset.yaml model=yolo11n.pt epochs=100 imgsz=640
```
Refer to the [Usage](#usage) section for more details on utilizing different modes, including CLI commands.
### Where can I find practical examples of using Ultralytics YOLO for object detection?
Ultralytics provides numerous examples and practical guides for using YOLO11 in diverse applications. For a comprehensive overview, visit the [Ultralytics Blog](https://www.ultralytics.com/blog) where you can find case studies, detailed tutorials, and community stories showcasing object detection, segmentation, and more with YOLO11. For specific examples, check the [Usage](../../modes/predict.md) section in the documentation.
---
comments: true
description: Discover the LVIS dataset by Facebook AI Research, a benchmark for object detection and instance segmentation with a large, diverse vocabulary. Learn how to utilize it.
keywords: LVIS dataset, object detection, instance segmentation, Facebook AI Research, YOLO, computer vision, model training, LVIS examples
---
# LVIS Dataset
The [LVIS dataset](https://www.lvisdataset.org/) is a large-scale, fine-grained vocabulary-level annotation dataset developed and released by Facebook AI Research (FAIR). It is primarily used as a research benchmark for object detection and [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) with a large vocabulary of categories, aiming to drive further advancements in the field of computer vision.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/cfTKj96TjSE"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> YOLO World training workflow with LVIS dataset
</p>
<p align="center">
<img width="640" src="https://github.com/ultralytics/docs/releases/download/0/lvis-dataset-example-images.avif" alt="LVIS Dataset example images">
</p>
## Key Features
- LVIS contains 160k images and 2M instance annotations for object detection, segmentation, and captioning tasks.
- The dataset comprises 1203 object categories, including common objects like cars, bicycles, and animals, as well as more specific categories such as umbrellas, handbags, and sports equipment.
- Annotations include object bounding boxes, segmentation masks, and captions for each image.
- LVIS provides standardized evaluation metrics like [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) for object detection, and mean Average [Recall](https://www.ultralytics.com/glossary/recall) (mAR) for segmentation tasks, making it suitable for comparing model performance.
- LVIS uses exactly the same images as the [COCO](./coco.md) dataset, but with different splits and different annotations.
## Dataset Structure
The LVIS dataset is split into four subsets:
1. **Train**: This subset contains 100k images for training object detection, segmentation, and captioning models.
2. **Val**: This subset has 20k images used for validation purposes during model training.
3. **Minival**: This subset is exactly the same as COCO val2017 set which has 5k images used for validation purposes during model training.
4. **Test**: This subset consists of 20k images used for testing and benchmarking the trained models. Ground truth annotations for this subset are not publicly available, and the results are submitted to the [LVIS evaluation server](https://eval.ai/web/challenges/challenge-page/675/overview) for performance evaluation.
## Applications
The LVIS dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection (such as [YOLO](../../models/yolo11.md), [Faster R-CNN](https://arxiv.org/abs/1506.01497), and [SSD](https://arxiv.org/abs/1512.02325)) and instance segmentation (such as [Mask R-CNN](https://arxiv.org/abs/1703.06870)). The dataset's diverse set of object categories, large number of annotated images, and standardized evaluation metrics make it an essential resource for computer vision researchers and practitioners.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the LVIS dataset, the `lvis.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/lvis.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/lvis.yaml).
!!! example "ultralytics/cfg/datasets/lvis.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/lvis.yaml"
```
## Usage
To train a YOLO11n model on the LVIS dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="lvis.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=lvis.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
The LVIS dataset contains a diverse set of images with various object categories and complex scenes. Here are some examples of images from the dataset, along with their corresponding annotations:
![LVIS Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/lvis-mosaiced-training-batch.avif)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the LVIS dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the LVIS dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{gupta2019lvis,
title={LVIS: A Dataset for Large Vocabulary Instance Segmentation},
author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},
booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
year={2019}
}
```
We would like to acknowledge the LVIS Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the LVIS dataset and its creators, visit the [LVIS dataset website](https://www.lvisdataset.org/).
## FAQ
### What is the LVIS dataset, and how is it used in computer vision?
The [LVIS dataset](https://www.lvisdataset.org/) is a large-scale dataset with fine-grained vocabulary-level annotations developed by Facebook AI Research (FAIR). It is primarily used for object detection and instance segmentation, featuring 1203 object categories and 2 million instance annotations. Researchers and practitioners use it to train and benchmark models like Ultralytics YOLO for advanced computer vision tasks. The dataset's extensive size and diversity make it an essential resource for pushing the boundaries of model performance in detection and segmentation.
### How can I train a YOLO11n model using the LVIS dataset?
To train a YOLO11n model on the LVIS dataset for 100 epochs with an image size of 640, follow the example below. This process utilizes Ultralytics' framework, which offers comprehensive training features.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="lvis.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=lvis.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For detailed training configurations, refer to the [Training](../../modes/train.md) documentation.
### How does the LVIS dataset differ from the COCO dataset?
The images in the LVIS dataset are the same as those in the [COCO dataset](./coco.md), but the two differ in terms of splitting and annotations. LVIS provides a larger and more detailed vocabulary with 1203 object categories compared to COCO's 80 categories. Additionally, LVIS focuses on annotation completeness and diversity, aiming to push the limits of [object detection](https://www.ultralytics.com/glossary/object-detection) and instance segmentation models by offering more nuanced and comprehensive data.
### Why should I use Ultralytics YOLO for training on the LVIS dataset?
Ultralytics YOLO models, including the latest YOLO11, are optimized for real-time object detection with state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed. They support a wide range of annotations, such as the fine-grained ones provided by the LVIS dataset, making them ideal for advanced computer vision applications. Moreover, Ultralytics offers seamless integration with various [training](../../modes/train.md), [validation](../../modes/val.md), and [prediction](../../modes/predict.md) modes, ensuring efficient model development and deployment.
### Can I see some sample annotations from the LVIS dataset?
Yes, the LVIS dataset includes a variety of images with diverse object categories and complex scenes. Here is an example of a sample image along with its annotations:
![LVIS Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/lvis-mosaiced-training-batch.avif)
This mosaiced image demonstrates a training batch composed of multiple dataset images combined into one. Mosaicing increases the variety of objects and scenes within each training batch, enhancing the model's ability to generalize across different contexts. For more details on the LVIS dataset, explore the [LVIS dataset documentation](#key-features).
---
comments: true
description: Explore the medical-pills detection dataset with labeled images. Essential for training AI models for pharmaceutical identification and automation.
keywords: medical-pills dataset, pill detection, pharmaceutical imaging, AI in healthcare, computer vision, object detection, medical automation, dataset for training
---
# Medical Pills Dataset
<a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-medical-pills-dataset.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Medical Pills Dataset In Colab"></a>
The medical-pills detection dataset is a proof-of-concept (POC) dataset, carefully curated to demonstrate the potential of AI in pharmaceutical applications. It contains labeled images specifically designed to train [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) [models](https://docs.ultralytics.com/models/) for identifying medical-pills.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/8gePl_Zcs5c"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to train Ultralytics YOLO11 Model on Medical Pills Detection Dataset in <a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-medical-pills-dataset.ipynb">Google Colab</a>
</p>
This dataset serves as a foundational resource for automating essential [tasks](https://docs.ultralytics.com/tasks/) such as quality control, packaging automation, and efficient sorting in pharmaceutical workflows. By integrating this dataset into projects, researchers and developers can explore innovative [solutions](https://docs.ultralytics.com/solutions/) that enhance [accuracy](https://www.ultralytics.com/glossary/accuracy), streamline operations, and ultimately contribute to improved healthcare outcomes.
## Dataset Structure
The medical-pills dataset is divided into two subsets:
- **Training set**: Consisting of 92 images, each annotated with the class `pill`.
- **Validation set**: Comprising 23 images with corresponding annotations.
## Applications
Using computer vision for medical-pills detection enables automation in the pharmaceutical industry, supporting tasks like:
- **Pharmaceutical Sorting**: Automating the sorting of pills based on size, shape, or color to enhance production efficiency.
- **AI Research and Development**: Serving as a benchmark for developing and testing computer vision algorithms in pharmaceutical use cases.
- **Digital Inventory Systems**: Powering smart inventory solutions by integrating automated pill recognition for real-time stock monitoring and replenishment planning.
- **Quality Control**: Ensuring consistency in pill production by identifying defects, irregularities, or contamination.
- **Counterfeit Detection**: Helping identify potentially counterfeit medications by analyzing visual characteristics against known standards.
## Dataset YAML
A YAML configuration file is provided to define the dataset's structure, including paths and classes. For the medical-pills dataset, the `medical-pills.yaml` file can be accessed at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/medical-pills.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/medical-pills.yaml).
!!! example "ultralytics/cfg/datasets/medical-pills.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/medical-pills.yaml"
```
## Usage
To train a YOLO11n model on the medical-pills dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the following examples. For detailed arguments, refer to the model's [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="medical-pills.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=medical-pills.yaml model=yolo11n.pt epochs=100 imgsz=640
```
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a fine-tuned model
# Inference using the model
results = model.predict("https://ultralytics.com/assets/medical-pills-sample.jpg")
```
=== "CLI"
```bash
# Start prediction with a fine-tuned *.pt model
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/medical-pills-sample.jpg"
```
## Sample Images and Annotations
The medical-pills dataset features labeled images showcasing the diversity of pills. Below is an example of a labeled image from the dataset:
![Medical-pills dataset sample image](https://github.com/ultralytics/docs/releases/download/0/medical-pills-dataset-sample-image.avif)
- **Mosaiced Image**: Displayed is a training batch comprising mosaiced dataset images. Mosaicing enhances training diversity by consolidating multiple images into one, improving model generalization.
## Integration with Other Datasets
For more comprehensive pharmaceutical analysis, consider combining the medical-pills dataset with other related datasets like [package-seg](../segment/package-seg.md) for packaging identification or medical imaging datasets like [brain-tumor](brain-tumor.md) to develop end-to-end healthcare AI solutions.
## Citations and Acknowledgments
The dataset is available under the [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE).
If you use the Medical-pills dataset in your research or development work, please cite it as follows:
!!! quote ""
=== "BibTeX"
```bibtex
@dataset{Jocher_Ultralytics_Datasets_2024,
author = {Jocher, Glenn and Rizwan, Muhammad},
license = {AGPL-3.0},
month = {Dec},
title = {Ultralytics Datasets: Medical-pills Detection Dataset},
url = {https://docs.ultralytics.com/datasets/detect/medical-pills/},
version = {1.0.0},
year = {2024}
}
```
## FAQ
### What is the structure of the medical-pills dataset?
The dataset includes 92 images for training and 23 images for validation. Each image is annotated with the class `pill`, enabling effective training and evaluation of models for pharmaceutical applications.
### How can I train a YOLO11 model on the medical-pills dataset?
You can train a YOLO11 model for 100 epochs with an image size of 640px using the Python or CLI methods provided. Refer to the [Training Example](#usage) section for detailed instructions and check the [YOLO11 documentation](../../models/yolo11.md) for more information on model capabilities.
### What are the benefits of using the medical-pills dataset in AI projects?
The dataset enables automation in pill detection, contributing to counterfeit prevention, quality assurance, and pharmaceutical process optimization. It also serves as a valuable resource for developing AI solutions that can improve medication safety and supply chain efficiency.
### How do I perform inference on the medical-pills dataset?
Inference can be done using Python or CLI methods with a fine-tuned YOLO11 model. Refer to the [Inference Example](#usage) section for code snippets and the [Predict mode documentation](../../modes/predict.md) for additional options.
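For example, a minimal Python sketch (the checkpoint path is illustrative, and the `show`/`save` calls assume the standard `Results` helpers):

```python
from ultralytics import YOLO

# Load a fine-tuned checkpoint (path is illustrative)
model = YOLO("path/to/best.pt")

# Run inference on the sample image, then visualize and save the annotated result
results = model.predict("https://ultralytics.com/assets/medical-pills-sample.jpg")
results[0].show()  # display annotated predictions
results[0].save(filename="pills_prediction.jpg")  # save annotated image to disk
```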
### Where can I find the YAML configuration file for the medical-pills dataset?
The YAML file is available at [medical-pills.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/medical-pills.yaml), containing dataset paths, classes, and additional configuration details essential for training models on this dataset.
---
comments: true
description: Explore the Objects365 Dataset with 2M images and 30M bounding boxes across 365 categories. Enhance your object detection models with diverse, high-quality data.
keywords: Objects365 dataset, object detection, machine learning, deep learning, computer vision, annotated images, bounding boxes, YOLO11, high-resolution images, dataset configuration
---
# Objects365 Dataset
The [Objects365](https://www.objects365.org/) dataset is a large-scale, high-quality dataset designed to foster object detection research with a focus on diverse objects in the wild. Created by a team of [Megvii](https://en.megvii.com/) researchers, the dataset offers a wide range of high-resolution images with a comprehensive set of annotated bounding boxes covering 365 object categories.
## Key Features
- Objects365 contains 365 object categories, with 2 million images and over 30 million bounding boxes.
- The dataset includes diverse objects in various scenarios, providing a rich and challenging benchmark for object detection tasks.
- Annotations include bounding boxes for objects, making it suitable for training and evaluating object detection models.
- Objects365 pre-trained models significantly outperform ImageNet pre-trained models, leading to better generalization on various tasks.
## Dataset Structure
The Objects365 dataset is organized into a single set of images with corresponding annotations:
- **Images**: The dataset includes 2 million high-resolution images, each containing a variety of objects across 365 categories.
- **Annotations**: The images are annotated with over 30 million bounding boxes, providing comprehensive ground truth information for [object detection](https://docs.ultralytics.com/tasks/detect/) tasks.
## Applications
The Objects365 dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection tasks. The dataset's diverse set of object categories and high-quality annotations make it a valuable resource for researchers and practitioners in the field of [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the Objects365 Dataset, the `Objects365.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Objects365.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Objects365.yaml).
!!! example "ultralytics/cfg/datasets/Objects365.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/Objects365.yaml"
```
## Usage
To train a YOLO11n model on the Objects365 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="Objects365.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=Objects365.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Objects365 dataset contains a diverse set of high-resolution images with objects from 365 categories, providing rich context for [object detection](https://www.ultralytics.com/glossary/object-detection) tasks. Here are some examples of the images in the dataset:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/objects365-sample-image.avif)
- **Objects365**: This image demonstrates an example of object detection, where objects are annotated with bounding boxes. The dataset provides a wide range of images to facilitate the development of models for this task.
The example showcases the variety and complexity of the data in the Objects365 dataset and highlights the importance of accurate object detection for computer vision applications.
## Citations and Acknowledgments
If you use the Objects365 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{shao2019objects365,
title={Objects365: A Large-scale, High-quality Dataset for Object Detection},
author={Shao, Shuai and Li, Zeming and Zhang, Tianyuan and Peng, Chao and Yu, Gang and Li, Jing and Zhang, Xiangyu and Sun, Jian},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={8425--8434},
year={2019}
}
```
We would like to acknowledge the team of researchers who created and maintain the Objects365 dataset as a valuable resource for the computer vision research community. For more information about the Objects365 dataset and its creators, visit the [Objects365 dataset website](https://www.objects365.org/).
## FAQ
### What is the Objects365 dataset used for?
The [Objects365 dataset](https://www.objects365.org/) is designed for object detection tasks in [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision. It provides a large-scale, high-quality dataset with 2 million annotated images and 30 million bounding boxes across 365 categories. Leveraging such a diverse dataset helps improve the performance and generalization of object detection models, making it invaluable for research and development in the field.
### How can I train a YOLO11 model on the Objects365 dataset?
To train a YOLO11n model using the Objects365 dataset for 100 epochs with an image size of 640, follow these instructions:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="Objects365.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=Objects365.yaml model=yolo11n.pt epochs=100 imgsz=640
```
Refer to the [Training](../../modes/train.md) page for a comprehensive list of available arguments.
### Why should I use the Objects365 dataset for my object detection projects?
The Objects365 dataset offers several advantages for object detection tasks:
1. **Diversity**: It includes 2 million images with objects in diverse scenarios, covering 365 categories.
2. **High-quality Annotations**: Over 30 million bounding boxes provide comprehensive ground truth data.
3. **Performance**: Models pre-trained on Objects365 significantly outperform those trained on datasets like [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), leading to better generalization.
### Where can I find the YAML configuration file for the Objects365 dataset?
The YAML configuration file for the Objects365 dataset is available at [Objects365.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Objects365.yaml). This file contains essential information such as dataset paths and class labels, crucial for setting up your training environment.
### How does the dataset structure of Objects365 enhance object detection modeling?
The [Objects365 dataset](https://www.objects365.org/) is organized with 2 million high-resolution images and comprehensive annotations of over 30 million bounding boxes. This structure ensures a robust dataset for training [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection, offering a wide variety of objects and scenarios. Such diversity and volume help in developing models that are more accurate and capable of generalizing well to real-world applications. For more details on the dataset structure, refer to the [Dataset YAML](#dataset-yaml) section.
---
comments: true
description: Explore the comprehensive Open Images V7 dataset by Google. Learn about its annotations, applications, and use YOLO11 pretrained models for computer vision tasks.
keywords: Open Images V7, Google dataset, computer vision, YOLO11 models, object detection, image segmentation, visual relationships, AI research, Ultralytics
---
# Open Images V7 Dataset
[Open Images V7](https://storage.googleapis.com/openimages/web/index.html) is a versatile and expansive dataset championed by Google. Aimed at propelling research in the realm of [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), it boasts a vast collection of images annotated with a plethora of data, including image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/u3pLlgzUeV8"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> <a href="https://www.ultralytics.com/glossary/object-detection">Object Detection</a> using OpenImagesV7 Pretrained Model
</p>
## Open Images V7 Pretrained Models
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-oiv7.pt) | 640 | 18.4 | 142.4 | 1.21 | 3.5 | 10.5 |
| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-oiv7.pt) | 640 | 27.7 | 183.1 | 1.40 | 11.4 | 29.7 |
| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-oiv7.pt) | 640 | 33.6 | 408.5 | 2.26 | 26.2 | 80.6 |
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-oiv7.pt) | 640 | 34.9 | 596.9 | 2.43 | 44.1 | 167.4 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-oiv7.pt) | 640 | 36.3 | 860.6 | 3.56 | 68.7 | 260.6 |
You can use these pretrained models for inference or fine-tuning as follows.
!!! example "Pretrained Model Usage Example"
=== "Python"
```python
from ultralytics import YOLO
# Load an Open Images Dataset V7 pretrained YOLOv8n model
model = YOLO("yolov8n-oiv7.pt")
# Run prediction
results = model.predict(source="image.jpg")
# Start training from the pretrained checkpoint
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Predict using an Open Images Dataset V7 pretrained model
yolo detect predict source=image.jpg model=yolov8n-oiv7.pt
# Start training from an Open Images Dataset V7 pretrained checkpoint
yolo detect train data=coco8.yaml model=yolov8n-oiv7.pt epochs=100 imgsz=640
```
![Open Images V7 classes visual](https://github.com/ultralytics/docs/releases/download/0/open-images-v7-classes-visual.avif)
## Key Features
- Encompasses ~9M images annotated in various ways to suit multiple computer vision tasks.
- Houses a staggering 16M bounding boxes across 600 object classes in 1.9M images. These boxes are primarily hand-drawn by experts, ensuring high [precision](https://www.ultralytics.com/glossary/precision).
- Visual relationship annotations totaling 3.3M are available, detailing 1,466 unique relationship triplets, object properties, and human activities.
- V5 introduced segmentation masks for 2.8M objects across 350 classes.
- V6 introduced 675k localized narratives that amalgamate voice, text, and mouse traces highlighting described objects.
- V7 introduced 66.4M point-level labels on 1.4M images, spanning 5,827 classes.
- Encompasses 61.4M image-level labels across a diverse set of 20,638 classes.
- Provides a unified platform for [image classification](https://www.ultralytics.com/glossary/image-classification), object detection, relationship detection, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and multimodal image descriptions.
## Dataset Structure
Open Images V7 is structured in multiple components catering to varied computer vision challenges:
- **Images**: About 9 million images, often showcasing intricate scenes with an average of 8.3 objects per image.
- **Bounding Boxes**: Over 16 million boxes that demarcate objects across 600 categories.
- **Segmentation Masks**: These detail the exact boundary of 2.8M objects across 350 classes.
- **Visual Relationships**: 3.3M annotations indicating object relationships, properties, and actions.
- **Localized Narratives**: 675k descriptions combining voice, text, and mouse traces.
- **Point-Level Labels**: 66.4M labels across 1.4M images, suitable for zero/few-shot [semantic segmentation](https://www.ultralytics.com/glossary/semantic-segmentation).
## Applications
Open Images V7 is a cornerstone for training and evaluating state-of-the-art models in various computer vision tasks. The dataset's broad scope and high-quality annotations make it indispensable for researchers and developers specializing in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
Some key applications include:
- **Advanced Object Detection**: Train models to identify and locate multiple objects in complex scenes with high accuracy.
- **Semantic Understanding**: Develop systems that comprehend visual relationships between objects.
- **Image Segmentation**: Create precise pixel-level masks for objects, enabling detailed scene analysis.
- **Multi-modal Learning**: Combine visual data with text descriptions for richer AI understanding.
- **Zero-shot Learning**: Leverage the extensive class coverage to identify objects not seen during training.
## Dataset YAML
Typically, datasets come with a YAML (YAML Ain't Markup Language) file that delineates the dataset's configuration. For Open Images V7, the configuration file is maintained in the Ultralytics repository at `ultralytics/cfg/datasets/open-images-v7.yaml`. For authoritative paths and settings, also refer to the dataset's official documentation.
!!! example "OpenImagesV7.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/open-images-v7.yaml"
```
## Usage
To train a YOLO11n model on the Open Images V7 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! warning
The complete Open Images V7 dataset comprises 1,743,042 training images and 41,620 validation images, requiring approximately **561 GB of storage space** upon download.
Executing the commands provided below will trigger an automatic download of the full dataset if it's not already present locally. Before running the example below, it's crucial to:
- Verify that your device has enough storage capacity (a quick check is sketched after this list).
- Ensure a robust and speedy internet connection.
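The following is a minimal disk-space check using only the Python standard library; the 600 GB threshold is an assumption that simply adds headroom above the ~561 GB dataset size, and the checked path should point at the drive that holds your datasets directory:

```python
import shutil

# Check free disk space before triggering the ~561 GB Open Images V7 download
free_gb = shutil.disk_usage("/").free / 1e9  # adjust "/" to the drive holding your datasets directory
print(f"Free space: {free_gb:.0f} GB")
if free_gb < 600:  # assumed threshold with headroom above the ~561 GB dataset size
    raise SystemExit("Not enough free disk space for the full Open Images V7 dataset.")
```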
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a COCO-pretrained YOLO11n model
model = YOLO("yolo11n.pt")
# Train the model on the Open Images V7 dataset
results = model.train(data="open-images-v7.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Train a COCO-pretrained YOLO11n model on the Open Images V7 dataset
yolo detect train data=open-images-v7.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
Illustrations of the dataset help provide insights into its richness:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/oidv7-all-in-one-example-ab.avif)
- **Open Images V7**: This image exemplifies the depth and detail of annotations available, including bounding boxes, relationships, and segmentation masks.
Researchers can gain invaluable insights into the array of computer vision challenges that the dataset addresses, from basic object detection to intricate relationship identification. The [diversity of annotations](https://docs.ultralytics.com/datasets/explorer/) makes Open Images V7 particularly valuable for developing models that can understand complex visual scenes.
## Citations and Acknowledgments
For those employing Open Images V7 in their work, it's prudent to cite the relevant papers and acknowledge the creators:
!!! quote ""
=== "BibTeX"
```bibtex
@article{OpenImages,
author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Alexander Kolesnikov and Tom Duerig and Vittorio Ferrari},
title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale},
year = {2020},
journal = {IJCV}
}
```
A heartfelt acknowledgment goes out to the Google AI team for creating and maintaining the Open Images V7 dataset. For a deep dive into the dataset and its offerings, navigate to the [official Open Images V7 website](https://storage.googleapis.com/openimages/web/index.html).
## FAQ
### What is the Open Images V7 dataset?
Open Images V7 is an extensive and versatile dataset created by Google, designed to advance research in computer vision. It includes image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives, making it ideal for various computer vision tasks such as object detection, segmentation, and relationship detection.
### How do I train a YOLO11 model on the Open Images V7 dataset?
To train a YOLO11 model on the Open Images V7 dataset, you can use both Python and CLI commands. Here's an example of training the YOLO11n model for 100 epochs with an image size of 640:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a COCO-pretrained YOLO11n model
model = YOLO("yolo11n.pt")
# Train the model on the Open Images V7 dataset
results = model.train(data="open-images-v7.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Train a COCO-pretrained YOLO11n model on the Open Images V7 dataset
yolo detect train data=open-images-v7.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For more details on arguments and settings, refer to the [Training](../../modes/train.md) page.
### What are some key features of the Open Images V7 dataset?
The Open Images V7 dataset includes approximately 9 million images with various annotations:
- **Bounding Boxes**: 16 million bounding boxes across 600 object classes.
- **Segmentation Masks**: Masks for 2.8 million objects across 350 classes.
- **Visual Relationships**: 3.3 million annotations indicating relationships, properties, and actions.
- **Localized Narratives**: 675,000 descriptions combining voice, text, and mouse traces.
- **Point-Level Labels**: 66.4 million labels across 1.4 million images.
- **Image-Level Labels**: 61.4 million labels across 20,638 classes.
### What pretrained models are available for the Open Images V7 dataset?
Ultralytics provides several YOLOv8 pretrained models for the Open Images V7 dataset, each with different sizes and performance metrics:
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-oiv7.pt) | 640 | 18.4 | 142.4 | 1.21 | 3.5 | 10.5 |
| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-oiv7.pt) | 640 | 27.7 | 183.1 | 1.40 | 11.4 | 29.7 |
| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-oiv7.pt) | 640 | 33.6 | 408.5 | 2.26 | 26.2 | 80.6 |
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-oiv7.pt) | 640 | 34.9 | 596.9 | 2.43 | 44.1 | 167.4 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-oiv7.pt) | 640 | 36.3 | 860.6 | 3.56 | 68.7 | 260.6 |
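Because these checkpoints were trained on the full 600-class Open Images V7 label set, they can be used for inference out of the box. The sketch below loads the smallest checkpoint from the table above; the image path is a placeholder.

```python
from ultralytics import YOLO

# Load the Open Images V7 pretrained YOLOv8n checkpoint listed in the table above
model = YOLO("yolov8n-oiv7.pt")

# Run inference; "path/to/image.jpg" is a placeholder for your own image
results = model.predict("path/to/image.jpg", imgsz=640, conf=0.25)
results[0].show()  # visualize detections across the 600 Open Images classes
```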
### What applications can the Open Images V7 dataset be used for?
The Open Images V7 dataset supports a variety of computer vision tasks including:
- **[Image Classification](https://www.ultralytics.com/glossary/image-classification)**
- **Object Detection**
- **Instance Segmentation**
- **Visual Relationship Detection**
- **Multimodal Image Descriptions**
Its comprehensive annotations and broad scope make it suitable for training and evaluating advanced [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models, as highlighted in practical use cases detailed in our [applications](#applications) section.

View File

@ -0,0 +1,220 @@
---
comments: true
description: Explore the Roboflow 100 dataset featuring 100 diverse datasets designed to test object detection models across various domains, from healthcare to video games.
keywords: Roboflow 100, Ultralytics, object detection, dataset, benchmarking, machine learning, computer vision, diverse datasets, model evaluation
---
# Roboflow 100 Dataset
Roboflow 100, developed by [Roboflow](https://roboflow.com/?ref=ultralytics) and sponsored by Intel, is a groundbreaking [object detection](../../tasks/detect.md) benchmark. It includes 100 diverse datasets sampled from over 90,000 public datasets. This benchmark is designed to test the adaptability of models to various domains, including healthcare, aerial imagery, and video games.
<p align="center">
<img width="640" src="https://github.com/ultralytics/docs/releases/download/0/roboflow-100-overview.avif" alt="Roboflow 100 Overview">
</p>
## Key Features
- Includes 100 datasets across seven domains: Aerial, Video games, Microscopic, Underwater, Documents, Electromagnetic, and Real World.
- The benchmark comprises 224,714 images across 805 classes, thanks to over 11,170 hours of labeling efforts.
- All images are resized to 640x640 pixels, with a focus on eliminating class ambiguity and filtering out underrepresented classes.
- Annotations include bounding boxes for objects, making it suitable for [training](../../modes/train.md) and evaluating object detection models.
## Dataset Structure
The Roboflow 100 dataset is organized into seven categories, each with a distinct set of datasets, images, and classes:
- **Aerial**: Consists of 7 datasets with a total of 9,683 images, covering 24 distinct classes.
- **Video Games**: Includes 7 datasets, featuring 11,579 images across 88 classes.
- **Microscopic**: Comprises 11 datasets with 13,378 images, spanning 28 classes.
- **Underwater**: Contains 5 datasets, encompassing 18,003 images in 39 classes.
- **Documents**: Consists of 8 datasets with 24,813 images, divided into 90 classes.
- **Electromagnetic**: Made up of 12 datasets, totaling 36,381 images in 41 classes.
- **Real World**: The largest category with 50 datasets, offering 110,615 images across 495 classes.
This structure enables a diverse and extensive testing ground for [object detection](https://www.ultralytics.com/glossary/object-detection) models, reflecting real-world application scenarios.
## Benchmarking
Dataset benchmarking evaluates [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) model performance on specific datasets using standardized metrics like [accuracy](https://www.ultralytics.com/glossary/accuracy), [mean average precision](https://www.ultralytics.com/glossary/mean-average-precision-map) and [F1-score](https://www.ultralytics.com/glossary/f1-score).
!!! tip "Benchmarking"
Benchmarking results will be stored in "ultralytics-benchmarks/evaluation.txt"
!!! example "Benchmarking example"
=== "Python"
```python
import os
import shutil
from pathlib import Path
from ultralytics.utils.benchmarks import RF100Benchmark
# Initialize RF100Benchmark and set API key
benchmark = RF100Benchmark()
benchmark.set_key(api_key="YOUR_ROBOFLOW_API_KEY")
# Parse dataset and define file paths
names, cfg_yamls = benchmark.parse_dataset()
val_log_file = Path("ultralytics-benchmarks") / "validation.txt"
eval_log_file = Path("ultralytics-benchmarks") / "evaluation.txt"
# Run benchmarks on each dataset in RF100
for ind, path in enumerate(cfg_yamls):
path = Path(path)
if path.exists():
# Fix YAML file and run training
benchmark.fix_yaml(str(path))
os.system(f"yolo detect train data={path} model=yolo11s.pt epochs=1 batch=16")
# Run validation and evaluate
os.system(f"yolo detect val data={path} model=runs/detect/train/weights/best.pt > {val_log_file} 2>&1")
benchmark.evaluate(str(path), str(val_log_file), str(eval_log_file), ind)
# Remove the 'runs' directory
runs_dir = Path.cwd() / "runs"
shutil.rmtree(runs_dir)
else:
print("YAML file path does not exist")
continue
print("RF100 Benchmarking completed!")
```
## Applications
Roboflow 100 is invaluable for various applications related to [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) and [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl). Researchers and engineers can use this benchmark to:
- Evaluate the performance of object detection models in a multi-domain context.
- Test the adaptability of models to real-world scenarios beyond common object recognition.
- Benchmark the capabilities of object detection models across diverse datasets, including those in healthcare, aerial imagery, and video games.
- Compare model performance across different [neural network](https://www.ultralytics.com/glossary/neural-network-nn) architectures and optimization techniques.
- Identify domain-specific challenges that require specialized model training approaches.
For more ideas and inspiration on real-world applications, be sure to check out [our guides on real-world projects](../../guides/index.md).
## Usage
The Roboflow 100 dataset is available on both [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100?ref=ultralytics).
You can access it directly from the Roboflow 100 GitHub repository. In addition, on Roboflow Universe, you have the flexibility to download individual datasets by simply clicking the export button within each dataset.
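For programmatic downloads, the third-party `roboflow` pip package can export an individual RF100 dataset in YOLO format. The sketch below is illustrative only: the API key, workspace, project, and version identifiers are placeholders that vary per dataset.

```python
# pip install roboflow
from roboflow import Roboflow

rf = Roboflow(api_key="YOUR_ROBOFLOW_API_KEY")

# Workspace/project/version identifiers are placeholders; each RF100 dataset has its own
project = rf.workspace("roboflow-100").project("example-dataset")
dataset = project.version(1).download("yolov8")  # export labels in YOLO format

print(dataset.location)  # local path of the downloaded dataset
```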
## Sample Data and Annotations
Roboflow 100 consists of datasets with diverse images and videos captured from various angles and domains. Here's a look at examples of annotated images in the RF100 benchmark.
<p align="center">
<img width="640" src="https://github.com/ultralytics/docs/releases/download/0/sample-data-annotations.avif" alt="Sample Data and Annotations">
</p>
The diversity visible in the Roboflow 100 benchmark above is a significant advancement over traditional benchmarks, which often focus on optimizing a single metric within a limited domain. This comprehensive approach helps develop more robust and versatile [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models that can perform well across different scenarios.
## Citations and Acknowledgments
If you use the Roboflow 100 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{2211.13523,
Author = {Floriana Ciaglia and Francesco Saverio Zuppichini and Paul Guerrie and Mark McQuade and Jacob Solawetz},
Title = {Roboflow 100: A Rich, Multi-Domain Object Detection Benchmark},
Eprint = {arXiv:2211.13523},
}
```
Our thanks go to the Roboflow team and all the contributors for their hard work in creating and sustaining the Roboflow 100 dataset.
If you are interested in exploring more datasets to enhance your object detection and machine learning projects, feel free to visit [our comprehensive dataset collection](../index.md).
## FAQ
### What is the Roboflow 100 dataset, and why is it significant for object detection?
The **Roboflow 100** dataset, developed by [Roboflow](https://roboflow.com/?ref=ultralytics) and sponsored by Intel, is a crucial [object detection](../../tasks/detect.md) benchmark. It features 100 diverse datasets from over 90,000 public datasets, covering domains such as healthcare, aerial imagery, and video games. This diversity ensures that models can adapt to various real-world scenarios, enhancing their robustness and performance across different applications and environments.
### How can I use the Roboflow 100 dataset for benchmarking my object detection models?
To use the Roboflow 100 dataset for benchmarking, you can implement the [RF100Benchmark class](../../reference/utils/benchmarks.md) from the Ultralytics library. Here's a brief example:
!!! example "Benchmarking example"
=== "Python"
```python
import os
import shutil
from pathlib import Path
from ultralytics.utils.benchmarks import RF100Benchmark
# Initialize RF100Benchmark and set API key
benchmark = RF100Benchmark()
benchmark.set_key(api_key="YOUR_ROBOFLOW_API_KEY")
# Parse dataset and define file paths
names, cfg_yamls = benchmark.parse_dataset()
val_log_file = Path("ultralytics-benchmarks") / "validation.txt"
eval_log_file = Path("ultralytics-benchmarks") / "evaluation.txt"
# Run benchmarks on each dataset in RF100
for ind, path in enumerate(cfg_yamls):
path = Path(path)
if path.exists():
# Fix YAML file and run training
benchmark.fix_yaml(str(path))
os.system(f"yolo detect train data={path} model=yolo11n.pt epochs=1 batch=16")
# Run validation and evaluate
os.system(f"yolo detect val data={path} model=runs/detect/train/weights/best.pt > {val_log_file} 2>&1")
benchmark.evaluate(str(path), str(val_log_file), str(eval_log_file), ind)
# Remove 'runs' directory
runs_dir = Path.cwd() / "runs"
shutil.rmtree(runs_dir)
else:
print("YAML file path does not exist")
continue
print("RF100 Benchmarking completed!")
```
### Which domains are covered by the Roboflow 100 dataset?
The **Roboflow 100** dataset spans seven domains, each providing unique challenges and applications for [object detection](https://www.ultralytics.com/glossary/object-detection) models:
1. **Aerial**: 7 datasets, 9,683 images, 24 classes
2. **Video Games**: 7 datasets, 11,579 images, 88 classes
3. **Microscopic**: 11 datasets, 13,378 images, 28 classes
4. **Underwater**: 5 datasets, 18,003 images, 39 classes
5. **Documents**: 8 datasets, 24,813 images, 90 classes
6. **Electromagnetic**: 12 datasets, 36,381 images, 41 classes
7. **Real World**: 50 datasets, 110,615 images, 495 classes
This setup allows for extensive and varied testing of models across different real-world applications, making it an excellent resource for developing versatile computer vision systems.
### How do I access and download the Roboflow 100 dataset?
The **Roboflow 100** dataset is accessible on [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100?ref=ultralytics). You can download the entire dataset from GitHub or select individual datasets on Roboflow Universe using the export button. This flexibility allows you to focus on specific domains or use cases that are most relevant to your research or application.
### What should I include when citing the Roboflow 100 dataset in my research?
When using the Roboflow 100 dataset in your research, ensure to properly cite it. Here is the recommended citation:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{2211.13523,
Author = {Floriana Ciaglia and Francesco Saverio Zuppichini and Paul Guerrie and Mark McQuade and Jacob Solawetz},
Title = {Roboflow 100: A Rich, Multi-Domain Object Detection Benchmark},
Eprint = {arXiv:2211.13523},
}
```
For more details, you can refer to our [comprehensive dataset collection](../index.md) or explore other [detection datasets](../detect/index.md) available through Ultralytics.

View File

@ -0,0 +1,179 @@
---
comments: true
description: Discover the Signature Detection Dataset for training models to identify and verify human signatures in various documents. Perfect for document verification and fraud prevention.
keywords: Signature Detection Dataset, document verification, fraud detection, computer vision, YOLO11, Ultralytics, annotated signatures, training dataset
---
# Signature Detection Dataset
This dataset focuses on detecting human written signatures within documents. It includes a variety of document types with annotated signatures, providing valuable insights for applications in document verification and fraud detection. Essential for training [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) algorithms, this dataset aids in identifying signatures in various document formats, supporting research and practical applications in document analysis.
## Dataset Structure
The signature detection dataset is split into two subsets:
- **Training set**: Contains 143 images, each with corresponding annotations.
- **Validation set**: Includes 35 images, each with paired annotations.
## Applications
This dataset can be applied in various computer vision tasks such as [object detection](https://www.ultralytics.com/glossary/object-detection), [object tracking](https://docs.ultralytics.com/modes/track/), and document analysis. Specifically, it can be used to train and evaluate models for identifying signatures in documents, which has significant applications in:
- **Document Verification**: Automating the verification process for legal and financial documents
- **Fraud Detection**: Identifying potentially forged or unauthorized signatures
- **Digital Document Processing**: Streamlining workflows in administrative and legal sectors
- **Banking and Finance**: Enhancing security in check processing and loan document verification
- **Archival Research**: Supporting historical document analysis and cataloging
Additionally, it serves as a valuable resource for educational purposes, enabling students and researchers to study signature characteristics across different document types.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file defines the dataset configuration, including paths and class information. For the signature detection dataset, the `signature.yaml` file is located at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/signature.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/signature.yaml).
!!! example "ultralytics/cfg/datasets/signature.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/signature.yaml"
```
## Usage
To train a YOLO11n model on the signature detection dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="signature.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=signature.yaml model=yolo11n.pt epochs=100 imgsz=640
```
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a signature-detection fine-tuned model
# Inference using the model
results = model.predict("https://ultralytics.com/assets/signature-s.mp4", conf=0.75)
```
=== "CLI"
```bash
# Start prediction with a finetuned *.pt model
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/signature-s.mp4" conf=0.75
```
## Sample Images and Annotations
The signature detection dataset comprises a wide variety of images showcasing different document types and annotated signatures. Below are examples of images from the dataset, each accompanied by its corresponding annotations.
![Signature detection dataset sample image](https://github.com/ultralytics/docs/releases/download/0/signature-detection-mosaiced-sample.avif)
- **Mosaiced Image**: Here, we present a training batch consisting of mosaiced dataset images. Mosaicing, a training technique, combines multiple images into one, enriching batch diversity. This method helps enhance the model's ability to generalize across different signature sizes, aspect ratios, and contexts.
This example illustrates the variety and complexity of images in the Signature Detection Dataset, emphasizing the benefits of including mosaicing during the training process.
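Mosaic behavior can be tuned through standard training arguments. The sketch below keeps mosaic enabled for most of the run and turns it off for the final epochs; the argument values shown match Ultralytics defaults and can be adjusted.

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# mosaic=1.0 keeps mosaic augmentation enabled throughout training, while
# close_mosaic=10 disables it for the last 10 epochs so the model fine-tunes
# on un-mosaiced, realistically framed document images
results = model.train(data="signature.yaml", epochs=100, imgsz=640, mosaic=1.0, close_mosaic=10)
```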
## Citations and Acknowledgments
The dataset has been released under the [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE).
## FAQ
### What is the Signature Detection Dataset, and how can it be used?
The Signature Detection Dataset is a collection of annotated images aimed at detecting human signatures within various document types. It can be applied in computer vision tasks such as [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking, primarily for document verification, fraud detection, and archival research. This dataset helps train models to recognize signatures in different contexts, making it valuable for both research and practical applications in [smart document analysis](https://www.ultralytics.com/blog/using-ultralytics-yolo11-for-smart-document-analysis).
### How do I train a YOLO11n model on the Signature Detection Dataset?
To train a YOLO11n model on the Signature Detection Dataset, follow these steps:
1. Download the `signature.yaml` dataset configuration file from [signature.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/signature.yaml).
2. Use the following Python script or CLI command to start training:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n.pt")
# Train the model
results = model.train(data="signature.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
yolo detect train data=signature.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For more details, refer to the [Training](../../modes/train.md) page.
### What are the main applications of the Signature Detection Dataset?
The Signature Detection Dataset can be used for:
1. **Document Verification**: Automatically verifying the presence and authenticity of human signatures in documents.
2. **Fraud Detection**: Identifying forged or fraudulent signatures in legal and financial documents.
3. **Archival Research**: Assisting historians and archivists in the digital analysis and cataloging of historical documents.
4. **Education**: Supporting academic research and teaching in the fields of computer vision and [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml).
5. **Financial Services**: Enhancing security in banking transactions and loan processing by verifying signature authenticity.
### How can I perform inference using a model trained on the Signature Detection Dataset?
To perform inference using a model trained on the Signature Detection Dataset, follow these steps:
1. Load your fine-tuned model.
2. Use the below Python script or CLI command to perform inference:
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load the fine-tuned model
model = YOLO("path/to/best.pt")
# Perform inference
results = model.predict("https://ultralytics.com/assets/signature-s.mp4", conf=0.75)
```
=== "CLI"
```bash
yolo detect predict model='path/to/best.pt' imgsz=640 source="https://ultralytics.com/assets/signature-s.mp4" conf=0.75
```
### What is the structure of the Signature Detection Dataset, and where can I find more information?
The Signature Detection Dataset is divided into two subsets:
- **Training Set**: Contains 143 images with annotations.
- **Validation Set**: Includes 35 images with annotations.
For detailed information, you can refer to the [Dataset Structure](#dataset-structure) section. Additionally, view the complete dataset configuration in the `signature.yaml` file located at [signature.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/signature.yaml).

View File

@ -0,0 +1,189 @@
---
comments: true
description: Explore the SKU-110k dataset of densely packed retail shelf images, perfect for training and evaluating deep learning models in object detection tasks.
keywords: SKU-110k, dataset, object detection, retail shelf images, deep learning, computer vision, model training
---
# SKU-110k Dataset
The [SKU-110k](https://github.com/eg4000/SKU110K_CVPR19) dataset is a collection of densely packed retail shelf images, designed to support research in [object detection](https://www.ultralytics.com/glossary/object-detection) tasks. Developed by Eran Goldman et al., the dataset contains over 110,000 unique stock keeping unit (SKU) categories with densely packed objects, often looking similar or even identical, positioned in close proximity.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/_gRqR-miFPE"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train YOLOv10 on SKU-110k Dataset using Ultralytics | Retail Dataset
</p>
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/densely-packed-retail-shelf.avif)
## Key Features
- SKU-110k contains images of store shelves from around the world, featuring densely packed objects that pose challenges for state-of-the-art object detectors.
- The dataset includes over 110,000 unique SKU categories, providing a diverse range of object appearances.
- Annotations include bounding boxes for objects and SKU category labels.
## Dataset Structure
The SKU-110k dataset is organized into three main subsets:
1. **Training set**: This subset contains 8,219 images and annotations used for training object detection models.
2. **Validation set**: This subset consists of 588 images and annotations used for model validation during training.
3. **Test set**: This subset includes 2,936 images designed for the final evaluation of trained object detection models.
## Applications
The SKU-110k dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection tasks, especially in densely packed scenes such as retail shelf displays. Its applications include:
- Retail inventory management and automation
- Product recognition in e-commerce platforms
- Planogram compliance verification
- Self-checkout systems in stores
- Robotic picking and sorting in warehouses
The dataset's diverse set of SKU categories and densely packed object arrangements make it a valuable resource for researchers and practitioners in the field of [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv).
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains the dataset's paths, classes, and other relevant details. For the SKU-110K dataset, the `SKU-110K.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/SKU-110K.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/SKU-110K.yaml).
!!! example "ultralytics/cfg/datasets/SKU-110K.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/SKU-110K.yaml"
```
## Usage
To train a YOLO11n model on the SKU-110K dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="SKU-110K.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=SKU-110K.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The SKU-110k dataset contains a diverse set of retail shelf images with densely packed objects, providing rich context for object detection tasks. Here are some examples of data from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/densely-packed-retail-shelf-1.avif)
- **Densely packed retail shelf image**: This image demonstrates an example of densely packed objects in a retail shelf setting. Objects are annotated with bounding boxes and SKU category labels.
The example showcases the variety and complexity of the data in the SKU-110k dataset and highlights the importance of high-quality data for object detection tasks. The dense arrangement of products presents unique challenges for detection algorithms, making this dataset particularly valuable for developing robust retail-focused computer vision solutions.
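Because shelves in this dataset routinely contain far more objects than a typical photo, it can help to raise the per-image detection cap at inference time. The sketch below assumes a checkpoint fine-tuned on SKU-110K; the file paths are placeholders.

```python
from ultralytics import YOLO

# "path/to/best.pt" is a placeholder for a checkpoint fine-tuned on SKU-110K
model = YOLO("path/to/best.pt")

# Densely packed shelves can exceed the default per-image detection limit,
# so raise max_det to keep all detected products
results = model.predict("path/to/shelf.jpg", imgsz=640, conf=0.25, max_det=1000)
print(len(results[0].boxes), "products detected")
```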
## Citations and Acknowledgments
If you use the SKU-110k dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{goldman2019dense,
author = {Eran Goldman and Roei Herzig and Aviv Eisenschtat and Jacob Goldberger and Tal Hassner},
title = {Precise Detection in Densely Packed Scenes},
booktitle = {Proc. Conf. Comput. Vision Pattern Recognition (CVPR)},
year = {2019}
}
```
We would like to acknowledge Eran Goldman et al. for creating and maintaining the SKU-110k dataset as a valuable resource for the computer vision research community. For more information about the SKU-110k dataset and its creators, visit the [SKU-110k dataset GitHub repository](https://github.com/eg4000/SKU110K_CVPR19).
## FAQ
### What is the SKU-110k dataset and why is it important for object detection?
The SKU-110k dataset consists of densely packed retail shelf images designed to aid research in object detection tasks. Developed by Eran Goldman et al., it includes over 110,000 unique SKU categories. Its importance lies in its ability to challenge state-of-the-art object detectors with diverse object appearances and proximity, making it an invaluable resource for researchers and practitioners in computer vision. Learn more about the dataset's structure and applications in our [SKU-110k Dataset](#sku-110k-dataset) section.
### How do I train a YOLO11 model using the SKU-110k dataset?
Training a YOLO11 model on the SKU-110k dataset is straightforward. Here's an example to train a YOLO11n model for 100 epochs with an image size of 640:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="SKU-110K.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=SKU-110K.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
### What are the main subsets of the SKU-110k dataset?
The SKU-110k dataset is organized into three main subsets:
1. **Training set**: Contains 8,219 images and annotations used for training object detection models.
2. **Validation set**: Consists of 588 images and annotations used for model validation during training.
3. **Test set**: Includes 2,936 images designed for the final evaluation of trained object detection models.
Refer to the [Dataset Structure](#dataset-structure) section for more details.
### How do I configure the SKU-110k dataset for training?
The SKU-110k dataset configuration is defined in a YAML file, which includes details about the dataset's paths, classes, and other relevant information. The `SKU-110K.yaml` file is maintained at [SKU-110K.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/SKU-110K.yaml). For example, you can train a model using this configuration as shown in our [Usage](#usage) section.
### What are the key features of the SKU-110k dataset in the context of [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl)?
The SKU-110k dataset features images of store shelves from around the world, showcasing densely packed objects that pose significant challenges for object detectors:
- Over 110,000 unique SKU categories
- Diverse object appearances
- Annotations include bounding boxes and SKU category labels
These features make the SKU-110k dataset particularly valuable for training and evaluating deep learning models in object detection tasks. For more details, see the [Key Features](#key-features) section.
### How do I cite the SKU-110k dataset in my research?
If you use the SKU-110k dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{goldman2019dense,
author = {Eran Goldman and Roei Herzig and Aviv Eisenschtat and Jacob Goldberger and Tal Hassner},
title = {Precise Detection in Densely Packed Scenes},
booktitle = {Proc. Conf. Comput. Vision Pattern Recognition (CVPR)},
year = {2019}
}
```
More information about the dataset can be found in the [Citations and Acknowledgments](#citations-and-acknowledgments) section.

View File

@ -0,0 +1,179 @@
---
comments: true
description: Explore the VisDrone Dataset, a large-scale benchmark for drone-based image and video analysis with over 2.6 million annotations for objects like pedestrians and vehicles.
keywords: VisDrone, drone dataset, computer vision, object detection, object tracking, crowd counting, machine learning, deep learning
---
# VisDrone Dataset
The [VisDrone Dataset](https://github.com/VisDrone/VisDrone-Dataset) is a large-scale benchmark created by the AISKYEYE team at the Lab of [Machine Learning](https://www.ultralytics.com/glossary/machine-learning-ml) and Data Mining, Tianjin University, China. It contains carefully annotated ground truth data for various computer vision tasks related to drone-based image and video analysis.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/28JV4rbzklM"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train Ultralytics YOLO Models on the VisDrone Dataset for Drone Image Analysis
</p>
VisDrone is composed of 288 video clips with 261,908 frames and 10,209 static images, captured by various drone-mounted cameras. The dataset covers a wide range of aspects, including location (14 different cities across China), environment (urban and rural), objects (pedestrians, vehicles, bicycles, etc.), and density (sparse and crowded scenes). The dataset was collected using various drone platforms under different scenarios and weather and lighting conditions. These frames are manually annotated with over 2.6 million bounding boxes of targets such as pedestrians, cars, bicycles, and tricycles. Attributes like scene visibility, object class, and occlusion are also provided for better data utilization.
## Dataset Structure
The VisDrone dataset is organized into five main subsets, each focusing on a specific task:
1. **Task 1**: Object detection in images
2. **Task 2**: Object detection in videos
3. **Task 3**: Single-object tracking
4. **Task 4**: [Multi-object tracking](../index.md#multi-object-tracking)
5. **Task 5**: Crowd counting
## Applications
The VisDrone dataset is widely used for training and evaluating deep learning models in drone-based [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks such as object detection, object tracking, and crowd counting. The dataset's diverse set of sensor data, object annotations, and attributes make it a valuable resource for researchers and practitioners in the field of drone-based computer vision.
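Beyond detection, a trained model can also be applied to the dataset's tracking tasks using the built-in Ultralytics tracking mode. The sketch below assumes a VisDrone fine-tuned checkpoint and an illustrative video path.

```python
from ultralytics import YOLO

# "path/to/best.pt" and the video path are placeholders
model = YOLO("path/to/best.pt")

# Multi-object tracking on a drone video clip using the bundled ByteTrack tracker
results = model.track(source="path/to/drone_clip.mp4", tracker="bytetrack.yaml", save=True)
```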
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains the dataset's paths, classes, and other relevant details. In the case of the VisDrone dataset, the `VisDrone.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VisDrone.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VisDrone.yaml).
!!! example "ultralytics/cfg/datasets/VisDrone.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/VisDrone.yaml"
```
## Usage
To train a YOLO11n model on the VisDrone dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="VisDrone.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=VisDrone.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The VisDrone dataset contains a diverse set of images and videos captured by drone-mounted cameras. Here are some examples of data from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/visdrone-object-detection-sample.avif)
- **Task 1**: [Object detection](https://www.ultralytics.com/glossary/object-detection) in images - This image demonstrates an example of object detection in images, where objects are annotated with [bounding boxes](https://www.ultralytics.com/glossary/bounding-box). The dataset provides a wide variety of images taken from different locations, environments, and densities to facilitate the development of models for this task.
The example showcases the variety and complexity of the data in the VisDrone dataset and highlights the importance of high-quality sensor data for drone-based computer vision tasks.
## Citations and Acknowledgments
If you use the VisDrone dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3119563}}
```
We would like to acknowledge the AISKYEYE team at the Lab of Machine Learning and [Data Mining](https://www.ultralytics.com/glossary/data-mining), Tianjin University, China, for creating and maintaining the VisDrone dataset as a valuable resource for the drone-based computer vision research community. For more information about the VisDrone dataset and its creators, visit the [VisDrone Dataset GitHub repository](https://github.com/VisDrone/VisDrone-Dataset).
## FAQ
### What is the VisDrone Dataset and what are its key features?
The [VisDrone Dataset](https://github.com/VisDrone/VisDrone-Dataset) is a large-scale benchmark created by the AISKYEYE team at Tianjin University, China. It is designed for various computer vision tasks related to drone-based image and video analysis. Key features include:
- **Composition**: 288 video clips with 261,908 frames and 10,209 static images.
- **Annotations**: Over 2.6 million bounding boxes for objects like pedestrians, cars, bicycles, and tricycles.
- **Diversity**: Collected across 14 cities, in urban and rural settings, under different weather and lighting conditions.
- **Tasks**: Split into five main tasks—object detection in images and videos, single-object and multi-object tracking, and crowd counting.
### How can I use the VisDrone Dataset to train a YOLO11 model with Ultralytics?
To train a YOLO11 model on the VisDrone dataset for 100 epochs with an image size of 640, you can follow these steps:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a pretrained model
model = YOLO("yolo11n.pt")
# Train the model
results = model.train(data="VisDrone.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=VisDrone.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For additional configuration options, please refer to the model [Training](../../modes/train.md) page.
### What are the main subsets of the VisDrone dataset and their applications?
The VisDrone dataset is divided into five main subsets, each tailored for a specific computer vision task:
1. **Task 1**: Object detection in images.
2. **Task 2**: Object detection in videos.
3. **Task 3**: Single-object tracking.
4. **Task 4**: Multi-object tracking.
5. **Task 5**: Crowd counting.
These subsets are widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in drone-based applications such as surveillance, traffic monitoring, and public safety.
### Where can I find the configuration file for the VisDrone dataset in Ultralytics?
The configuration file for the VisDrone dataset, `VisDrone.yaml`, can be found in the Ultralytics repository at the following link:
[VisDrone.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VisDrone.yaml).
### How can I cite the VisDrone dataset if I use it in my research?
If you use the VisDrone dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3119563}
}
```

View File

@ -0,0 +1,137 @@
---
comments: true
description: Discover the PASCAL VOC dataset, essential for object detection, segmentation, and classification. Learn key features, applications, and usage tips.
keywords: PASCAL VOC, VOC dataset, object detection, segmentation, classification, YOLO, Faster R-CNN, Mask R-CNN, image annotations, computer vision
---
# VOC Dataset
The [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/) (Visual Object Classes) dataset is a well-known object detection, segmentation, and classification dataset. It is designed to encourage research on a wide variety of object categories and is commonly used for benchmarking computer vision models, making it an essential resource for researchers and developers working on these tasks.
## Key Features
- VOC dataset includes two main challenges: VOC2007 and VOC2012.
- The dataset comprises 20 object categories, including common objects like cars, bicycles, and animals, as well as more specific categories such as boats, sofas, and dining tables.
- Annotations include object bounding boxes and class labels for object detection and classification tasks, and segmentation masks for the segmentation tasks.
- VOC provides standardized evaluation metrics like [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) for object detection and classification, making it suitable for comparing model performance.
## Dataset Structure
The VOC dataset is split into three subsets:
1. **Train**: This subset contains images for training object detection, segmentation, and classification models.
2. **Validation**: This subset has images used for validation purposes during model training.
3. **Test**: This subset consists of images used for testing and benchmarking the trained models. Ground truth annotations for this subset are not publicly available, and the results are submitted to the [PASCAL VOC evaluation server](http://host.robots.ox.ac.uk:8080/leaderboard/displaylb.php) for performance evaluation.
## Applications
The VOC dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in object detection (such as [Ultralytics YOLO](https://docs.ultralytics.com/models/yolo11/), [Faster R-CNN](https://arxiv.org/abs/1506.01497), and [SSD](https://arxiv.org/abs/1512.02325)), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) (such as [Mask R-CNN](https://arxiv.org/abs/1703.06870)), and [image classification](https://www.ultralytics.com/glossary/image-classification). The dataset's diverse set of object categories, large number of annotated images, and standardized evaluation metrics make it an essential resource for [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) researchers and practitioners.
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains the dataset's paths, classes, and other relevant details. In the case of the VOC dataset, the `VOC.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VOC.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VOC.yaml).
!!! example "ultralytics/cfg/datasets/VOC.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/VOC.yaml"
```
## Usage
To train a YOLO11n model on the VOC dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="VOC.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=VOC.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
The VOC dataset contains a diverse set of images with various object categories and complex scenes. Here are some examples of images from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/mosaiced-voc-dataset-sample.avif)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the VOC dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the VOC dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{everingham2010pascal,
title={The PASCAL Visual Object Classes (VOC) Challenge},
author={Mark Everingham and Luc Van Gool and Christopher K. I. Williams and John Winn and Andrew Zisserman},
year={2010},
eprint={0909.5206},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the PASCAL VOC Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the VOC dataset and its creators, visit the [PASCAL VOC dataset website](http://host.robots.ox.ac.uk/pascal/VOC/).
## FAQ
### What is the PASCAL VOC dataset and why is it important for computer vision tasks?
The [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/) (Visual Object Classes) dataset is a renowned benchmark for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification in computer vision. It includes comprehensive annotations like bounding boxes, class labels, and segmentation masks across 20 different object categories. Researchers use it widely to evaluate the performance of models like Faster R-CNN, YOLO, and Mask R-CNN due to its standardized evaluation metrics such as mean Average Precision (mAP).
### How do I train a YOLO11 model using the VOC dataset?
To train a YOLO11 model with the VOC dataset, you need the dataset configuration in a YAML file. Here's an example to start training a YOLO11n model for 100 epochs with an image size of 640:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="VOC.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=VOC.yaml model=yolo11n.pt epochs=100 imgsz=640
```
### What are the primary challenges included in the VOC dataset?
The VOC dataset includes two main challenges: VOC2007 and VOC2012. These challenges test object detection, segmentation, and classification across 20 diverse object categories. Each image is meticulously annotated with bounding boxes, class labels, and segmentation masks. The challenges provide standardized metrics like mAP, facilitating the comparison and benchmarking of different computer vision models.
### How does the PASCAL VOC dataset enhance model benchmarking and evaluation?
The PASCAL VOC dataset enhances model benchmarking and evaluation through its detailed annotations and standardized metrics like mean Average [Precision](https://www.ultralytics.com/glossary/precision) (mAP). These metrics are crucial for assessing the performance of object detection and classification models. The dataset's diverse and complex images ensure comprehensive model evaluation across various real-world scenarios.
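To obtain these standardized metrics for your own model, the Ultralytics validation mode reports mAP directly. The sketch below assumes a checkpoint already trained on VOC; the path is a placeholder.

```python
from ultralytics import YOLO

# "path/to/best.pt" is a placeholder for a checkpoint trained on VOC
model = YOLO("path/to/best.pt")

# Validate on the VOC validation split and read the standard mAP metrics
metrics = model.val(data="VOC.yaml", imgsz=640)
print(f"mAP50-95: {metrics.box.map:.3f}")
print(f"mAP50:    {metrics.box.map50:.3f}")
```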
### How do I use the VOC dataset for [semantic segmentation](https://www.ultralytics.com/glossary/semantic-segmentation) in YOLO models?
To use the VOC dataset for semantic segmentation tasks with YOLO models, you need to configure the dataset properly in a YAML file. The YAML file defines paths and classes needed for training segmentation models. Check the VOC dataset YAML configuration file at [VOC.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VOC.yaml) for detailed setups. For segmentation tasks, you would use a segmentation-specific model like `yolo11n-seg.pt` instead of the detection model.
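As a rough illustration of that last point, the sketch below swaps in a segmentation checkpoint. Note that `VOC-seg.yaml` is a hypothetical configuration name used here for illustration: the dataset it points to must provide polygon mask labels rather than only bounding boxes.

```python
from ultralytics import YOLO

# Load a segmentation-specific model as mentioned above
model = YOLO("yolo11n-seg.pt")

# "VOC-seg.yaml" is a hypothetical config; its labels must be in segmentation
# (polygon mask) format, not detection-only bounding boxes
results = model.train(data="VOC-seg.yaml", epochs=100, imgsz=640)
```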

View File

@ -0,0 +1,179 @@
---
comments: true
description: Explore the xView dataset, a rich resource of 1M+ object instances in high-resolution satellite imagery. Enhance detection, learning efficiency, and more.
keywords: xView dataset, overhead imagery, satellite images, object detection, high resolution, bounding boxes, computer vision, TensorFlow, PyTorch, dataset structure
---
# xView Dataset
The [xView](http://xviewdataset.org/) dataset is one of the largest publicly available datasets of overhead imagery, containing images from complex scenes around the world annotated using bounding boxes. The goal of the xView dataset is to accelerate progress in four [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) frontiers:
1. Reduce minimum resolution for detection.
2. Improve learning efficiency.
3. Enable discovery of more object classes.
4. Improve detection of fine-grained classes.
xView builds on the success of challenges like [Common Objects in Context (COCO)](../detect/coco.md) and aims to leverage computer vision to analyze the growing amount of available imagery from space in order to understand the visual world in new ways and address a range of important applications.
## Key Features
- xView contains over 1 million object instances across 60 classes.
- The dataset has a resolution of 0.3 meters, providing higher resolution imagery than most public satellite imagery datasets.
- xView features a diverse collection of small, rare, fine-grained, and multi-type objects with [bounding box](https://www.ultralytics.com/glossary/bounding-box) annotation.
- Comes with a pre-trained baseline model using the [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) object detection API and an example for [PyTorch](https://www.ultralytics.com/glossary/pytorch).
## Dataset Structure
The xView dataset is composed of satellite images collected from WorldView-3 satellites at a 0.3m ground sample distance. It contains over 1 million objects across 60 classes in over 1,400 km² of imagery. The dataset is particularly valuable for [remote sensing](https://www.ultralytics.com/blog/using-computer-vision-to-analyse-satellite-imagery) applications and environmental monitoring.
## Applications
The xView dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models for object detection in overhead imagery. The dataset's diverse set of object classes and high-resolution imagery make it a valuable resource for researchers and practitioners in the field of computer vision, especially for satellite imagery analysis. Applications include:
- Military and defense reconnaissance
- Urban planning and development
- Environmental monitoring
- Disaster response and assessment
- Infrastructure mapping and management
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains the dataset's paths, classes, and other relevant details. In the case of the xView dataset, the `xView.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/xView.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/xView.yaml).
!!! example "ultralytics/cfg/datasets/xView.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/xView.yaml"
```
## Usage
To train a model on the xView dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="xView.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=xView.yaml model=yolo11n.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The xView dataset contains high-resolution satellite images with a diverse set of objects annotated using bounding boxes. Here are some examples of data from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/overhead-imagery-object-detection.avif)
- **Overhead Imagery**: This image demonstrates an example of [object detection](https://www.ultralytics.com/glossary/object-detection) in overhead imagery, where objects are annotated with bounding boxes. The dataset provides high-resolution satellite images to facilitate the development of models for this task.
The example showcases the variety and complexity of the data in the xView dataset and highlights the importance of high-quality satellite imagery for object detection tasks.
## Related Datasets
If you're working with satellite imagery, you might also be interested in exploring these related datasets:
- [DOTA-v2](../obb/dota-v2.md): A dataset for oriented object detection in aerial images
- [VisDrone](../detect/visdrone.md): A dataset for object detection and tracking in drone-captured imagery
- [Argoverse](../detect/argoverse.md): A dataset for autonomous driving with 3D tracking annotations
## Citations and Acknowledgments
If you use the xView dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lam2018xview,
title={xView: Objects in Context in Overhead Imagery},
author={Darius Lam and Richard Kuzma and Kevin McGee and Samuel Dooley and Michael Laielli and Matthew Klaric and Yaroslav Bulatov and Brendan McCord},
year={2018},
eprint={1802.07856},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the [Defense Innovation Unit](https://www.diu.mil/) (DIU) and the creators of the xView dataset for their valuable contribution to the computer vision research community. For more information about the xView dataset and its creators, visit the [xView dataset website](http://xviewdataset.org/).
## FAQ
### What is the xView dataset and how does it benefit computer vision research?
The [xView](http://xviewdataset.org/) dataset is one of the largest publicly available collections of high-resolution overhead imagery, containing over 1 million object instances across 60 classes. It is designed to enhance various facets of computer vision research such as reducing the minimum resolution for detection, improving learning efficiency, discovering more object classes, and advancing fine-grained object detection.
### How can I use Ultralytics YOLO to train a model on the xView dataset?
To train a model on the xView dataset using [Ultralytics YOLO](https://docs.ultralytics.com/models/yolo11/), follow these steps:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="xView.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo detect train data=xView.yaml model=yolo11n.pt epochs=100 imgsz=640
```
For detailed arguments and settings, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the xView dataset?
The xView dataset stands out due to its comprehensive set of features:
- Over 1 million object instances across 60 distinct classes.
- High-resolution imagery at 0.3 meters.
- Diverse object types including small, rare, and fine-grained objects, all annotated with bounding boxes.
- Availability of a pre-trained baseline model and examples in [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and PyTorch.
### What is the dataset structure of xView, and how is it annotated?
The xView dataset comprises high-resolution satellite images collected from WorldView-3 satellites at a 0.3m ground sample distance. It encompasses over 1 million objects across 60 classes in approximately 1,400 km² of imagery. Each object within the dataset is annotated with bounding boxes, making it ideal for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models for object detection in overhead imagery. For a detailed overview, you can look at the dataset structure section [here](#dataset-structure).
### How do I cite the xView dataset in my research?
If you utilize the xView dataset in your research, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lam2018xview,
title={xView: Objects in Context in Overhead Imagery},
author={Darius Lam and Richard Kuzma and Kevin McGee and Samuel Dooley and Michael Laielli and Matthew Klaric and Yaroslav Bulatov and Brendan McCord},
year={2018},
eprint={1802.07856},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
For more information about the xView dataset, visit the official [xView dataset website](http://xviewdataset.org/).

View File

@ -0,0 +1,396 @@
---
comments: true
description: Explore the Ultralytics Explorer API for dataset exploration with SQL queries, vector similarity search, and semantic search. Learn installation and usage tips.
keywords: Ultralytics, Explorer API, dataset exploration, SQL queries, similarity search, semantic search, Python API, LanceDB, embeddings, data analysis
---
# Ultralytics Explorer API
!!! warning "Community Note ⚠️"
As of **`ultralytics>=8.3.10`**, Ultralytics explorer support has been deprecated. But don't worry! You can now access similar and even enhanced functionality through [Ultralytics HUB](https://hub.ultralytics.com/), our intuitive no-code platform designed to streamline your workflow. With Ultralytics HUB, you can continue exploring, visualizing, and managing your data effortlessly, all without writing a single line of code. Make sure to check it out and take advantage of its powerful features!🚀
## Introduction
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
The Explorer API is a Python API for exploring your datasets. It supports filtering and searching your dataset using SQL queries, vector similarity search and semantic search.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/3VryynorQeo?start=279"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics Explorer API Overview
</p>
## Installation
Explorer depends on external libraries for some of its functionality. These are automatically installed on usage. To manually install these dependencies, use the following command:
```bash
pip install ultralytics[explorer]
```
## Usage
```python
from ultralytics import Explorer
# Create an Explorer object
explorer = Explorer(data="coco128.yaml", model="yolo11n.pt")
# Create embeddings for your dataset
explorer.create_embeddings_table()
# Search for similar images to a given image/images
dataframe = explorer.get_similar(img="path/to/image.jpg")
# Or search for similar images to a given index/indices
dataframe = explorer.get_similar(idx=0)
```
!!! note
The [embeddings](https://www.ultralytics.com/glossary/embeddings) table for a given dataset and model pair is only created once and reused. These use [LanceDB](https://lancedb.github.io/lancedb/) under the hood, which scales on-disk, so you can create and reuse embeddings for large datasets like COCO without running out of memory.
In case you want to force update the embeddings table, you can pass `force=True` to the `create_embeddings_table` method.
You can directly access the LanceDB table object to perform advanced analysis. Learn more about it in the [Working with Embeddings Table section](#4-working-with-embeddings-table).
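For example, here is a minimal sketch of forcing a rebuild of the embeddings table after the underlying dataset has changed:

```python
from ultralytics import Explorer

explorer = Explorer(data="coco128.yaml", model="yolo11n.pt")

# Rebuild the embeddings table from scratch instead of reusing the cached one
explorer.create_embeddings_table(force=True)
```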
## 1. Similarity Search
Similarity search is a technique for finding similar images to a given image. It is based on the idea that similar images will have similar embeddings. Once the embeddings table is built, you can run semantic search in any of the following ways:
- On a given index or list of indices in the dataset: `exp.get_similar(idx=[1,10], limit=10)`
- On any image or list of images not in the dataset: `exp.get_similar(img=["path/to/img1", "path/to/img2"], limit=10)`
In case of multiple inputs, the aggregate of their embeddings is used.
You get a pandas dataframe with the `limit` number of most similar data points to the input, along with their distance in the embedding space. You can use this dataframe to perform further filtering.
!!! example "Semantic Search"
=== "Using Images"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
print(similar.head())
# Search using multiple indices
similar = exp.get_similar(
img=["https://ultralytics.com/images/bus.jpg", "https://ultralytics.com/images/bus.jpg"],
limit=10,
)
print(similar.head())
```
=== "Using Dataset Indices"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
similar = exp.get_similar(idx=1, limit=10)
print(similar.head())
# Search using multiple indices
similar = exp.get_similar(idx=[1, 10], limit=10)
print(similar.head())
```
### Plotting Similar Images
You can also plot the similar images using the `plot_similar` method. This method takes the same arguments as `get_similar` and plots the similar images in a grid.
!!! example "Plotting Similar Images"
=== "Using Images"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
plt = exp.plot_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
plt.show()
```
=== "Using Dataset Indices"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
plt = exp.plot_similar(idx=1, limit=10)
plt.show()
```
## 2. Ask AI (Natural Language Querying)
This allows you to write how you want to filter your dataset using natural language. You don't have to be proficient in writing SQL queries. Our AI-powered query generator will automatically do that under the hood. For example, you can say "show me 100 images with exactly one person and 2 dogs. There can be other objects too" and it'll internally generate the query and show you those results.
Note: This works using LLMs under the hood, so the results are probabilistic and might sometimes get things wrong.
!!! example "Ask AI"
```python
from ultralytics import Explorer
from ultralytics.data.explorer import plot_query_result
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
df = exp.ask_ai("show me 100 images with exactly one person and 2 dogs. There can be other objects too")
print(df.head())
# plot the results
plt = plot_query_result(df)
plt.show()
```
## 3. SQL Querying
You can run SQL queries on your dataset using the `sql_query` method. This method takes a SQL query as input and returns a pandas dataframe with the results.
!!! example "SQL Query"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
df = exp.sql_query("WHERE labels LIKE '%person%' AND labels LIKE '%dog%'")
print(df.head())
```
### Plotting SQL Query Results
You can also plot the results of a SQL query using the `plot_sql_query` method. This method takes the same arguments as `sql_query` and plots the results in a grid.
!!! example "Plotting SQL Query Results"
```python
from ultralytics import Explorer
# create an Explorer object
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
# plot the SQL Query
exp.plot_sql_query("WHERE labels LIKE '%person%' AND labels LIKE '%dog%' LIMIT 10")
```
## 4. Working with Embeddings Table
You can also work with the embeddings table directly. Once the embeddings table is created, you can access it using `Explorer.table`.
!!! tip
Explorer works on [LanceDB](https://lancedb.github.io/lancedb/) tables internally. You can access this table directly using the `Explorer.table` object to run raw queries, push down pre- and post-filters, and more.
```python
from ultralytics import Explorer
exp = Explorer()
exp.create_embeddings_table()
table = exp.table
```
Here are some examples of what you can do with the table:
### Get raw Embeddings
!!! example
```python
from ultralytics import Explorer
exp = Explorer()
exp.create_embeddings_table()
table = exp.table
embeddings = table.to_pandas()["vector"]
print(embeddings)
```
### Advanced Querying with pre- and post-filters
!!! example
```python
from ultralytics import Explorer
exp = Explorer(model="yolo11n.pt")
exp.create_embeddings_table()
table = exp.table
# Dummy embedding
embedding = [i for i in range(256)]
rs = table.search(embedding).metric("cosine").where("").limit(10)
```
### Create Vector Index
When using large datasets, you can also create a dedicated vector index for faster querying. This is done using the `create_index` method on LanceDB table.
```python
table.create_index(num_partitions=..., num_sub_vectors=...)
```
Find more details on the types of vector indices available and their parameters [here](https://lancedb.github.io/lancedb/ann_indexes/#types-of-index). In the future, we will add support for creating vector indices directly from the Explorer API.
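As a rough sketch only, here is what creating an index could look like; the parameter values below are illustrative assumptions and should be tuned to the size of your table and the dimension of your vectors, and indexing generally only pays off on large datasets:

```python
from ultralytics import Explorer

# Build an embeddings table on a large dataset first (e.g. full COCO), then index it
exp = Explorer(data="coco.yaml", model="yolo11n.pt")
exp.create_embeddings_table()

# Illustrative values: tune num_partitions and num_sub_vectors to your table size and vector dimension
exp.table.create_index(num_partitions=256, num_sub_vectors=16)
```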
## 5. Embeddings Applications
You can use the embeddings table to perform a variety of exploratory analysis. Here are some examples:
### Similarity Index
Explorer comes with a `similarity_index` operation:
- It tries to estimate how similar each data point is to the rest of the dataset.
- It does that by counting how many image embeddings lie closer than `max_dist` to the current image in the generated embedding space, considering `top_k` similar images at a time.
It returns a pandas dataframe with the following columns:
- `idx`: Index of the image in the dataset
- `im_file`: Path to the image file
- `count`: Number of images in the dataset that are closer than `max_dist` to the current image
- `sim_im_files`: List of paths to the `count` similar images
!!! tip
For a given dataset, model, `max_dist` and `top_k`, the similarity index, once generated, will be reused. If your dataset has changed, or you simply need to regenerate the similarity index, you can pass `force=True`.
!!! example "Similarity Index"
```python
from ultralytics import Explorer
exp = Explorer()
exp.create_embeddings_table()
sim_idx = exp.similarity_index()
```
You can use the similarity index to build custom conditions for filtering the dataset. For example, you can select images that are similar to more than 30 other images in the dataset using the following code:
```python
import numpy as np
sim_count = np.array(sim_idx["count"])
sim_idx["im_file"][sim_count > 30]
```
### Visualize Embedding Space
You can also visualize the embedding space using the plotting tool of your choice. For example, here is a simple 3D visualization using Matplotlib:
```python
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
# Stack the embeddings retrieved above (table.to_pandas()["vector"]) into a 2D NumPy array
embeddings = np.array(embeddings.tolist())
# Reduce dimensions using PCA to 3 components for visualization in 3D
pca = PCA(n_components=3)
reduced_data = pca.fit_transform(embeddings)
# Create a 3D scatter plot using Matplotlib Axes3D
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
# Scatter plot
ax.scatter(reduced_data[:, 0], reduced_data[:, 1], reduced_data[:, 2], alpha=0.5)
ax.set_title("3D Scatter Plot of Reduced 256-Dimensional Data (PCA)")
ax.set_xlabel("Component 1")
ax.set_ylabel("Component 2")
ax.set_zlabel("Component 3")
plt.show()
```
Start creating your own CV dataset exploration reports using the Explorer API. For inspiration, check out the [VOC Exploration Example](explorer.md).
## Apps Built Using Ultralytics Explorer
Try our [GUI Demo](dashboard.md) based on Explorer API
## Coming Soon
- [ ] Merge specific labels from datasets. Example - Import all `person` labels from COCO and `car` labels from Cityscapes
- [ ] Remove images that have a higher similarity index than the given threshold
- [ ] Automatically persist new datasets after merging/removing entries
- [ ] Advanced Dataset Visualizations
## FAQ
### What is the Ultralytics Explorer API used for?
The Ultralytics Explorer API is designed for comprehensive dataset exploration. It allows users to filter and search datasets using SQL queries, vector similarity search, and semantic search. This powerful Python API can handle large datasets, making it ideal for various [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks using Ultralytics models.
### How do I install the Ultralytics Explorer API?
To install the Ultralytics Explorer API along with its dependencies, use the following command:
```bash
pip install ultralytics[explorer]
```
This will automatically install all necessary external libraries for the Explorer API functionality. For additional setup details, refer to the [installation section](#installation) of our documentation.
### How can I use the Ultralytics Explorer API for similarity search?
You can use the Ultralytics Explorer API to perform similarity searches by creating an embeddings table and querying it for similar images. Here's a basic example:
```python
from ultralytics import Explorer
# Create an Explorer object
explorer = Explorer(data="coco128.yaml", model="yolo11n.pt")
explorer.create_embeddings_table()
# Search for similar images to a given image
similar_images_df = explorer.get_similar(img="path/to/image.jpg")
print(similar_images_df.head())
```
For more details, please visit the [Similarity Search section](#1-similarity-search).
### What are the benefits of using LanceDB with Ultralytics Explorer?
LanceDB, used under the hood by Ultralytics Explorer, provides scalable, on-disk embeddings tables. This ensures that you can create and reuse embeddings for large datasets like COCO without running out of memory. These tables are only created once and can be reused, enhancing efficiency in data handling.
### How does the Ask AI feature work in the Ultralytics Explorer API?
The Ask AI feature allows users to filter datasets using natural language queries. This feature leverages LLMs to convert these queries into SQL queries behind the scenes. Here's an example:
```python
from ultralytics import Explorer
# Create an Explorer object
explorer = Explorer(data="coco128.yaml", model="yolo11n.pt")
explorer.create_embeddings_table()
# Query with natural language
query_result = explorer.ask_ai("show me 100 images with exactly one person and 2 dogs. There can be other objects too")
print(query_result.head())
```
For more examples, check out the [Ask AI section](#2-ask-ai-natural-language-querying).

View File

@ -0,0 +1,130 @@
---
comments: true
description: Unlock advanced data exploration with Ultralytics Explorer GUI. Utilize semantic search, run SQL queries, and ask AI for natural language data insights.
keywords: Ultralytics Explorer GUI, semantic search, vector similarity, SQL queries, AI, natural language search, data exploration, machine learning, OpenAI, LLMs
---
# Explorer GUI
!!! warning "Community Note ⚠️"
As of **`ultralytics>=8.3.10`**, Ultralytics explorer support has been deprecated. But don't worry! You can now access similar and even enhanced functionality through [Ultralytics HUB](https://hub.ultralytics.com/), our intuitive no-code platform designed to streamline your workflow. With Ultralytics HUB, you can continue exploring, visualizing, and managing your data effortlessly, all without writing a single line of code. Make sure to check it out and take advantage of its powerful features!🚀
Explorer GUI is like a playground built using [Ultralytics Explorer API](api.md). It allows you to run semantic/vector similarity search, SQL queries and even search using natural language using our ask AI feature powered by LLMs.
<p>
<img width="1709" alt="Explorer Dashboard Screenshot 1" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-1.avif">
</p>
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/3VryynorQeo?start=306"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics Explorer Dashboard Overview
</p>
### Installation
```bash
pip install ultralytics[explorer]
```
!!! note
The Ask AI feature works using OpenAI, so you'll be prompted to set the API key for OpenAI when you first run the GUI.
You can set it like this: `yolo settings openai_api_key="..."`
## Vector Semantic Similarity Search
[Semantic search](https://www.ultralytics.com/glossary/semantic-search) is a technique for finding similar images to a given image. It is based on the idea that similar images will have similar [embeddings](https://www.ultralytics.com/glossary/embeddings). In the UI, you can select one or more images and search for the images similar to them. This can be useful when you want to find images similar to a given image or a set of images that don't perform as expected.
For example:
In this VOC Exploration dashboard, the user selects a couple of airplane images like this:
<p>
<img width="1710" alt="Explorer Dashboard Screenshot 2" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-2.avif">
</p>
After performing the similarity search, you should see a result like this:
<p>
<img width="1710" alt="Explorer Dashboard Screenshot 3" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-3.avif">
</p>
## Ask AI
This allows you to write how you want to filter your dataset using natural language. You don't have to be proficient in writing SQL queries. Our AI-powered query generator will automatically do that under the hood. For example, you can say "show me 100 images with exactly one person and 2 dogs. There can be other objects too" and it'll internally generate the query and show you those results. For instance, when asked to "Show 10 images with exactly 5 persons", you'll see a result like this:
<p>
<img width="1709" alt="Explorer Dashboard Screenshot 4" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-4.avif">
</p>
Note: This works using [Large Language Models](https://www.ultralytics.com/glossary/large-language-model-llm) under the hood, so the results are probabilistic and might sometimes get things wrong.
## Run SQL queries on your CV datasets
You can run SQL queries on your dataset to filter it. It also works if you only provide the WHERE clause. The following example SQL query shows only the images that have at least one person and one dog in them:
```sql
WHERE labels LIKE '%person%' AND labels LIKE '%dog%'
```
<p>
<img width="1707" alt="Explorer Dashboard Screenshot 5" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-5.avif">
</p>
This is a Demo built using the Explorer API. You can use the API to build your own exploratory notebooks or scripts to get insights into your datasets. Learn more about the Explorer API [here](api.md).
## FAQ
### What is Ultralytics Explorer GUI and how do I install it?
Ultralytics Explorer GUI is a powerful interface that unlocks advanced data exploration capabilities using the [Ultralytics Explorer API](api.md). It allows you to run semantic/vector similarity search, SQL queries, and natural language queries using the Ask AI feature powered by [Large Language Models](https://www.ultralytics.com/glossary/large-language-model-llm) (LLMs).
To install the Explorer GUI, you can use pip:
```bash
pip install ultralytics[explorer]
```
Note: To use the Ask AI feature, you'll need to set the OpenAI API key: `yolo settings openai_api_key="..."`.
### How does the semantic search feature in Ultralytics Explorer GUI work?
The semantic search feature in Ultralytics Explorer GUI allows you to find images similar to a given image based on their embeddings. This technique is useful for identifying and exploring images that share visual similarities. To use this feature, select one or more images in the UI and execute a search for similar images. The result will display images that closely resemble the selected ones, facilitating efficient dataset exploration and [anomaly detection](https://www.ultralytics.com/glossary/anomaly-detection).
Learn more about semantic search and other features by visiting the [Feature Overview](#vector-semantic-similarity-search) section.
### Can I use natural language to filter datasets in Ultralytics Explorer GUI?
Yes, with the Ask AI feature powered by large language models (LLMs), you can filter your datasets using natural language queries. You don't need to be proficient in SQL. For instance, you can ask "Show me 100 images with exactly one person and 2 dogs. There can be other objects too," and the AI will generate the appropriate query under the hood to deliver the desired results.
See an example of a natural language query [here](#ask-ai).
### How do I run SQL queries on datasets using Ultralytics Explorer GUI?
Ultralytics Explorer GUI allows you to run SQL queries directly on your dataset to filter and manage data efficiently. To run a query, navigate to the SQL query section in the GUI and write your query. For example, to show images with at least one person and one dog, you could use:
```sql
WHERE labels LIKE '%person%' AND labels LIKE '%dog%'
```
You can also provide only the WHERE clause, making the querying process more flexible.
For more details, refer to the [SQL Queries Section](#run-sql-queries-on-your-cv-datasets).
### What are the benefits of using Ultralytics Explorer GUI for data exploration?
Ultralytics Explorer GUI enhances data exploration with features like semantic search, SQL querying, and natural language interactions through the Ask AI feature. These capabilities allow users to:
- Efficiently find visually similar images.
- Filter datasets using complex SQL queries.
- Utilize AI to perform natural language searches, eliminating the need for advanced SQL expertise.
These features make it a versatile tool for developers, researchers, and data scientists looking to gain deeper insights into their datasets.
Explore more about these features in the [Explorer GUI Documentation](#explorer-gui).

View File

@ -0,0 +1,278 @@
---
comments: true
description: Dive into advanced data exploration with Ultralytics Explorer. Perform semantic searches, execute SQL queries, and leverage AI-powered natural language insights for seamless data analysis.
keywords: Ultralytics Explorer, data exploration, semantic search, vector similarity, SQL queries, AI, natural language queries, machine learning, OpenAI, LLMs, Ultralytics HUB
---
# VOC Exploration Example
<div align="center">
<a href="https://www.ultralytics.com/events/yolovision" target="_blank"><img width="1024%" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-banner.avif" alt="Ultralytics YOLO banner"></a>
<a href="https://docs.ultralytics.com/zh">中文</a> |
<a href="https://docs.ultralytics.com/ko">한국어</a> |
<a href="https://docs.ultralytics.com/ja">日本語</a> |
<a href="https://docs.ultralytics.com/ru">Русский</a> |
<a href="https://docs.ultralytics.com/de">Deutsch</a> |
<a href="https://docs.ultralytics.com/fr">Français</a> |
<a href="https://docs.ultralytics.com/es/">Español</a> |
<a href="https://docs.ultralytics.com/pt">Português</a> |
<a href="https://docs.ultralytics.com/tr">Türkçe</a> |
<a href="https://docs.ultralytics.com/vi">Tiếng Việt</a> |
<a href="https://docs.ultralytics.com/ar">العربية</a>
<br>
<br>
<a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg" alt="Ultralytics CI"></a>
<a href="https://pepy.tech/projects/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
<a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
<a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
<a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
<br>
<a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
<a href="https://www.kaggle.com/models/ultralytics/yolo11"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open Ultralytics In Kaggle"></a>
<a href="https://mybinder.org/v2/gh/ultralytics/ultralytics/HEAD?labpath=examples%2Ftutorial.ipynb"><img src="https://mybinder.org/badge_logo.svg" alt="Open Ultralytics In Binder"></a>
<br>
</div>
Welcome to the Ultralytics Explorer API notebook! This notebook serves as the starting point for exploring the various resources available to help you get started with using Ultralytics to explore your datasets with the power of semantic search. You get utilities out of the box that allow you to examine specific types of labels using vector search or even SQL queries.
Try `yolo explorer` powered by Explorer API
Simply `pip install ultralytics` and run `yolo explorer` in your terminal to run custom queries and semantic search on your datasets right inside your browser!
!!! warning "Community Note ⚠️"
As of **`ultralytics>=8.3.10`**, Ultralytics explorer support has been deprecated. But don't worry! You can now access similar and even enhanced functionality through [Ultralytics HUB](https://hub.ultralytics.com/), our intuitive no-code platform designed to streamline your workflow. With Ultralytics HUB, you can continue exploring, visualizing, and managing your data effortlessly, all without writing a single line of code. Make sure to check it out and take advantage of its powerful features!🚀
## Setup
Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.
```bash
%pip install ultralytics[explorer] openai
yolo checks
```
## Similarity Search
Utilize the power of vector similarity search to find similar data points in your dataset along with their distance in the embedding space. Simply create an embeddings table for the given dataset-model pair; it is only needed once and is reused automatically.
```python
from ultralytics import Explorer
exp = Explorer("VOC.yaml", model="yolo11n.pt")
exp.create_embeddings_table()
```
Once the embeddings table is built, you can run semantic search in any of the following ways:
- On a given index or list of indices in the dataset: `exp.get_similar(idx=[1,10], limit=10)`
- On any image or list of images not in the dataset: `exp.get_similar(img=["path/to/img1", "path/to/img2"], limit=10)`

In case of multiple inputs, the aggregate of their embeddings is used.

You get a pandas dataframe with the `limit` number of most similar data points to the input, along with their distance in the embedding space. You can use this dataframe to perform further filtering.
![Similarity search table](https://github.com/ultralytics/docs/releases/download/0/similarity-search-table.avif)
```python
# Search dataset by index
similar = exp.get_similar(idx=1, limit=10)
similar.head()
```
You can also plot the similar samples directly using the `plot_similar` util:
![Similarity search image 1](https://github.com/ultralytics/docs/releases/download/0/similarity-search-image-1.avif)
```python
exp.plot_similar(idx=6500, limit=20)
exp.plot_similar(idx=[100, 101], limit=10) # Can also pass list of idxs or imgs
exp.plot_similar(img="https://ultralytics.com/images/bus.jpg", limit=10, labels=False) # Can also pass external images
```
![Similarity search image 2](https://github.com/ultralytics/docs/releases/download/0/similarity-search-image-2.avif)
## Ask AI: Search or filter with Natural Language
You can prompt the Explorer object with the kind of data points you want to see, and it'll try to return a dataframe with those. Because it is powered by LLMs, it doesn't always get it right. In that case, it'll return None.
![Ask ai table](https://github.com/ultralytics/docs/releases/download/0/ask-ai-nlp-table.avif)
```python
df = exp.ask_ai("show me images containing more than 10 objects with at least 2 persons")
df.head(5)
```
For plotting these results, you can use the `plot_query_result` util. For example:
```python
plt = plot_query_result(exp.ask_ai("show me 10 images containing exactly 2 persons"))
Image.fromarray(plt)
```
![Ask ai image 1](https://github.com/ultralytics/docs/releases/download/0/ask-ai-nlp-image-1.avif)
```python
# plot
from PIL import Image
from ultralytics.data.explorer import plot_query_result
plt = plot_query_result(exp.ask_ai("show me 10 images containing exactly 2 persons"))
Image.fromarray(plt)
```
## Run SQL queries on your Dataset
Sometimes you might want to investigate a certain type of entry in your dataset. For this, Explorer allows you to execute SQL queries. It accepts either of the following formats:
- Queries beginning with "WHERE" will automatically select all columns. This can be thought of as a shorthand query.
- You can also write full queries where you specify which columns to select.
This can be used to investigate model performance and specific data points. For example:
- Let's say your model struggles on images that have humans and dogs. You can write a query like the one below to select the points that have at least 2 humans AND at least 1 dog.
You can combine SQL queries and semantic search to filter down to a specific type of result:
```python
table = exp.sql_query("WHERE labels LIKE '%person, person%' AND labels LIKE '%dog%' LIMIT 10")
exp.plot_sql_query("WHERE labels LIKE '%person, person%' AND labels LIKE '%dog%' LIMIT 10", labels=True)
```
![SQL queries table](https://github.com/ultralytics/docs/releases/download/0/sql-queries-table.avif)
```python
table = exp.sql_query("WHERE labels LIKE '%person, person%' AND labels LIKE '%dog%' LIMIT 10")
print(table)
```
Just like similarity search, you also get a util to directly plot the SQL queries using `exp.plot_sql_query`:
![SQL queries image 1](https://github.com/ultralytics/docs/releases/download/0/sql-query-image-1.avif)
```python
exp.plot_sql_query("WHERE labels LIKE '%person, person%' AND labels LIKE '%dog%' LIMIT 10", labels=True)
```
## Working with embeddings Table (Advanced)
Explorer works on [LanceDB](https://lancedb.github.io/lancedb/) tables internally. You can access this table directly, using `Explorer.table` object and run raw queries, push down pre- and post-filters, etc.
```python
table = exp.table
print(table.schema)
```
### Run raw queries
Vector search finds the nearest vectors in the database. In a recommendation system or search engine, you can find products similar to the one you searched for. In LLM and other AI applications, each data point can be represented by embeddings generated from a model, and the search returns the most relevant features.
A search in a high-dimensional vector space is to find the K-Nearest Neighbors (KNN) of the query vector.
**Metric**: In LanceDB, a metric is the way to describe the distance between a pair of vectors. Currently, it supports the following metrics:
- L2
- Cosine
- Dot

Explorer's similarity search uses L2 by default. You can run queries on tables directly, or use the lance format to build custom utilities to manage datasets. More details on the available LanceDB table ops are in the [docs](https://lancedb.github.io/lancedb/).
![Raw-queries-table](https://github.com/ultralytics/docs/releases/download/0/raw-queries-table.avif)
```python
dummy_img_embedding = [i for i in range(256)]
table.search(dummy_img_embedding).limit(5).to_pandas()
```
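As a small sketch building on the same `table` and dummy embedding pattern above, you could switch the raw query to the cosine metric instead of the default L2 (the `.metric()` call is the same one used in the advanced querying example earlier in these docs):

```python
dummy_img_embedding = [i for i in range(256)]
# Same raw query as above, but using cosine distance instead of the default L2
table.search(dummy_img_embedding).metric("cosine").limit(5).to_pandas()
```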
### Interconversion to popular data formats
```python
df = table.to_pandas()
pa_table = table.to_arrow()
```
### Work with Embeddings
You can access the raw embeddings from the LanceDB table and analyse them. The image embeddings are stored in the `vector` column:
```python
import numpy as np
embeddings = table.to_pandas()["vector"].tolist()
embeddings = np.array(embeddings)
```
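As a small follow-up sketch that reuses the `embeddings` array from the cell above, you could, for example, compute the cosine similarity between the first two image embeddings directly with NumPy:

```python
# Cosine similarity between the first two image embeddings (uses `embeddings` and `np` from the cell above)
a, b = embeddings[0], embeddings[1]
cos_sim = float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
print(f"Cosine similarity between image 0 and image 1: {cos_sim:.4f}")
```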
### Scatterplot
One of the preliminary steps in analysing embeddings is plotting them in a lower-dimensional space via dimensionality reduction. Let's try an example:
![Scatterplot Example](https://github.com/ultralytics/docs/releases/download/0/scatterplot-sql-queries.avif)
```python
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA # pip install scikit-learn
# Reduce dimensions using PCA to 3 components for visualization in 3D
pca = PCA(n_components=3)
reduced_data = pca.fit_transform(embeddings)
# Create a 3D scatter plot using Matplotlib's Axes3D
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
# Scatter plot
ax.scatter(reduced_data[:, 0], reduced_data[:, 1], reduced_data[:, 2], alpha=0.5)
ax.set_title("3D Scatter Plot of Reduced 256-Dimensional Data (PCA)")
ax.set_xlabel("Component 1")
ax.set_ylabel("Component 2")
ax.set_zlabel("Component 3")
plt.show()
```
### Similarity Index
Here's a simple example of an operation powered by the embeddings table. Explorer comes with a `similarity_index` operation:
- It tries to estimate how similar each data point is to the rest of the dataset.
- It does that by counting how many image embeddings lie closer than `max_dist` to the current image in the generated embedding space, considering `top_k` similar images at a time.

For a given dataset, model, `max_dist` and `top_k`, the similarity index, once generated, will be reused. If your dataset has changed, or you simply need to regenerate the similarity index, you can pass `force=True`. Similar to vector and SQL search, this also comes with a util to directly plot it.
```python
sim_idx = exp.similarity_index(max_dist=0.2, top_k=0.01)
exp.plot_similarity_index(max_dist=0.2, top_k=0.01)
```
![Similarity Index](https://github.com/ultralytics/docs/releases/download/0/similarity-index.avif)
Let's look at the plot first:
```python
exp.plot_similarity_index(max_dist=0.2, top_k=0.01)
```
Now let's look at the output of the operation
```python
sim_idx = exp.similarity_index(max_dist=0.2, top_k=0.01, force=False)
sim_idx
```
Let's create a query to see which data points have a similarity count of more than 30, and plot images similar to them.
```python
import numpy as np
sim_count = np.array(sim_idx["count"])
sim_idx["im_file"][sim_count > 30]
```
You should see something like this:
![similarity-index-image](https://github.com/ultralytics/docs/releases/download/0/similarity-index-image.avif)
```python
exp.plot_similar(idx=[7146, 14035]) # Using avg embeddings of 2 images
```

View File

@ -0,0 +1,113 @@
---
comments: true
description: Discover Ultralytics Explorer for semantic search, SQL queries, vector similarity, and natural language dataset exploration. Enhance your CV datasets effortlessly.
keywords: Ultralytics Explorer, CV datasets, semantic search, SQL queries, vector similarity, dataset visualization, python API, machine learning, computer vision
---
# Ultralytics Explorer
!!! warning "Community Note ⚠️"
As of **`ultralytics>=8.3.10`**, Ultralytics explorer support has been deprecated. But don't worry! You can now access similar and even enhanced functionality through [Ultralytics HUB](https://hub.ultralytics.com/), our intuitive no-code platform designed to streamline your workflow. With Ultralytics HUB, you can continue exploring, visualizing, and managing your data effortlessly, all without writing a single line of code. Make sure to check it out and take advantage of its powerful features!🚀
<p>
<img width="1709" alt="Ultralytics Explorer Screenshot 1" src="https://github.com/ultralytics/docs/releases/download/0/explorer-dashboard-screenshot-1.avif">
</p>
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
Ultralytics Explorer is a tool for exploring CV datasets using semantic search, SQL queries, vector similarity search, and even natural language. It also provides a Python API for accessing the same functionality.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/3VryynorQeo"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics Explorer API | Semantic Search, SQL Queries & Ask AI Features
</p>
## Installation of Optional Dependencies
Explorer depends on external libraries for some of its functionality. These are automatically installed on usage. To manually install these dependencies, use the following command:
```bash
pip install ultralytics[explorer]
```
!!! tip
Explorer works on embedding/semantic search & SQL querying and is powered by [LanceDB](https://lancedb.com/) serverless vector database. Unlike traditional in-memory DBs, it is persisted on disk without sacrificing performance, so you can scale locally to large datasets like COCO without running out of memory.
## Explorer API
This is a Python API for exploring your datasets. It also powers the GUI Explorer. You can use this to create your own exploratory notebooks or scripts to get insights into your datasets.
Learn more about the Explorer API [here](api.md).
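For a quick taste, here is a minimal sketch using the small COCO128 dataset and a YOLO11 nano model, the same defaults used throughout the Explorer API examples:

```python
from ultralytics import Explorer

# Build the embeddings table once, then query it for similar images
exp = Explorer(data="coco128.yaml", model="yolo11n.pt")
exp.create_embeddings_table()

similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
print(similar.head())
```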
## GUI Explorer Usage
The GUI demo runs in your browser, allowing you to create [embeddings](https://www.ultralytics.com/glossary/embeddings) for your dataset, search for similar images, run SQL queries, and perform semantic search. It can be run using the following command:
```bash
yolo explorer
```
!!! note
The Ask AI feature works using OpenAI, so you'll be prompted to set the API key for OpenAI when you first run the GUI.
You can set it like this: `yolo settings openai_api_key="..."`
<p>
<img width="1709" alt="Ultralytics Explorer OpenAI Integration" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-explorer-openai-integration.avif">
</p>
## FAQ
### What is Ultralytics Explorer and how can it help with CV datasets?
Ultralytics Explorer is a powerful tool designed for exploring [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) (CV) datasets through semantic search, SQL queries, vector similarity search, and even natural language. This versatile tool provides both a GUI and a Python API, allowing users to seamlessly interact with their datasets. By leveraging technologies like [LanceDB](https://lancedb.com/), Ultralytics Explorer ensures efficient, scalable access to large datasets without excessive memory usage. Whether you're performing detailed dataset analysis or exploring data patterns, Ultralytics Explorer streamlines the entire process.
Learn more about the [Explorer API](api.md).
### How do I install the dependencies for Ultralytics Explorer?
To manually install the optional dependencies needed for Ultralytics Explorer, you can use the following `pip` command:
```bash
pip install ultralytics[explorer]
```
These dependencies are essential for the full functionality of semantic search and SQL querying. By including libraries powered by [LanceDB](https://lancedb.com/), the installation ensures that the database operations remain efficient and scalable, even for large datasets like [COCO](../detect/coco.md).
### How can I use the GUI version of Ultralytics Explorer?
Using the GUI version of Ultralytics Explorer is straightforward. After installing the necessary dependencies, you can launch the GUI with the following command:
```bash
yolo explorer
```
The GUI provides a user-friendly interface for creating dataset embeddings, searching for similar images, running SQL queries, and conducting semantic searches. Additionally, the integration with OpenAI's Ask AI feature allows you to query datasets using natural language, enhancing the flexibility and ease of use.
For storage and scalability information, check out our [installation instructions](#installation-of-optional-dependencies).
### What is the Ask AI feature in Ultralytics Explorer?
The Ask AI feature in Ultralytics Explorer allows users to interact with their datasets using natural language queries. Powered by [OpenAI](https://www.ultralytics.com/blog/openai-gpt-4o-showcases-ai-potential), this feature enables you to ask complex questions and receive insightful answers without needing to write SQL queries or similar commands. To use this feature, you'll need to set your OpenAI API key the first time you run the GUI:
```bash
yolo settings openai_api_key="YOUR_API_KEY"
```
For more on this feature and how to integrate it, see our [GUI Explorer Usage](#gui-explorer-usage) section.
### Can I run Ultralytics Explorer in Google Colab?
Yes, Ultralytics Explorer can be run in Google Colab, providing a convenient and powerful environment for dataset exploration. You can start by opening the provided Colab notebook, which is pre-configured with all the necessary settings:
<a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
This setup allows you to explore your datasets fully, taking advantage of Google's cloud resources. Learn more in our [Google Colab Guide](../../integrations/google-colab.md).

View File

@ -0,0 +1,234 @@
---
comments: true
description: Explore Ultralytics' diverse datasets for vision tasks like detection, segmentation, classification, and more. Enhance your projects with high-quality annotated data.
keywords: Ultralytics, datasets, computer vision, object detection, instance segmentation, pose estimation, image classification, multi-object tracking
---
# Datasets Overview
Ultralytics provides support for various datasets to facilitate computer vision tasks such as detection, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), pose estimation, classification, and multi-object tracking. Below is a list of the main Ultralytics datasets, followed by a summary of each computer vision task and the respective datasets.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/YDXKa1EljmU"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics Datasets Overview
</p>
## [Object Detection](detect/index.md)
[Bounding box](https://www.ultralytics.com/glossary/bounding-box) object detection is a computer vision technique that involves detecting and localizing objects in an image by drawing a bounding box around each object.
- [Argoverse](detect/argoverse.md): A dataset containing 3D tracking and motion forecasting data from urban environments with rich annotations.
- [COCO](detect/coco.md): Common Objects in Context (COCO) is a large-scale object detection, segmentation, and captioning dataset with 80 object categories.
- [LVIS](detect/lvis.md): A large-scale object detection, segmentation, and captioning dataset with 1203 object categories.
- [COCO8](detect/coco8.md): A smaller subset of the first 4 images from COCO train and COCO val, suitable for quick tests.
- [COCO128](detect/coco128.md): A smaller subset of the first 128 images from COCO train and COCO val, suitable for tests.
- [Global Wheat 2020](detect/globalwheat2020.md): A dataset containing images of wheat heads for the Global Wheat Challenge 2020.
- [Objects365](detect/objects365.md): A high-quality, large-scale dataset for object detection with 365 object categories and over 600K annotated images.
- [OpenImagesV7](detect/open-images-v7.md): A comprehensive dataset by Google with 1.7M train images and 42k validation images.
- [SKU-110K](detect/sku-110k.md): A dataset featuring dense object detection in retail environments with over 11K images and 1.7 million bounding boxes.
- [VisDrone](detect/visdrone.md): A dataset containing object detection and multi-object tracking data from drone-captured imagery with over 10K images and video sequences.
- [VOC](detect/voc.md): The Pascal Visual Object Classes (VOC) dataset for object detection and segmentation with 20 object classes and over 11K images.
- [xView](detect/xview.md): A dataset for object detection in overhead imagery with 60 object categories and over 1 million annotated objects.
- [RF100](detect/roboflow-100.md): A diverse object detection benchmark with 100 datasets spanning seven imagery domains for comprehensive model evaluation.
- [Brain-tumor](detect/brain-tumor.md): A dataset for detecting brain tumors that includes MRI or CT scan images with details on tumor presence, location, and characteristics.
- [African-wildlife](detect/african-wildlife.md): A dataset featuring images of African wildlife, including buffalo, elephant, rhino, and zebra.
- [Signature](detect/signature.md): A dataset featuring images of various documents with annotated signatures, supporting document verification and fraud detection research.
- [Medical-pills](detect/medical-pills.md): A dataset containing labeled images of medical-pills, designed to aid in tasks like pharmaceutical quality control, sorting, and ensuring compliance with industry standards.
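As a quick illustration of how these detection datasets plug into the framework, here is a minimal training sketch using the small COCO8 dataset; the same pattern applies to the other datasets by swapping the YAML file, and the epoch count here is just an illustrative value:

```python
from ultralytics import YOLO

# Fine-tune a pretrained detection model on the small COCO8 dataset
model = YOLO("yolo11n.pt")
results = model.train(data="coco8.yaml", epochs=10, imgsz=640)
```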
## [Instance Segmentation](segment/index.md)
Instance segmentation is a computer vision technique that involves identifying and localizing objects in an image at the pixel level. Unlike semantic segmentation which only classifies each pixel, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) distinguishes between different instances of the same class.
- [COCO](segment/coco.md): A large-scale dataset designed for object detection, segmentation, and captioning tasks with over 200K labeled images.
- [COCO8-seg](segment/coco8-seg.md): A smaller dataset for instance segmentation tasks, containing a subset of 8 COCO images with segmentation annotations.
- [COCO128-seg](segment/coco.md): A smaller dataset for instance segmentation tasks, containing a subset of 128 COCO images with segmentation annotations.
- [Crack-seg](segment/crack-seg.md): Specifically crafted dataset for detecting cracks on roads and walls, applicable for both object detection and segmentation tasks.
- [Package-seg](segment/package-seg.md): Tailored dataset for identifying packages in warehouses or industrial settings, suitable for both object detection and segmentation applications.
- [Carparts-seg](segment/carparts-seg.md): Purpose-built dataset for identifying vehicle parts, catering to design, manufacturing, and research needs. It serves for both object detection and segmentation tasks.
## [Pose Estimation](pose/index.md)
Pose estimation is a technique used to determine the pose of the object relative to the camera or the world coordinate system. This involves identifying key points or joints on objects, particularly humans or animals.
- [COCO](pose/coco.md): A large-scale dataset with human pose annotations designed for pose estimation tasks.
- [COCO8-pose](pose/coco8-pose.md): A smaller dataset for pose estimation tasks, containing a subset of 8 COCO images with human pose annotations.
- [Tiger-pose](pose/tiger-pose.md): A compact dataset consisting of 263 images focused on tigers, annotated with 12 keypoints per tiger for pose estimation tasks.
- [Hand-Keypoints](pose/hand-keypoints.md): A concise dataset featuring over 26,000 images centered on human hands, annotated with 21 keypoints per hand, designed for pose estimation tasks.
- [Dog-pose](pose/dog-pose.md): A comprehensive dataset featuring approximately 6,000 images focused on dogs, annotated with 24 keypoints per dog, tailored for pose estimation tasks.
## [Classification](classify/index.md)
[Image classification](https://www.ultralytics.com/glossary/image-classification) is a computer vision task that involves categorizing an image into one or more predefined classes or categories based on its visual content.
- [Caltech 101](classify/caltech101.md): A dataset containing images of 101 object categories for image classification tasks.
- [Caltech 256](classify/caltech256.md): An extended version of Caltech 101 with 256 object categories and more challenging images.
- [CIFAR-10](classify/cifar10.md): A dataset of 60K 32x32 color images in 10 classes, with 6K images per class.
- [CIFAR-100](classify/cifar100.md): An extended version of CIFAR-10 with 100 object categories and 600 images per class.
- [Fashion-MNIST](classify/fashion-mnist.md): A dataset consisting of 70,000 grayscale images of 10 fashion categories for image classification tasks.
- [ImageNet](classify/imagenet.md): A large-scale dataset for object detection and image classification with over 14 million images and 20,000 categories.
- [ImageNet-10](classify/imagenet10.md): A smaller subset of ImageNet with 10 categories for faster experimentation and testing.
- [Imagenette](classify/imagenette.md): A smaller subset of ImageNet that contains 10 easily distinguishable classes for quicker training and testing.
- [Imagewoof](classify/imagewoof.md): A more challenging subset of ImageNet containing 10 dog breed categories for image classification tasks.
- [MNIST](classify/mnist.md): A dataset of 70,000 grayscale images of handwritten digits for image classification tasks.
- [MNIST160](classify/mnist.md): First 8 images of each MNIST category from the MNIST dataset. Dataset contains 160 images total.
## [Oriented Bounding Boxes (OBB)](obb/index.md)
Oriented Bounding Boxes (OBB) is a method in computer vision for detecting angled objects in images using rotated bounding boxes, often applied to aerial and satellite imagery. Unlike traditional bounding boxes, OBB can better fit objects at various orientations.
- [DOTA-v2](obb/dota-v2.md): A popular OBB aerial imagery dataset with 1.7 million instances and 11,268 images.
- [DOTA8](obb/dota8.md): A smaller subset of the first 8 images from the DOTAv1 split set, 4 for training and 4 for validation, suitable for quick tests.
## [Multi-Object Tracking](track/index.md)
Multi-object tracking is a computer vision technique that involves detecting and tracking multiple objects over time in a video sequence. This task extends object detection by maintaining consistent identities of objects across frames.
- [Argoverse](detect/argoverse.md): A dataset containing 3D tracking and motion forecasting data from urban environments with rich annotations for multi-object tracking tasks.
- [VisDrone](detect/visdrone.md): A dataset containing object detection and multi-object tracking data from drone-captured imagery with over 10K images and video sequences.
## Contribute New Datasets
Contributing a new dataset involves several steps to ensure that it aligns well with the existing infrastructure. Below are the necessary steps:
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/yMR7BgwHQ3g?start=427"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Contribute to Ultralytics Datasets 🚀
</p>
### Steps to Contribute a New Dataset
1. **Collect Images**: Gather the images that belong to the dataset. These could be collected from various sources, such as public databases or your own collection.
2. **Annotate Images**: Annotate these images with bounding boxes, segments, or keypoints, depending on the task.
3. **Export Annotations**: Convert these annotations into the YOLO `*.txt` file format which Ultralytics supports.
4. **Organize Dataset**: Arrange your dataset into the correct folder structure. You should have `train/` and `val/` top-level directories, and within each, an `images/` and `labels/` subdirectory.
```
dataset/
├── train/
│ ├── images/
│ └── labels/
└── val/
├── images/
└── labels/
```
5. **Create a `data.yaml` File**: In your dataset's root directory, create a `data.yaml` file that describes the dataset, classes, and other necessary information (see the minimal sketch after this list).
6. **Optimize Images (Optional)**: If you want to reduce the size of the dataset for more efficient processing, you can optimize the images using the code below. This is not required, but recommended for smaller dataset sizes and faster download speeds.
7. **Zip Dataset**: Compress the entire dataset folder into a zip file.
8. **Document and PR**: Create a documentation page describing your dataset and how it fits into the existing framework. After that, submit a Pull Request (PR). Refer to [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing/) for more details on how to submit a PR.
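To make steps 3 and 5 more concrete, here is a minimal sketch that writes a `data.yaml` for a hypothetical two-class dataset; the paths and class names are placeholders, not part of any real dataset:

```python
import yaml

# Hypothetical two-class dataset; adjust paths and class names to your own data
data = {
    "path": "dataset",  # dataset root directory
    "train": "train/images",  # training images (relative to 'path')
    "val": "val/images",  # validation images (relative to 'path')
    "names": {0: "cat", 1: "dog"},  # class index -> class name
}
with open("data.yaml", "w") as f:
    yaml.safe_dump(data, f, sort_keys=False)

# Each file under labels/ contains one line per object in YOLO format,
# with all coordinates normalized to 0-1:
# <class-index> <x_center> <y_center> <width> <height>
# e.g. "0 0.481 0.634 0.118 0.262"
```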
### Example Code to Optimize and Zip a Dataset
!!! example "Optimize and Zip a Dataset"
=== "Python"
```python
from pathlib import Path
from ultralytics.data.utils import compress_one_image
from ultralytics.utils.downloads import zip_directory
# Define dataset directory
path = Path("path/to/dataset")
# Optimize images in dataset (optional)
for f in path.rglob("*.jpg"):
compress_one_image(f)
# Zip dataset into 'path/to/dataset.zip'
zip_directory(path)
```
By following these steps, you can contribute a new dataset that integrates well with Ultralytics' existing structure.
## FAQ
### What datasets does Ultralytics support for object detection?
Ultralytics supports a wide variety of datasets for [object detection](https://www.ultralytics.com/glossary/object-detection), including:
- [COCO](detect/coco.md): A large-scale object detection, segmentation, and captioning dataset with 80 object categories.
- [LVIS](detect/lvis.md): An extensive dataset with 1203 object categories, designed for more fine-grained object detection and segmentation.
- [Argoverse](detect/argoverse.md): A dataset containing 3D tracking and motion forecasting data from urban environments with rich annotations.
- [VisDrone](detect/visdrone.md): A dataset with object detection and multi-object tracking data from drone-captured imagery.
- [SKU-110K](detect/sku-110k.md): Featuring dense object detection in retail environments with over 11K images.
These datasets facilitate training robust [Ultralytics YOLO](https://docs.ultralytics.com/models/) models for various object detection applications.
### How do I contribute a new dataset to Ultralytics?
Contributing a new dataset involves several steps:
1. **Collect Images**: Gather images from public databases or personal collections.
2. **Annotate Images**: Apply bounding boxes, segments, or keypoints, depending on the task.
3. **Export Annotations**: Convert annotations into the YOLO `*.txt` format.
4. **Organize Dataset**: Use the folder structure with `train/` and `val/` directories, each containing `images/` and `labels/` subdirectories.
5. **Create a `data.yaml` File**: Include dataset descriptions, classes, and other relevant information.
6. **Optimize Images (Optional)**: Reduce dataset size for efficiency.
7. **Zip Dataset**: Compress the dataset into a zip file.
8. **Document and PR**: Describe your dataset and submit a Pull Request following [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing/).
Visit [Contribute New Datasets](#contribute-new-datasets) for a comprehensive guide.
### Why should I use Ultralytics HUB for my dataset?
[Ultralytics HUB](https://hub.ultralytics.com/) offers powerful features for dataset management and analysis, including:
- **Seamless Dataset Management**: Upload, organize, and manage your datasets in one place.
- **Immediate Training Integration**: Use uploaded datasets directly for model training without additional setup.
- **Visualization Tools**: Explore and visualize your dataset images and annotations.
- **Dataset Analysis**: Get insights into your dataset distribution and characteristics.
The platform streamlines the transition from dataset management to model training, making the entire process more efficient. Learn more about [Ultralytics HUB Datasets](https://docs.ultralytics.com/hub/datasets/).
### What are the unique features of Ultralytics YOLO models for computer vision?
Ultralytics YOLO models provide several unique features for [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks:
- **Real-time Performance**: High-speed inference and training capabilities for time-sensitive applications.
- **Versatility**: Support for detection, segmentation, classification, and pose estimation tasks in a unified framework.
- **Pretrained Models**: Access to high-performing, pretrained models for various applications, reducing training time.
- **Extensive Community Support**: Active community and comprehensive documentation for troubleshooting and development.
- **Easy Integration**: Simple API for integrating with existing projects and workflows (a minimal prediction sketch follows).
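As an illustration of that last point, here is a minimal prediction sketch (the image path is a placeholder):

```python
from ultralytics import YOLO

# Load a pretrained detection model and run inference on a single image
model = YOLO("yolo11n.pt")
results = model("path/to/image.jpg")  # returns a list of Results objects
results[0].show()  # display the predictions
```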
Discover more about YOLO models on the [Ultralytics Models](https://docs.ultralytics.com/models/) page.
### How can I optimize and zip a dataset using Ultralytics tools?
To optimize and zip a dataset using Ultralytics tools, follow this example code:
!!! example "Optimize and Zip a Dataset"
=== "Python"
```python
from pathlib import Path
from ultralytics.data.utils import compress_one_image
from ultralytics.utils.downloads import zip_directory
# Define dataset directory
path = Path("path/to/dataset")
# Optimize images in dataset (optional)
for f in path.rglob("*.jpg"):
compress_one_image(f)
# Zip dataset into 'path/to/dataset.zip'
zip_directory(path)
```
This process helps reduce dataset size for more efficient storage and faster download speeds. Learn more on how to [Optimize and Zip a Dataset](#example-code-to-optimize-and-zip-a-dataset).

---
comments: true
description: Explore the DOTA dataset for object detection in aerial images, featuring 1.7M Oriented Bounding Boxes across 18 categories. Ideal for aerial image analysis.
keywords: DOTA dataset, object detection, aerial images, oriented bounding boxes, OBB, DOTA v1.0, DOTA v1.5, DOTA v2.0, multiscale detection, Ultralytics
---
# DOTA Dataset with OBB
[DOTA](https://captain-whu.github.io/DOTA/index.html) stands as a specialized dataset, emphasizing [object detection](https://www.ultralytics.com/glossary/object-detection) in aerial images. Originating from the DOTA series of datasets, it offers annotated images capturing a diverse array of aerial scenes with [Oriented Bounding Boxes (OBB)](https://docs.ultralytics.com/datasets/obb/).
![DOTA classes visual](https://github.com/ultralytics/docs/releases/download/0/dota-classes-visual.avif)
## Key Features
- Collection from various sensors and platforms, with image sizes ranging from 800 × 800 to 20,000 × 20,000 pixels.
- Features more than 1.7M Oriented Bounding Boxes across 18 categories.
- Encompasses multiscale object detection.
- Instances are annotated by experts using arbitrary (8 d.o.f.) quadrilaterals, capturing objects of different scales, orientations, and shapes.
## Dataset Versions
### DOTA-v1.0
- Contains 15 common categories.
- Comprises 2,806 images with 188,282 instances.
- Split ratios: 1/2 for training, 1/6 for validation, and 1/3 for testing.
### DOTA-v1.5
- Incorporates the same images as DOTA-v1.0.
- Very small instances (less than 10 pixels) are also annotated.
- Addition of a new category: "container crane".
- A total of 403,318 instances.
- Released for the [DOAI Challenge 2019 on Object Detection in Aerial Images](https://captain-whu.github.io/DOAI2019/challenge.html).
### DOTA-v2.0
- Collections from Google Earth, GF-2 Satellite, and other aerial images.
- Contains 18 common categories.
- Comprises 11,268 images with a whopping 1,793,658 instances.
- New categories introduced: "airport" and "helipad".
- Image splits:
- Training: 1,830 images with 268,627 instances.
- Validation: 593 images with 81,048 instances.
- Test-dev: 2,792 images with 353,346 instances.
- Test-challenge: 6,053 images with 1,090,637 instances.
## Dataset Structure
DOTA exhibits a structured layout tailored for OBB object detection challenges:
- **Images**: A vast collection of high-resolution aerial images capturing diverse terrains and structures.
- **Oriented Bounding Boxes**: Annotations in the form of rotated rectangles encapsulating objects irrespective of their orientation, ideal for capturing objects like airplanes, ships, and buildings.
## Applications
DOTA serves as a benchmark for training and evaluating models specifically tailored for aerial image analysis. With the inclusion of OBB annotations, it provides a unique challenge, enabling the development of specialized [object detection](https://docs.ultralytics.com/tasks/detect/) models that cater to aerial imagery's nuances. The dataset is particularly valuable for applications in remote sensing, surveillance, and environmental monitoring.
## Dataset YAML
Typically, datasets incorporate a YAML (Yet Another Markup Language) file detailing the dataset's configuration. For DOTA v1 and DOTA v1.5, Ultralytics provides `DOTAv1.yaml` and `DOTAv1.5.yaml` files. For additional details on these, as well as DOTA v2, please consult DOTA's official repository and documentation.
!!! example "DOTAv1.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/DOTAv1.yaml"
```
## Split DOTA images
To train on the DOTA dataset, the original high-resolution DOTA images are split into 1024x1024 images in a multiscale way. This preprocessing step is crucial for efficient training, as the original images can be extremely large.
!!! example "Split images"
=== "Python"
```python
from ultralytics.data.split_dota import split_test, split_trainval
# split train and val set, with labels.
split_trainval(
data_root="path/to/DOTAv1.0/",
save_dir="path/to/DOTAv1.0-split/",
rates=[0.5, 1.0, 1.5], # multiscale
gap=500,
)
# split test set, without labels.
split_test(
data_root="path/to/DOTAv1.0/",
save_dir="path/to/DOTAv1.0-split/",
rates=[0.5, 1.0, 1.5], # multiscale
gap=500,
)
```
## Usage
To train a model on the DOTA v1 dataset, you can utilize the following code snippets. Always refer to your model's documentation for a thorough list of available arguments. For those looking to experiment with a smaller subset first, consider using the [DOTA8 dataset](https://docs.ultralytics.com/datasets/obb/dota8/), which contains just 8 images for quick testing.
!!! warning
Please note that all images and associated annotations in the DOTAv1 dataset can be used for academic purposes, but commercial use is prohibited. Your understanding and respect for the dataset creators' wishes are greatly appreciated!
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Create a new YOLO11n-OBB model from scratch
model = YOLO("yolo11n-obb.yaml")
# Train the model on the DOTAv1 dataset
results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024)
```
=== "CLI"
```bash
# Train a new YOLO11n-OBB model on the DOTAv1 dataset
yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024
```
## Sample Data and Annotations
A glance at the dataset illustrates its depth:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/instances-DOTA.avif)
- **DOTA examples**: This snapshot underlines the complexity of aerial scenes and the significance of Oriented [Bounding Box](https://www.ultralytics.com/glossary/bounding-box) annotations, capturing objects in their natural orientation.
The dataset's richness offers invaluable insights into object detection challenges exclusive to aerial imagery. The [DOTA-v2.0 dataset](https://www.ultralytics.com/blog/exploring-the-best-computer-vision-datasets-in-2025) has become particularly popular for remote sensing and aerial surveillance projects due to its comprehensive annotations and diverse object categories.
## Citations and Acknowledgments
For those leveraging DOTA in their endeavors, it's pertinent to cite the relevant research papers:
!!! quote ""
=== "BibTeX"
```bibtex
@article{9560031,
author={Ding, Jian and Xue, Nan and Xia, Gui-Song and Bai, Xiang and Yang, Wen and Yang, Michael and Belongie, Serge and Luo, Jiebo and Datcu, Mihai and Pelillo, Marcello and Zhang, Liangpei},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Object Detection in Aerial Images: A Large-Scale Benchmark and Challenges},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3117983}
}
```
A special note of gratitude to the team behind the DOTA datasets for their commendable effort in curating this dataset. For an exhaustive understanding of the dataset and its nuances, please visit the [official DOTA website](https://captain-whu.github.io/DOTA/index.html).
## FAQ
### What is the DOTA dataset and why is it important for object detection in aerial images?
The [DOTA dataset](https://captain-whu.github.io/DOTA/index.html) is a specialized dataset focused on object detection in aerial images. It features Oriented Bounding Boxes (OBB), providing annotated images from diverse aerial scenes. DOTA's diversity in object orientation, scale, and shape across its 1.7M annotations and 18 categories makes it ideal for developing and evaluating models tailored for aerial imagery analysis, such as those used in surveillance, environmental monitoring, and disaster management.
### How does the DOTA dataset handle different scales and orientations in images?
DOTA utilizes Oriented Bounding Boxes (OBB) for annotation, which are represented by rotated rectangles encapsulating objects regardless of their orientation. This method ensures that objects, whether small or at different angles, are accurately captured. The dataset's multiscale images, ranging from 800 × 800 to 20,000 × 20,000 pixels, further allow for the detection of both small and large objects effectively. This approach is particularly valuable for aerial imagery where objects appear at various angles and scales.
### How can I train a model using the DOTA dataset?
To train a model on the DOTA dataset, you can use the following example with [Ultralytics YOLO](https://docs.ultralytics.com/tasks/obb/):
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Create a new YOLO11n-OBB model from scratch
model = YOLO("yolo11n-obb.yaml")
# Train the model on the DOTAv1 dataset
results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024)
```
=== "CLI"
```bash
# Train a new YOLO11n-OBB model on the DOTAv1 dataset
yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024
```
For more details on how to split and preprocess the DOTA images, refer to the [split DOTA images section](#split-dota-images).
### What are the differences between DOTA-v1.0, DOTA-v1.5, and DOTA-v2.0?
- **DOTA-v1.0**: Includes 15 common categories across 2,806 images with 188,282 instances. The dataset is split into training, validation, and testing sets.
- **DOTA-v1.5**: Builds upon DOTA-v1.0 by annotating very small instances (less than 10 pixels) and adding a new category, "container crane," totaling 403,318 instances.
- **DOTA-v2.0**: Expands further with annotations from Google Earth and GF-2 Satellite, featuring 11,268 images and 1,793,658 instances. It includes new categories like "airport" and "helipad."
For a detailed comparison and additional specifics, check the [dataset versions section](#dataset-versions).
### How can I prepare high-resolution DOTA images for training?
DOTA images, which can be very large, are split into smaller resolutions for manageable training. Here's a Python snippet to split images:
!!! example
=== "Python"
```python
from ultralytics.data.split_dota import split_test, split_trainval
# split train and val set, with labels.
split_trainval(
data_root="path/to/DOTAv1.0/",
save_dir="path/to/DOTAv1.0-split/",
rates=[0.5, 1.0, 1.5], # multiscale
gap=500,
)
# split test set, without labels.
split_test(
data_root="path/to/DOTAv1.0/",
save_dir="path/to/DOTAv1.0-split/",
rates=[0.5, 1.0, 1.5], # multiscale
gap=500,
)
```
This process facilitates better training efficiency and model performance. For detailed instructions, visit the [split DOTA images section](#split-dota-images).

---
comments: true
description: Explore the DOTA8 dataset - a small, versatile oriented object detection dataset ideal for testing and debugging object detection models using Ultralytics YOLO11.
keywords: DOTA8 dataset, Ultralytics, YOLO11, object detection, debugging, training models, oriented object detection, dataset YAML
---
# DOTA8 Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) DOTA8 is a small, but versatile oriented [object detection](https://www.ultralytics.com/glossary/object-detection) dataset composed of the first 8 images of the split DOTAv1 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the DOTA8 dataset, the `dota8.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml).
!!! example "ultralytics/cfg/datasets/dota8.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/dota8.yaml"
```
## Usage
To train a YOLO11n-obb model on the DOTA8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="dota8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the DOTA8 dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the DOTA8 dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the DOTA dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{9560031,
author={Ding, Jian and Xue, Nan and Xia, Gui-Song and Bai, Xiang and Yang, Wen and Yang, Michael and Belongie, Serge and Luo, Jiebo and Datcu, Mihai and Pelillo, Marcello and Zhang, Liangpei},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Object Detection in Aerial Images: A Large-Scale Benchmark and Challenges},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3117983}
}
```
A special note of gratitude to the team behind the DOTA datasets for their commendable effort in curating this dataset. For an exhaustive understanding of the dataset and its nuances, please visit the [official DOTA website](https://captain-whu.github.io/DOTA/index.html).
## FAQ
### What is the DOTA8 dataset and how can it be used?
The DOTA8 dataset is a small, versatile oriented object detection dataset made up of the first 8 images from the DOTAv1 split set, with 4 images designated for training and 4 for validation. It's ideal for testing and debugging object detection models like Ultralytics YOLO11. Due to its manageable size and diversity, it helps in identifying pipeline errors and running sanity checks before deploying larger datasets. Learn more about object detection with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics).
### How do I train a YOLO11 model using the DOTA8 dataset?
To train a YOLO11n-obb model on the DOTA8 dataset for 100 epochs with an image size of 640, you can use the following code snippets. For comprehensive argument options, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="dota8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640
```
### What are the key features of the DOTA dataset and where can I access the YAML file?
The DOTA dataset is known for its large-scale benchmark and the challenges it presents for object detection in aerial images. The DOTA8 subset is a smaller, manageable dataset ideal for initial tests. You can access the `dota8.yaml` file, which contains paths, classes, and configuration details, at this [GitHub link](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml).
### How does mosaicing enhance model training with the DOTA8 dataset?
Mosaicing combines multiple images into one during training, increasing the variety of objects and contexts within each batch. This improves a model's ability to generalize to different object sizes, aspect ratios, and scenes. This technique can be visually demonstrated through a training batch composed of mosaiced DOTA8 dataset images, helping in robust model development. Explore more about mosaicing and training techniques on our [Training](../../modes/train.md) page.
### Why should I use Ultralytics YOLO11 for object detection tasks?
Ultralytics YOLO11 provides state-of-the-art real-time object detection capabilities, including features like oriented bounding boxes (OBB), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and a highly versatile training pipeline. It's suitable for various applications and offers pretrained models for efficient fine-tuning. Explore further about the advantages and usage in the [Ultralytics YOLO11 documentation](https://github.com/ultralytics/ultralytics).

---
comments: true
description: Discover OBB dataset formats for Ultralytics YOLO models. Learn about their structure, application, and format conversions to enhance your object detection training.
keywords: Oriented Bounding Box, OBB Datasets, YOLO, Ultralytics, Object Detection, Dataset Formats
---
# Oriented Bounding Box (OBB) Datasets Overview
Training a precise [object detection](https://www.ultralytics.com/glossary/object-detection) model with oriented bounding boxes (OBB) requires a thorough dataset. This guide explains the various OBB dataset formats compatible with Ultralytics YOLO models, offering insights into their structure, application, and methods for format conversions.
## Supported OBB Dataset Formats
### YOLO OBB Format
The YOLO OBB format designates bounding boxes by their four corner points with coordinates normalized between 0 and 1. It follows this format:
```bash
class_index x1 y1 x2 y2 x3 y3 x4 y4
```
Internally, YOLO processes losses and outputs in the `xywhr` format, which represents the [bounding box](https://www.ultralytics.com/glossary/bounding-box)'s center point (xy), width, height, and rotation.
<p align="center"><img width="800" src="https://github.com/ultralytics/docs/releases/download/0/obb-format-examples.avif" alt="OBB format examples"></p>
An example of a `*.txt` label file for the above image, which contains an object of class `0` in OBB format, could look like:
```bash
0 0.780811 0.743961 0.782371 0.74686 0.777691 0.752174 0.776131 0.749758
```
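For intuition on how the four corner points relate to the internal `xywhr` representation mentioned above, here is a small geometric sketch. It assumes the corners are listed in order around an ideal rectangle and treats the normalized coordinates as isotropic; it is an illustration, not the routine used inside the library:

```python
import math


def corners_to_xywhr(pts):
    """Convert four ordered corner points [(x1, y1), ..., (x4, y4)] of a rotated
    rectangle into (cx, cy, w, h, r), with r in radians."""
    cx = sum(p[0] for p in pts) / 4  # center x = mean of corner x values
    cy = sum(p[1] for p in pts) / 4  # center y = mean of corner y values
    w = math.hypot(pts[1][0] - pts[0][0], pts[1][1] - pts[0][1])  # length of side p1 -> p2
    h = math.hypot(pts[2][0] - pts[1][0], pts[2][1] - pts[1][1])  # length of side p2 -> p3
    r = math.atan2(pts[1][1] - pts[0][1], pts[1][0] - pts[0][0])  # rotation of the w side
    return cx, cy, w, h, r


# The example label line above, with the class index stripped
pts = [(0.780811, 0.743961), (0.782371, 0.74686), (0.777691, 0.752174), (0.776131, 0.749758)]
print(corners_to_xywhr(pts))
```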
## Usage
To train a model using these OBB formats:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Create a new YOLO11n-OBB model from scratch
model = YOLO("yolo11n-obb.yaml")
# Train the model on the DOTAv1 dataset
results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024)
```
=== "CLI"
```bash
# Train a new YOLO11n-OBB model on the DOTAv1 dataset
yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024
```
## Supported Datasets
Currently, the following datasets with Oriented Bounding Boxes are supported:
- [DOTA-v1](dota-v2.md): The first version of the DOTA dataset, providing a comprehensive set of aerial images with oriented bounding boxes for object detection.
- [DOTA-v1.5](dota-v2.md): An intermediate version of the DOTA dataset, offering additional annotations and improvements over DOTA-v1 for enhanced object detection tasks.
- [DOTA-v2](dota-v2.md): DOTA (A Large-scale Dataset for Object Detection in Aerial Images) version 2 emphasizes detection from aerial perspectives and contains oriented bounding boxes, with 1.7 million instances across 11,268 images.
- [DOTA8](dota8.md): A small, 8-image subset of the full DOTA dataset suitable for testing workflows and Continuous Integration (CI) checks of OBB training in the `ultralytics` repository.
### Incorporating your own OBB dataset
For those looking to introduce their own datasets with oriented bounding boxes, ensure compatibility with the "YOLO OBB format" mentioned above. Convert your annotations to this required format and detail the paths, classes, and class names in a corresponding YAML configuration file.
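As a starting point, such a YAML file might look roughly like this (the paths and class names are placeholders for illustration):

```yaml
# Hypothetical YAML for a custom OBB dataset - adjust to your own data
path: ../datasets/my-obb-dataset # dataset root directory
train: images/train # train images (relative to 'path')
val: images/val # val images (relative to 'path')

# Classes
names:
  0: plane
  1: ship
```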
## Convert Label Formats
### DOTA Dataset Format to YOLO OBB Format
Transitioning labels from the DOTA dataset format to the YOLO OBB format can be achieved with this script:
!!! example
=== "Python"
```python
from ultralytics.data.converter import convert_dota_to_yolo_obb
convert_dota_to_yolo_obb("path/to/DOTA")
```
This conversion mechanism is instrumental for datasets in the DOTA format, ensuring alignment with the [Ultralytics YOLO](../../models/yolo11.md) OBB format.
It's imperative to validate the compatibility of the dataset with your model and adhere to the necessary format conventions. Properly structured datasets are pivotal for training efficient object detection models with oriented bounding boxes.
## FAQ
### What are Oriented Bounding Boxes (OBB) and how are they used in Ultralytics YOLO models?
Oriented Bounding Boxes (OBB) are a type of bounding box annotation where the box can be rotated to align more closely with the object being detected, rather than just being axis-aligned. This is particularly useful in aerial or satellite imagery where objects might not be aligned with the image axes. In [Ultralytics YOLO](../../tasks/obb.md) models, OBBs are represented by their four corner points in the YOLO OBB format. This allows for more accurate object detection since the bounding boxes can rotate to fit the objects better.
### How do I convert my existing DOTA dataset labels to YOLO OBB format for use with Ultralytics YOLO11?
You can convert DOTA dataset labels to YOLO OBB format using the [`convert_dota_to_yolo_obb`](../../reference/data/converter.md) function from Ultralytics. This conversion ensures compatibility with the Ultralytics YOLO models, enabling you to leverage the OBB capabilities for enhanced object detection. Here's a quick example:
```python
from ultralytics.data.converter import convert_dota_to_yolo_obb
convert_dota_to_yolo_obb("path/to/DOTA")
```
This script will reformat your DOTA annotations into a YOLO-compatible format.
### How do I train a YOLO11 model with oriented bounding boxes (OBB) on my dataset?
Training a YOLO11 model with OBBs involves ensuring your dataset is in the YOLO OBB format and then using the [Ultralytics API](../../usage/python.md) to train the model. Here's an example in both Python and CLI:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Create a new YOLO11n-OBB model from scratch
model = YOLO("yolo11n-obb.yaml")
# Train the model on the custom dataset
results = model.train(data="your_dataset.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Train a new YOLO11n-OBB model on the custom dataset
yolo obb train data=your_dataset.yaml model=yolo11n-obb.yaml epochs=100 imgsz=640
```
This ensures your model leverages the detailed OBB annotations for improved detection [accuracy](https://www.ultralytics.com/glossary/accuracy).
### What datasets are currently supported for OBB training in Ultralytics YOLO models?
Currently, Ultralytics supports the following datasets for OBB training:
- [DOTA-v1](dota-v2.md): The first version of the DOTA dataset, providing a comprehensive set of aerial images with oriented bounding boxes for object detection.
- [DOTA-v1.5](dota-v2.md): An intermediate version of the DOTA dataset, offering additional annotations and improvements over DOTA-v1 for enhanced object detection tasks.
- [DOTA-v2](dota-v2.md): This dataset includes 1.7 million instances with oriented bounding boxes and 11,268 images, primarily focusing on aerial object detection.
- [DOTA8](dota8.md): A smaller, 8-image subset of the DOTA dataset used for testing and [continuous integration](../../help/CI.md) (CI) checks.
These datasets are tailored for scenarios where OBBs offer a significant advantage, such as aerial and satellite image analysis.
### Can I use my own dataset with oriented bounding boxes for YOLO11 training, and if so, how?
Yes, you can use your own dataset with oriented bounding boxes for YOLO11 training. Ensure your dataset annotations are converted to the YOLO OBB format, which involves defining bounding boxes by their four corner points. You can then create a [YAML configuration file](../../usage/cfg.md) specifying the dataset paths, classes, and other necessary details. For more information on creating and configuring your datasets, refer to the [Supported Datasets](#supported-datasets) section.

---
comments: true
description: Explore the COCO-Pose dataset for advanced pose estimation. Learn about datasets, pretrained models, metrics, and applications for training with YOLO.
keywords: COCO-Pose, pose estimation, dataset, keypoints, COCO Keypoints 2017, YOLO, deep learning, computer vision
---
# COCO-Pose Dataset
The [COCO-Pose](https://cocodataset.org/#keypoints-2017) dataset is a specialized version of the COCO (Common Objects in Context) dataset, designed for pose estimation tasks. It leverages the COCO Keypoints 2017 images and labels to enable the training of models like YOLO for pose estimation tasks.
![Pose sample image](https://github.com/ultralytics/docs/releases/download/0/pose-sample-image.avif)
## COCO-Pose Pretrained Models
{% include "macros/yolo-pose-perf.md" %}
## Key Features
- COCO-Pose builds upon the COCO Keypoints 2017 dataset which contains 200K images labeled with keypoints for pose estimation tasks.
- The dataset supports 17 keypoints for human figures, facilitating detailed pose estimation.
- Like COCO, it provides standardized evaluation metrics, including Object Keypoint Similarity (OKS) for pose estimation tasks, making it suitable for comparing model performance.
## Dataset Structure
The COCO-Pose dataset is split into three subsets:
1. **Train2017**: This subset contains 56,599 images from the COCO dataset, annotated for training pose estimation models.
2. **Val2017**: This subset has 2,346 images used for validation purposes during model training.
3. **Test2017**: This subset consists of images used for testing and benchmarking the trained models. Ground truth annotations for this subset are not publicly available, and the results are submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7384) for performance evaluation.
## Applications
The COCO-Pose dataset is specifically used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in keypoint detection and pose estimation tasks, such as OpenPose. The dataset's large number of annotated images and standardized evaluation metrics make it an essential resource for [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) researchers and practitioners focused on pose estimation.
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Pose dataset, the `coco-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml).
!!! example "ultralytics/cfg/datasets/coco-pose.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco-pose.yaml"
```
## Usage
To train a YOLO11n-pose model on the COCO-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=coco-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
The COCO-Pose dataset contains a diverse set of images with human figures annotated with keypoints. Here are some examples of images from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-6.avif)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO-Pose dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO-Pose dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO-Pose dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the COCO-Pose dataset and how is it used with Ultralytics YOLO for pose estimation?
The [COCO-Pose](https://cocodataset.org/#keypoints-2017) dataset is a specialized version of the COCO (Common Objects in Context) dataset designed for pose estimation tasks. It builds upon the COCO Keypoints 2017 images and annotations, allowing for the training of models like Ultralytics YOLO for detailed pose estimation. For instance, you can use the COCO-Pose dataset to train a YOLO11n-pose model by loading a pretrained model and training it with a YAML configuration. For training examples, refer to the [Training](../../modes/train.md) documentation.
### How can I train a YOLO11 model on the COCO-Pose dataset?
Training a YOLO11 model on the COCO-Pose dataset can be accomplished using either Python or CLI commands. For example, to train a YOLO11n-pose model for 100 epochs with an image size of 640, you can follow the steps below:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=coco-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
For more details on the training process and available arguments, check the [training page](../../modes/train.md).
### What are the different metrics provided by the COCO-Pose dataset for evaluating model performance?
The COCO-Pose dataset provides several standardized evaluation metrics for pose estimation tasks, similar to the original COCO dataset. Key metrics include the Object Keypoint Similarity (OKS), which evaluates the [accuracy](https://www.ultralytics.com/glossary/accuracy) of predicted keypoints against ground truth annotations. These metrics allow for thorough performance comparisons between different models. For instance, the COCO-Pose pretrained models such as YOLO11n-pose, YOLO11s-pose, and others have specific performance metrics listed in the documentation, like mAP<sup>pose</sup>50-95 and mAP<sup>pose</sup>50.
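For reference, the Object Keypoint Similarity used by the COCO benchmark is computed as

$$
\text{OKS} = \frac{\sum_i \exp\left(-\frac{d_i^2}{2 s^2 k_i^2}\right)\,\delta(v_i > 0)}{\sum_i \delta(v_i > 0)}
$$

where $d_i$ is the Euclidean distance between the predicted and ground-truth location of keypoint $i$, $s$ is the object scale (the square root of the ground-truth segment area), $k_i$ is a per-keypoint constant controlling the falloff, and $v_i$ is the visibility flag of the ground-truth keypoint.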
### How is the dataset structured and split for the COCO-Pose dataset?
The COCO-Pose dataset is split into three subsets:
1. **Train2017**: Contains 56,599 COCO images, annotated for training pose estimation models.
2. **Val2017**: 2,346 images for validation purposes during model training.
3. **Test2017**: Images used for testing and benchmarking trained models. Ground truth annotations for this subset are not publicly available; results are submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7403) for performance evaluation.
These subsets help organize the training, validation, and testing phases effectively. For configuration details, explore the `coco-pose.yaml` file available on [GitHub](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml).
### What are the key features and applications of the COCO-Pose dataset?
The COCO-Pose dataset extends the COCO Keypoints 2017 annotations to include 17 keypoints for human figures, enabling detailed pose estimation. Standardized evaluation metrics (e.g., OKS) facilitate comparisons across different models. Applications of the COCO-Pose dataset span various domains, such as sports analytics, healthcare, and human-computer interaction, wherever detailed pose estimation of human figures is required. For practical use, leveraging pretrained models like those provided in the documentation (e.g., YOLO11n-pose) can significantly streamline the process ([Key Features](#key-features)).
If you use the COCO-Pose dataset in your research or development work, please cite the paper with the following [BibTeX entry](#citations-and-acknowledgments).

---
comments: true
description: Explore the compact, versatile COCO8-Pose dataset for testing and debugging object detection models. Ideal for quick experiments with YOLO11.
keywords: COCO8-Pose, Ultralytics, pose detection dataset, object detection, YOLO11, machine learning, computer vision, training data
---
# COCO8-Pose Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging [object detection](https://www.ultralytics.com/glossary/object-detection) models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Pose dataset, the `coco8-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml).
!!! example "ultralytics/cfg/datasets/coco8-pose.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco8-pose.yaml"
```
## Usage
To train a YOLO11n-pose model on the COCO8-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the COCO8-Pose dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-5.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO8-Pose dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the COCO8-Pose dataset, and how is it used with Ultralytics YOLO11?
The COCO8-Pose dataset is a small, versatile pose detection dataset that includes the first 8 images from the COCO train 2017 set, with 4 images for training and 4 for validation. It's designed for testing and debugging object detection models and experimenting with new detection approaches. This dataset is ideal for quick experiments with [Ultralytics YOLO11](../../models/yolo11.md). For more details on dataset configuration, check out the dataset YAML file [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml).
### How do I train a YOLO11 model using the COCO8-Pose dataset in Ultralytics?
To train a YOLO11n-pose model on the COCO8-Pose dataset for 100 epochs with an image size of 640, follow these examples:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt")
# Train the model
results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
For a comprehensive list of training arguments, refer to the model [Training](../../modes/train.md) page.
### What are the benefits of using the COCO8-Pose dataset?
The COCO8-Pose dataset offers several benefits:
- **Compact Size**: With only 8 images, it is easy to manage and perfect for quick experiments.
- **Diverse Data**: Despite its small size, it includes a variety of scenes, useful for thorough pipeline testing.
- **Error Debugging**: Ideal for identifying training errors and performing sanity checks before scaling up to larger datasets.
For more about its features and usage, see the [Dataset Introduction](#introduction) section.
### How does mosaicing benefit the YOLO11 training process using the COCO8-Pose dataset?
Mosaicing, demonstrated in the sample images of the COCO8-Pose dataset, combines multiple images into one, increasing the variety of objects and scenes within each training batch. This technique helps improve the model's ability to generalize across various object sizes, aspect ratios, and contexts, ultimately enhancing model performance. See the [Sample Images and Annotations](#sample-images-and-annotations) section for example images.
### Where can I find the COCO8-Pose dataset YAML file and how do I use it?
The COCO8-Pose dataset YAML file can be found [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml). This file defines the dataset configuration, including paths, classes, and other relevant information. Use this file with the YOLO11 training scripts as mentioned in the [Train Example](#how-do-i-train-a-yolo11-model-using-the-coco8-pose-dataset-in-ultralytics) section.
For more FAQs and detailed documentation, visit the [Ultralytics Documentation](https://docs.ultralytics.com/).

---
comments: true
description: Discover the Dog-Pose dataset for pose detection. Featuring 6,773 training and 1,703 test images, it's a robust dataset for training YOLO11 models.
keywords: Dog-Pose, Ultralytics, pose detection dataset, YOLO11, machine learning, computer vision, training data
---
# Dog-Pose Dataset
## Introduction
The [Ultralytics](https://www.ultralytics.com/) Dog-pose dataset is a high-quality and extensive dataset specifically curated for dog keypoint estimation. With 6,773 training images and 1,703 test images, this dataset provides a solid foundation for training robust pose estimation models. Each annotated image includes 24 keypoints with 3 dimensions per keypoint (x, y, visibility), making it a valuable resource for advanced research and development in computer vision.
<img src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-dogs.avif" alt="Ultralytics Dog-pose display image" width="800">
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It includes paths, keypoint details, and other relevant information. In the case of the Dog-pose dataset, the `dog-pose.yaml` file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml).
!!! example "ultralytics/cfg/datasets/dog-pose.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/dog-pose.yaml"
```
## Usage
To train a YOLO11n-pose model on the Dog-pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="dog-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=dog-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the Dog-pose dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-2-dog-pose.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the Dog-pose dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the Dog-pose dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@inproceedings{khosla2011fgvc,
title={Novel dataset for Fine-Grained Image Categorization},
author={Aditya Khosla and Nityananda Jayadevaprakash and Bangpeng Yao and Li Fei-Fei},
booktitle={First Workshop on Fine-Grained Visual Categorization (FGVC), IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2011}
}
@inproceedings{deng2009imagenet,
title={ImageNet: A Large-Scale Hierarchical Image Database},
author={Jia Deng and Wei Dong and Richard Socher and Li-Jia Li and Kai Li and Li Fei-Fei},
booktitle={IEEE Computer Vision and Pattern Recognition (CVPR)},
year={2009}
}
```
We would like to acknowledge the Stanford team for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the Dog-pose dataset and its creators, visit the [Stanford Dogs Dataset website](http://vision.stanford.edu/aditya86/ImageNetDogs/).
## FAQ
### What is the Dog-pose dataset, and how is it used with Ultralytics YOLO11?
The Dog-Pose dataset features 6,773 training and 1,703 test images annotated with 24 keypoints for dog pose estimation. It's designed for training and validating models with [Ultralytics YOLO11](../../models/yolo11.md), supporting applications like animal behavior analysis, pet monitoring, and veterinary studies. The dataset's comprehensive annotations make it ideal for developing accurate pose estimation models for canines.
### How do I train a YOLO11 model using the Dog-pose dataset in Ultralytics?
To train a YOLO11n-pose model on the Dog-pose dataset for 100 epochs with an image size of 640, follow these examples:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt")
# Train the model
results = model.train(data="dog-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
yolo pose train data=dog-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
For a comprehensive list of training arguments, refer to the model [Training](../../modes/train.md) page.
### What are the benefits of using the Dog-pose dataset?
The Dog-pose dataset offers several benefits:
- **Large and Diverse Dataset**: With over 8,400 images, it provides substantial data covering a wide range of dog poses, breeds, and contexts, enabling robust model training and evaluation.
- **Detailed Keypoint Annotations**: Each image includes 24 keypoints with 3 dimensions per keypoint (x, y, visibility), offering precise annotations for training accurate pose detection models.
- **Real-World Scenarios**: Includes images from varied environments, enhancing the model's ability to generalize to real-world applications like [pet monitoring](https://www.ultralytics.com/blog/custom-training-ultralytics-yolo11-for-dog-pose-estimation) and behavior analysis.
- **Transfer Learning Advantage**: The dataset works well with [transfer learning](https://www.ultralytics.com/blog/understanding-few-shot-zero-shot-and-transfer-learning) techniques, allowing models pre-trained on human pose datasets to adapt to dog-specific features.
For more about its features and usage, see the [Dataset Introduction](#introduction) section.
### How does mosaicing benefit the YOLO11 training process using the Dog-pose dataset?
Mosaicing, as illustrated in the sample images from the Dog-pose dataset, merges multiple images into a single composite, enriching the diversity of objects and scenes in each training batch. This technique offers several benefits:
- Increases the variety of dog poses, sizes, and backgrounds in each batch
- Improves the model's ability to detect dogs in different contexts and scales
- Enhances generalization by exposing the model to more diverse visual patterns
- Reduces overfitting by creating novel combinations of training examples
This approach leads to more robust models that perform better in real-world scenarios. For example images, refer to the [Sample Images and Annotations](#sample-images-and-annotations) section.
### Where can I find the Dog-pose dataset YAML file and how do I use it?
The Dog-pose dataset YAML file can be found [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml). This file defines the dataset configuration, including paths, classes, keypoint details, and other relevant information. The YAML specifies 24 keypoints with 3 dimensions per keypoint, making it suitable for detailed pose estimation tasks.
To use this file with YOLO11 training scripts, simply reference it in your training command as shown in the [Usage](#usage) section. The dataset will be automatically downloaded when first used, making setup straightforward.
For more FAQs and detailed documentation, visit the [Ultralytics Documentation](https://docs.ultralytics.com/).

---
comments: true
description: Explore the hand keypoints estimation dataset for advanced pose estimation. Learn about datasets, pretrained models, metrics, and applications for training with YOLO.
keywords: Hand KeyPoints, pose estimation, dataset, keypoints, MediaPipe, YOLO, deep learning, computer vision
---
# Hand Keypoints Dataset
## Introduction
The hand-keypoints dataset contains 26,768 images of hands annotated with keypoints, making it suitable for training models like Ultralytics YOLO for pose estimation tasks. The annotations were generated using the Google MediaPipe library, ensuring high [accuracy](https://www.ultralytics.com/glossary/accuracy) and consistency, and the dataset is compatible with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) formats.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/fd6u1TW_AGY"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Hand Keypoints Estimation with Ultralytics YOLO11 | Human Hand Pose Estimation Tutorial
</p>
## Hand Landmarks
![Hand Landmarks](https://github.com/ultralytics/docs/releases/download/0/hand_landmarks.jpg)
## KeyPoints
The dataset includes keypoints for hand detection. The keypoints are annotated as follows:
1. Wrist
2. Thumb (4 points)
3. Index finger (4 points)
4. Middle finger (4 points)
5. Ring finger (4 points)
6. Little finger (4 points)
Each hand has a total of 21 keypoints.
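In Ultralytics YOLO pose label files, each annotated hand occupies one line containing the class index, the normalized bounding box, and the 21 keypoint triplets (x, y, visibility). Schematically (a format sketch, not an actual label from this dataset):

```bash
class_index x_center y_center width height px1 py1 v1 px2 py2 v2 ... px21 py21 v21
```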
## Key Features
- **Large Dataset**: 26,768 images with hand keypoint annotations.
- **YOLO11 Compatibility**: Ready for use with YOLO11 models.
- **21 Keypoints**: Detailed hand pose representation.
## Dataset Structure
The hand keypoint dataset is split into two subsets:
1. **Train**: This subset contains 18,776 images from the hand keypoints dataset, annotated for training pose estimation models.
2. **Val**: This subset contains 7,992 images that can be used for validation purposes during model training.
## Applications
Hand keypoints can be used for [gesture recognition](https://www.ultralytics.com/blog/enhancing-hand-keypoints-estimation-with-ultralytics-yolo11), [AR/VR controls](https://docs.ultralytics.com/tasks/pose/), robotic manipulation, and hand movement analysis in healthcare. They can also be applied in animation for motion capture and biometric authentication systems for security. The detailed tracking of finger positions enables precise interaction with virtual objects and touchless control interfaces.
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Hand Keypoints dataset, the `hand-keypoints.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml).
!!! example "ultralytics/cfg/datasets/hand-keypoints.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/hand-keypoints.yaml"
```
## Usage
To train a YOLO11n-pose model on the Hand Keypoints dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="hand-keypoints.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=hand-keypoints.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
The Hand keypoints dataset contains a diverse set of images with human hands annotated with keypoints. Here are some examples of images from the dataset, along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/human-hand-pose.jpg)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the Hand Keypoints dataset and the benefits of using mosaicing during the training process.
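Mosaic augmentation is enabled by default during Ultralytics training. As a rough sketch (the values shown are illustrative, not tuned recommendations), its strength can be adjusted through the `mosaic` and `close_mosaic` training arguments:

```python
from ultralytics import YOLO

model = YOLO("yolo11n-pose.pt")

# mosaic: probability of applying mosaic augmentation (0.0 disables it)
# close_mosaic: turn mosaic off for the final N epochs to fine-tune on full images
results = model.train(
    data="hand-keypoints.yaml",
    epochs=100,
    imgsz=640,
    mosaic=1.0,
    close_mosaic=10,
)
```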
## Citations and Acknowledgments
If you use the hand-keypoints dataset in your research or development work, please acknowledge the following sources:
!!! quote ""
=== "Credits"
We would like to thank the following sources for providing the images used in this dataset:
- [11k Hands](https://sites.google.com/view/11khands)
- [2000 Hand Gestures](https://www.kaggle.com/datasets/ritikagiridhar/2000-hand-gestures)
- [Gesture Recognition](https://www.kaggle.com/datasets/imsparsh/gesture-recognition)
The images were collected and used under the respective licenses provided by each platform and are distributed under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/).
We would also like to acknowledge the creator of this dataset, [Rion Dsilva](https://www.linkedin.com/in/rion-dsilva-043464229/), for his great contribution to Vision AI research.
## FAQ
### How do I train a YOLO11 model on the Hand Keypoints dataset?
To train a YOLO11 model on the Hand Keypoints dataset, you can use either Python or the command line interface (CLI). Here's an example for training a YOLO11n-pose model for 100 epochs with an image size of 640:
!!! Example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="hand-keypoints.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=hand-keypoints.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the Hand Keypoints dataset?
The Hand Keypoints dataset is designed for advanced [pose estimation](https://docs.ultralytics.com/datasets/pose/) tasks and includes several key features:
- **Large Dataset**: Contains 26,768 images with hand keypoint annotations.
- **YOLO11 Compatibility**: Ready for use with YOLO11 models.
- **21 Keypoints**: Detailed hand pose representation, including wrist and finger joints.
For more details, you can explore the [Hand Keypoints Dataset](#introduction) section.
### What applications can benefit from using the Hand Keypoints dataset?
The Hand Keypoints dataset can be applied in various fields, including:
- **Gesture Recognition**: Enhancing human-computer interaction.
- **AR/VR Controls**: Improving user experience in augmented and virtual reality.
- **Robotic Manipulation**: Enabling precise control of robotic hands.
- **Healthcare**: Analyzing hand movements for medical diagnostics.
- **Animation**: Capturing motion for realistic animations.
- **Biometric Authentication**: Enhancing security systems.
For more information, refer to the [Applications](#applications) section.
### How is the Hand Keypoints dataset structured?
The Hand Keypoints dataset is divided into two subsets:
1. **Train**: Contains 18,776 images for training pose estimation models.
2. **Val**: Contains 7,992 images for validation purposes during model training.
This structure ensures a comprehensive training and validation process. For more details, see the [Dataset Structure](#dataset-structure) section.
### How do I use the dataset YAML file for training?
The dataset configuration is defined in a YAML file, which includes paths, classes, and other relevant information. The `hand-keypoints.yaml` file can be found at [hand-keypoints.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml).
To use this YAML file for training, specify it in your training script or CLI command as shown in the training example above. For more details, refer to the [Dataset YAML](#dataset-yaml) section.

---
comments: true
description: Learn about Ultralytics YOLO format for pose estimation datasets, supported formats, COCO-Pose, COCO8-Pose, Tiger-Pose, and how to add your own dataset.
keywords: pose estimation, Ultralytics, YOLO format, COCO-Pose, COCO8-Pose, Tiger-Pose, dataset conversion, keypoints
---
# Pose Estimation Datasets Overview
## Supported Dataset Formats
### Ultralytics YOLO format
The dataset label format used for training YOLO pose models is as follows:
1. One text file per image: Each image in the dataset has a corresponding text file with the same name as the image file and the ".txt" extension.
2. One row per object: Each row in the text file corresponds to one object instance in the image.
3. Object information per row: Each row contains the following information about the object instance:
- Object class index: An integer representing the class of the object (e.g., 0 for person, 1 for car, etc.).
- Object center coordinates: The x and y coordinates of the center of the object, normalized to be between 0 and 1.
- Object width and height: The width and height of the object, normalized to be between 0 and 1.
- Object keypoint coordinates: The keypoints of the object, normalized to be between 0 and 1.
Here is an example of the label format for pose estimation task:
Format with Dim = 2
```
<class-index> <x> <y> <width> <height> <px1> <py1> <px2> <py2> ... <pxn> <pyn>
```
Format with Dim = 3
```
<class-index> <x> <y> <width> <height> <px1> <py1> <p1-visibility> <px2> <py2> <p2-visibility> ... <pxn> <pyn> <pn-visibility>
```
In this format, `<class-index>` is the index of the class for the object, `<x> <y> <width> <height>` are the coordinates of the [bounding box](https://www.ultralytics.com/glossary/bounding-box), and `<px1> <py1> <px2> <py2> ... <pxn> <pyn>` are the normalized coordinates of the keypoints. The values are separated by spaces.
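As an illustration of how such a label row maps to numbers, the sketch below parses a single hypothetical Dim = 3 row (the sample values are made up) into its class, box, and keypoint parts:

```python
# A hypothetical Dim = 3 label row: class, box (x, y, w, h), then 2 keypoints (x, y, visibility)
line = "0 0.512 0.433 0.210 0.380 0.500 0.400 2 0.530 0.450 1"

values = [float(v) for v in line.split()]
class_index = int(values[0])
x, y, w, h = values[1:5]  # normalized bounding-box center and size
keypoints = [values[i : i + 3] for i in range(5, len(values), 3)]  # [x, y, visibility] triplets

print(class_index, (x, y, w, h))
print(keypoints)  # [[0.5, 0.4, 2.0], [0.53, 0.45, 1.0]]
```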
### Dataset YAML format
The Ultralytics framework uses a YAML file format to define the dataset and model configuration for training pose estimation models. Here is an example of the YAML format used for defining a pose dataset:
```yaml
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco8-pose # dataset root dir (absolute or relative; if relative, it's relative to default datasets_dir)
train: images/train # train images (relative to 'path') 4 images
val: images/val # val images (relative to 'path') 4 images
test: # test images (optional)
# Keypoints
kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
# Classes dictionary
names:
0: person
```
The `train` and `val` fields specify the paths to the directories containing the training and validation images, respectively.
`names` is a dictionary of class names. The order of the names should match the order of the object class indices in the YOLO dataset files.
(Optional) If the keypoints are symmetric, such as the left and right sides of a human body or face, a `flip_idx` entry is required so that horizontal-flip augmentation swaps the paired points correctly. For example, given five facial landmarks [left eye, right eye, nose, left mouth corner, right mouth corner] with original indices [0, 1, 2, 3, 4], the correct `flip_idx` is [1, 0, 2, 4, 3]: the left-right pairs (0-1 and 3-4) are exchanged, while points on the centerline, such as the nose, keep their index.
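For the five-point facial-landmark example above, the relevant YAML entries would look roughly like the following sketch (the class name and the use of a visibility dimension are assumptions):

```yaml
# Hypothetical 5-keypoint face dataset: [left eye, right eye, nose, left mouth, right mouth]
kpt_shape: [5, 3] # 5 keypoints, each with x, y, visibility
flip_idx: [1, 0, 2, 4, 3] # swap left-right pairs (0-1, 3-4); the nose (2) stays put
names:
  0: face
```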
## Usage
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Supported Datasets
This section outlines the datasets that are compatible with Ultralytics YOLO format and can be used for training [pose estimation](https://docs.ultralytics.com/tasks/pose/) models:
### COCO-Pose
- **Description**: COCO-Pose is a large-scale [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and pose estimation dataset. It is a subset of the popular COCO dataset and focuses on human pose estimation. COCO-Pose includes multiple keypoints for each human instance.
- **Label Format**: Same as Ultralytics YOLO format as described above, with keypoints for human poses.
- **Number of Classes**: 1 (Human).
- **Keypoints**: 17 keypoints including nose, eyes, ears, shoulders, elbows, wrists, hips, knees, and ankles.
- **Usage**: Suitable for training human pose estimation models.
- **Additional Notes**: The dataset is rich and diverse, containing over 200k labeled images.
- [Read more about COCO-Pose](coco.md)
### COCO8-Pose
- **Description**: [Ultralytics](https://www.ultralytics.com/) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation.
- **Label Format**: Same as Ultralytics YOLO format as described above, with keypoints for human poses.
- **Number of Classes**: 1 (Human).
- **Keypoints**: 17 keypoints including nose, eyes, ears, shoulders, elbows, wrists, hips, knees, and ankles.
- **Usage**: Suitable for testing and debugging object detection models, or for experimenting with new detection approaches.
- **Additional Notes**: COCO8-Pose is ideal for sanity checks and [CI checks](https://docs.ultralytics.com/help/CI/).
- [Read more about COCO8-Pose](coco8-pose.md)
### Tiger-Pose
- **Description**: This [Ultralytics](https://www.ultralytics.com/) animal pose dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation.
- **Label Format**: Same as Ultralytics YOLO format as described above, with 12 keypoints for animal pose and no visibility dimension.
- **Number of Classes**: 1 (Tiger).
- **Keypoints**: 12 keypoints.
- **Usage**: Great for animal pose or any other pose that is not human-based.
- [Read more about Tiger-Pose](tiger-pose.md)
### Hand Keypoints
- **Description**: The hand keypoints pose dataset comprises nearly 26K images, with 18,776 images allocated for training and 7,992 for validation.
- **Label Format**: Same as Ultralytics YOLO format as described above, but with 21 keypoints for the human hand and a visibility dimension.
- **Number of Classes**: 1 (Hand).
- **Keypoints**: 21 keypoints.
- **Usage**: Great for human hand pose estimation and [gesture recognition](https://www.ultralytics.com/blog/enhancing-hand-keypoints-estimation-with-ultralytics-yolo11).
- [Read more about Hand Keypoints](hand-keypoints.md)
### Dog-Pose
- **Description**: The Dog Pose dataset contains approximately 6,000 images, providing a diverse and extensive resource for training and validation of dog pose estimation models.
- **Label Format**: Follows the Ultralytics YOLO format, with annotations for multiple keypoints specific to dog anatomy.
- **Number of Classes**: 1 (Dog).
- **Keypoints**: Includes 24 keypoints tailored to dog poses, such as limbs, joints, and head positions.
- **Usage**: Ideal for training models to estimate dog poses in various scenarios, from research to [real-world applications](https://www.ultralytics.com/blog/custom-training-ultralytics-yolo11-for-dog-pose-estimation).
- [Read more about Dog-Pose](dog-pose.md)
### Adding your own dataset
If you have your own dataset and would like to use it for training pose estimation models with Ultralytics YOLO format, ensure that it follows the format specified above under "Ultralytics YOLO format". Convert your annotations to the required format and specify the paths, number of classes, and class names in the YAML configuration file.
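A minimal configuration for a hypothetical custom pose dataset might look like the sketch below (the paths, keypoint count, and class name are placeholders to adapt to your own data):

```yaml
# my-pose-dataset.yaml (hypothetical example)
path: ../datasets/my-pose-dataset # dataset root dir
train: images/train # training images (relative to 'path')
val: images/val # validation images (relative to 'path')

kpt_shape: [12, 3] # 12 keypoints with x, y, visibility (adjust to your annotations)
flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 11] # only needed if keypoints are left-right symmetric

names:
  0: my_class
```

Training then points at this file, for example `model.train(data="my-pose-dataset.yaml", ...)`, exactly as in the examples above.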
### Conversion Tool
Ultralytics provides a convenient conversion tool to convert labels from the popular [COCO dataset](https://docs.ultralytics.com/datasets/detect/coco/) format to YOLO format:
!!! example
=== "Python"
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/", use_keypoints=True)
```
This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format. The `use_keypoints` parameter specifies whether to include keypoints (for pose estimation) in the converted labels.
## FAQ
### What is the Ultralytics YOLO format for pose estimation?
The Ultralytics YOLO format for pose estimation datasets involves labeling each image with a corresponding text file. Each row of the text file stores information about an object instance:
- Object class index
- Object center coordinates (normalized x and y)
- Object width and height (normalized)
- Object keypoint coordinates (normalized pxn and pyn)
With Dim = 2, each keypoint stores normalized x and y coordinates; with Dim = 3, each keypoint additionally carries a visibility flag. For more details, see [Ultralytics YOLO format](#ultralytics-yolo-format).
### How do I use the COCO-Pose dataset with Ultralytics YOLO?
To use the [COCO-Pose dataset](https://docs.ultralytics.com/datasets/pose/coco/) with Ultralytics YOLO:
1. Download the dataset and prepare your label files in the YOLO format.
2. Create a YAML configuration file specifying paths to training and validation images, keypoint shape, and class names.
3. Use the configuration file for training:
```python
from ultralytics import YOLO
model = YOLO("yolo11n-pose.pt") # load pretrained model
results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640)
```
For more information, visit [COCO-Pose](coco.md) and [train](../../modes/train.md) sections.
### How can I add my own dataset for pose estimation in Ultralytics YOLO?
To add your dataset:
1. Convert your annotations to the Ultralytics YOLO format.
2. Create a YAML configuration file specifying the dataset paths, number of classes, and class names.
3. Use the configuration file to train your model:
```python
from ultralytics import YOLO
model = YOLO("yolo11n-pose.pt")
results = model.train(data="your-dataset.yaml", epochs=100, imgsz=640)
```
For complete steps, check the [Adding your own dataset](#adding-your-own-dataset) section.
### What is the purpose of the dataset YAML file in Ultralytics YOLO?
The dataset YAML file in Ultralytics YOLO defines the dataset and model configuration for training. It specifies paths to training, validation, and test images, keypoint shapes, class names, and other configuration options. This structured format helps streamline [dataset management](https://docs.ultralytics.com/datasets/explorer/) and model training. Here is an example YAML format:
```yaml
path: ../datasets/coco8-pose
train: images/train
val: images/val
names:
0: person
```
Read more about creating YAML configuration files in [Dataset YAML format](#dataset-yaml-format).
### How can I convert COCO dataset labels to Ultralytics YOLO format for pose estimation?
Ultralytics provides a conversion tool to convert COCO dataset labels to the YOLO format, including keypoint information:
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/", use_keypoints=True)
```
This tool helps seamlessly integrate COCO datasets into YOLO projects. For details, refer to the [Conversion Tool](#conversion-tool) section and the [data preprocessing guide](https://docs.ultralytics.com/guides/preprocessing_annotated_data/).

---
comments: true
description: Explore Ultralytics Tiger-Pose dataset with 263 diverse images. Ideal for testing, training, and refining pose estimation algorithms.
keywords: Ultralytics, Tiger-Pose, dataset, pose estimation, YOLO11, training data, machine learning, neural networks
---
# Tiger-Pose Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) introduces the Tiger-Pose dataset, a versatile collection designed for pose estimation tasks. This dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation. It serves as an excellent resource for testing and troubleshooting pose estimation algorithms.
Despite its manageable size of 210 training images, the Tiger-Pose dataset offers diversity, making it suitable for assessing training pipelines, identifying potential errors, and serving as a valuable preliminary step before working with larger datasets for [pose estimation](https://docs.ultralytics.com/tasks/pose/).
This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/Gc6K5eKrTNQ"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Train YOLO11 Pose Model on Tiger-Pose Dataset Using Ultralytics HUB
</p>
## Dataset YAML
A YAML (Yet Another Markup Language) file serves as the means to specify the configuration details of a dataset. It encompasses crucial data such as file paths, class definitions, and other pertinent information. Specifically, for the `tiger-pose.yaml` file, you can check [Ultralytics Tiger-Pose Dataset Configuration File](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/tiger-pose.yaml).
!!! example "ultralytics/cfg/datasets/tiger-pose.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/tiger-pose.yaml"
```
## Usage
To train a YOLO11n-pose model on the Tiger-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="tiger-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo task=pose mode=train data=tiger-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the Tiger-Pose dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-4.avif" alt="Dataset sample image" width="100%">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the Tiger-Pose dataset and the benefits of using mosaicing during the training process.
## Inference Example
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a tiger-pose trained model
# Run inference
results = model.predict(source="https://youtu.be/MIBAT6BGE6U", show=True)
```
=== "CLI"
```bash
# Run inference using a tiger-pose trained model
yolo task=pose mode=predict source="https://youtu.be/MIBAT6BGE6U" show=True model="path/to/best.pt"
```
## Citations and Acknowledgments
The dataset has been released under the [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE).
## FAQ
### What is the Ultralytics Tiger-Pose dataset used for?
The Ultralytics Tiger-Pose dataset is designed for pose estimation tasks, consisting of 263 images sourced from a [YouTube video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0). The dataset is divided into 210 training images and 53 validation images. It is particularly useful for testing, training, and refining pose estimation algorithms using [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
### How do I train a YOLO11 model on the Tiger-Pose dataset?
To train a YOLO11n-pose model on the Tiger-Pose dataset for 100 epochs with an image size of 640, use the following code snippets. For more details, visit the [Training](../../modes/train.md) page:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="tiger-pose.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo task=pose mode=train data=tiger-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```
### What configurations does the `tiger-pose.yaml` file include?
The `tiger-pose.yaml` file is used to specify the configuration details of the Tiger-Pose dataset. It includes crucial data such as file paths and class definitions. To see the exact configuration, you can check out the [Ultralytics Tiger-Pose Dataset Configuration File](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/tiger-pose.yaml).
### How can I run inference using a YOLO11 model trained on the Tiger-Pose dataset?
To perform inference using a YOLO11 model trained on the Tiger-Pose dataset, you can use the following code snippets. For a detailed guide, visit the [Prediction](../../modes/predict.md) page:
!!! example "Inference Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/best.pt") # load a tiger-pose trained model
# Run inference
results = model.predict(source="https://youtu.be/MIBAT6BGE6U", show=True)
```
=== "CLI"
```bash
# Run inference using a tiger-pose trained model
yolo task=pose mode=predict source="https://youtu.be/MIBAT6BGE6U" show=True model="path/to/best.pt"
```
### What are the benefits of using the Tiger-Pose dataset for pose estimation?
The Tiger-Pose dataset, despite its manageable size of 210 images for training, provides a diverse collection of images that are ideal for testing pose estimation pipelines. The dataset helps identify potential errors and acts as a preliminary step before working with larger datasets. Additionally, the dataset supports the training and refinement of pose estimation algorithms using advanced tools like [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics), enhancing model performance and [accuracy](https://www.ultralytics.com/glossary/accuracy).

---
comments: true
description: Explore the Roboflow Carparts Segmentation Dataset for automotive AI applications. Enhance your segmentation models with rich, annotated data.
keywords: Carparts Segmentation Dataset, Roboflow, computer vision, automotive AI, vehicle maintenance, Ultralytics
---
# Roboflow Universe Carparts Segmentation Dataset
<a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-carparts-segmentation-dataset.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Carparts Segmentation Dataset In Colab"></a>
The [Roboflow](https://roboflow.com/?ref=ultralytics) [Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics) is a curated collection of images and videos designed for [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications, specifically focusing on segmentation tasks related to car parts. This dataset provides a diverse set of visuals captured from multiple perspectives, offering valuable annotated examples for training and testing segmentation models.
Whether you're working on automotive research, developing AI solutions for vehicle maintenance, or exploring computer vision applications, the Carparts Segmentation Dataset serves as a valuable resource for enhancing accuracy and efficiency in your projects.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/HATMPgLYAPU"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Carparts <a href="https://www.ultralytics.com/glossary/instance-segmentation">Instance Segmentation</a> with Ultralytics YOLO11
</p>
## Dataset Structure
The data distribution within the Carparts Segmentation Dataset is organized as outlined below:
- **Training set**: Includes 3156 images, each accompanied by its corresponding annotations.
- **Testing set**: Comprises 276 images, with each one paired with its respective annotations.
- **Validation set**: Consists of 401 images, each having corresponding annotations.
## Applications
Carparts Segmentation finds applications in [automotive quality control](https://docs.ultralytics.com/solutions/), auto repair, e-commerce cataloging, traffic monitoring, [autonomous vehicles](https://www.ultralytics.com/blog/ai-in-self-driving-cars), insurance processing, recycling, and smart city initiatives. It streamlines processes by accurately identifying and categorizing different vehicle components, contributing to efficiency and automation in various industries.
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Carparts Segmentation dataset, the `carparts-seg.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/carparts-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/carparts-seg.yaml).
!!! example "ultralytics/cfg/datasets/carparts-seg.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/carparts-seg.yaml"
```
## Usage
To train an Ultralytics YOLO11n model on the Carparts Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="carparts-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=carparts-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Carparts Segmentation dataset includes a diverse array of images and videos taken from various perspectives. Below, you'll find examples of data from the dataset along with their corresponding annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/dataset-sample-image.avif)
- This image illustrates [object segmentation](https://docs.ultralytics.com/tasks/segment/) within a sample, featuring annotated bounding boxes with masks surrounding identified objects. The dataset consists of a varied set of images captured in various locations, environments, and densities, serving as a comprehensive resource for crafting models specific to this task.
- This instance highlights the diversity and complexity inherent in the dataset, emphasizing the crucial role of [high-quality data](https://www.ultralytics.com/blog/the-importance-of-high-quality-computer-vision-datasets) in computer vision tasks, particularly in the realm of car parts segmentation.
## Citations and Acknowledgments
If you integrate the Carparts Segmentation dataset into your research or development projects, please make reference to the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{ car-seg-un1pm_dataset,
title = { car-seg Dataset },
type = { Open Source Dataset },
author = { Gianmarco Russo },
howpublished = { \url{ https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm } },
url = { https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2023 },
month = { nov },
note = { visited on 2024-01-24 },
}
```
We extend our thanks to the [Roboflow](https://roboflow.com/?ref=ultralytics) team for their dedication in developing and managing the Carparts Segmentation dataset, a valuable resource for vehicle maintenance and research projects. For additional details about the Carparts Segmentation dataset and its creators, please visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics).
## FAQ
### What is the Roboflow Carparts Segmentation Dataset?
The [Roboflow Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics) is a curated collection of images and videos specifically designed for car part segmentation tasks in computer vision. This dataset includes a diverse range of visuals captured from multiple perspectives, making it an invaluable resource for training and testing [segmentation models](https://docs.ultralytics.com/tasks/segment/) for automotive applications.
### How can I use the Carparts Segmentation Dataset with Ultralytics YOLO11?
To train a YOLO11 model on the Carparts Segmentation dataset, you can follow these steps:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="carparts-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=carparts-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
For more details, refer to the [Training](../../modes/train.md) documentation.
### What are some applications of Carparts Segmentation?
Carparts Segmentation can be widely applied in various fields such as:
- **Automotive quality control**
- **Auto repair and maintenance**
- **E-commerce cataloging**
- **Traffic monitoring**
- **Autonomous vehicles**
- **Insurance claim processing**
- **Recycling initiatives**
- **Smart city projects**
This segmentation helps in accurately identifying and categorizing different vehicle components, enhancing the efficiency and automation in these industries.
### Where can I find the dataset configuration file for Carparts Segmentation?
The dataset configuration file for the Carparts Segmentation dataset, `carparts-seg.yaml`, can be found at the following location: [carparts-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/carparts-seg.yaml).
### Why should I use the Carparts Segmentation Dataset?
The Carparts Segmentation Dataset provides rich, annotated data essential for developing high-[accuracy](https://www.ultralytics.com/glossary/accuracy) segmentation models in automotive computer vision. This dataset's diversity and detailed annotations improve model training, making it ideal for applications like vehicle maintenance automation, enhancing vehicle safety systems, and supporting [autonomous driving technologies](https://www.ultralytics.com/blog/ai-in-self-driving-cars). Partnering with a robust dataset accelerates AI development and ensures better model performance.
For more details, visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics).

---
comments: true
description: Explore the COCO-Seg dataset, an extension of COCO, with detailed segmentation annotations. Learn how to train YOLO models with COCO-Seg.
keywords: COCO-Seg, dataset, YOLO models, instance segmentation, object detection, COCO dataset, YOLO11, computer vision, Ultralytics, machine learning
---
# COCO-Seg Dataset
The [COCO-Seg](https://cocodataset.org/#home) dataset, an extension of the COCO (Common Objects in Context) dataset, is specially designed to aid research in object [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation). It uses the same images as COCO but introduces more detailed segmentation annotations. This dataset is a crucial resource for researchers and developers working on instance segmentation tasks, especially for training [Ultralytics YOLO](https://docs.ultralytics.com/models/) models.
## COCO-Seg Pretrained Models
{% include "macros/yolo-seg-perf.md" %}
## Key Features
- COCO-Seg retains the original 330K images from COCO.
- The dataset consists of the same 80 object categories found in the original COCO dataset.
- Annotations now include more detailed instance segmentation masks for each object in the images.
- COCO-Seg provides standardized evaluation metrics like [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) for object detection, and mean Average [Recall](https://www.ultralytics.com/glossary/recall) (mAR) for instance segmentation tasks, enabling effective comparison of model performance.
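As a rough sketch of how these metrics are obtained with a pretrained checkpoint (it assumes the COCO-Seg data defined in `coco.yaml` is available locally or can be downloaded), the Val mode reports box and mask mAP after evaluation:

```python
from ultralytics import YOLO

# Load a pretrained segmentation model
model = YOLO("yolo11n-seg.pt")

# Evaluate on the COCO-Seg validation split defined in coco.yaml
metrics = model.val(data="coco.yaml")

print(metrics.box.map)  # bounding-box mAP50-95
print(metrics.seg.map)  # segmentation-mask mAP50-95
```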
## Dataset Structure
The COCO-Seg dataset is partitioned into three subsets:
1. **Train2017**: This subset contains 118K images for training instance segmentation models.
2. **Val2017**: This subset includes 5K images used for validation purposes during model training.
3. **Test2017**: This subset encompasses 20K images used for testing and benchmarking the trained models. Ground truth annotations for this subset are not publicly available, and the results are submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7383) for performance evaluation.
## Applications
COCO-Seg is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in instance segmentation, such as the YOLO models. The large number of annotated images, the diversity of object categories, and the standardized evaluation metrics make it an indispensable resource for [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) researchers and practitioners.
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Seg dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
!!! example "ultralytics/cfg/datasets/coco.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco.yaml"
```
## Usage
To train a YOLO11n-seg model on the COCO-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=coco.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
COCO-Seg, like its predecessor COCO, contains a diverse set of images with various object categories and complex scenes. However, COCO-Seg introduces more detailed instance segmentation masks for each object in the images. Here are some examples of images from the dataset, along with their corresponding instance segmentation masks:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-3.avif)
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. [Mosaicing](https://docs.ultralytics.com/guides/hyperparameter-tuning/) is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This aids the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO-Seg dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO-Seg dataset in your research or development work, please cite the original COCO paper and acknowledge the extension to COCO-Seg:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We extend our thanks to the COCO Consortium for creating and maintaining this invaluable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the COCO-Seg dataset and how does it differ from the original COCO dataset?
The [COCO-Seg](https://cocodataset.org/#home) dataset is an extension of the original COCO (Common Objects in Context) dataset, specifically designed for instance segmentation tasks. While it uses the same images as the COCO dataset, COCO-Seg includes more detailed segmentation annotations, making it a powerful resource for researchers and developers focusing on [object instance segmentation](https://docs.ultralytics.com/tasks/segment/).
### How can I train a YOLO11 model using the COCO-Seg dataset?
To train a YOLO11n-seg model on the COCO-Seg dataset for 100 epochs with an image size of 640, you can use the following code snippets. For a detailed list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=coco.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
### What are the key features of the COCO-Seg dataset?
The COCO-Seg dataset includes several key features:
- Retains the original 330K images from the COCO dataset.
- Annotates the same 80 object categories found in the original COCO.
- Provides more detailed instance segmentation masks for each object.
- Uses standardized evaluation metrics such as mean Average [Precision](https://www.ultralytics.com/glossary/precision) (mAP) for [object detection](https://www.ultralytics.com/glossary/object-detection) and mean Average Recall (mAR) for instance segmentation tasks.
### What pretrained models are available for COCO-Seg, and what are their performance metrics?
The COCO-Seg dataset supports multiple pretrained YOLO11 segmentation models with varying performance metrics. Here's a summary of the available models and their key metrics:
{% include "macros/yolo-seg-perf.md" %}
These models range from the lightweight YOLO11n-seg to the more powerful YOLO11x-seg, offering different trade-offs between speed and accuracy to suit various application requirements. For more information on model selection, visit the [Ultralytics models page](https://docs.ultralytics.com/models/).
### How is the COCO-Seg dataset structured and what subsets does it contain?
The COCO-Seg dataset is partitioned into three subsets for specific training and evaluation needs:
1. **Train2017**: Contains 118K images used primarily for training instance segmentation models.
2. **Val2017**: Comprises 5K images utilized for validation during the training process.
3. **Test2017**: Encompasses 20K images reserved for testing and benchmarking trained models. Note that ground truth annotations for this subset are not publicly available, and performance results are submitted to the [COCO evaluation server](https://codalab.lisn.upsaclay.fr/competitions/7383) for assessment.
For smaller experimentation needs, you might also consider using the [COCO8-seg dataset](https://docs.ultralytics.com/datasets/segment/coco8-seg/), which is a compact version containing just 8 images from the COCO train 2017 set.

---
comments: true
description: Discover the versatile and manageable COCO8-Seg dataset by Ultralytics, ideal for testing and debugging segmentation models or new detection approaches.
keywords: COCO8-Seg, Ultralytics, segmentation dataset, YOLO11, COCO 2017, model training, computer vision, dataset configuration
---
# COCO8-Seg Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) COCO8-Seg is a small, but versatile [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging segmentation models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Seg dataset, the `coco8-seg.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-seg.yaml).
!!! example "ultralytics/cfg/datasets/coco8-seg.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/coco8-seg.yaml"
```
## Usage
To train a YOLO11n-seg model on the COCO8-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the COCO8-Seg dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch-2.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the COCO8-Seg dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the COCO dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{lin2015microsoft,
title={Microsoft COCO: Common Objects in Context},
author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
year={2015},
eprint={1405.0312},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
## FAQ
### What is the COCO8-Seg dataset, and how is it used in Ultralytics YOLO11?
The **COCO8-Seg dataset** is a compact instance segmentation dataset by Ultralytics, consisting of the first 8 images from the COCO train 2017 set—4 images for training and 4 for validation. This dataset is tailored for testing and debugging segmentation models or experimenting with new detection methods. It is particularly useful with Ultralytics [YOLO11](https://github.com/ultralytics/ultralytics) and [HUB](https://hub.ultralytics.com/) for rapid iteration and pipeline error-checking before scaling to larger datasets. For detailed usage, refer to the model [Training](../../modes/train.md) page.
### How can I train a YOLO11n-seg model using the COCO8-Seg dataset?
To train a **YOLO11n-seg** model on the COCO8-Seg dataset for 100 epochs with an image size of 640, you can use Python or CLI commands. Here's a quick example:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # Load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
For a thorough explanation of available arguments and configuration options, you can check the [Training](../../modes/train.md) documentation.
### Why is the COCO8-Seg dataset important for model development and debugging?
The **COCO8-Seg dataset** is ideal for its manageability and diversity within a small size. It consists of only 8 images, providing a quick way to test and debug segmentation models or new detection approaches without the overhead of larger datasets. This makes it an efficient tool for sanity checks and pipeline error identification before committing to extensive training on large datasets. Learn more about dataset formats [here](https://docs.ultralytics.com/datasets/segment/).
### Where can I find the YAML configuration file for the COCO8-Seg dataset?
The YAML configuration file for the **COCO8-Seg dataset** is available in the Ultralytics repository. You can access the file directly [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-seg.yaml). The YAML file includes essential information about dataset paths, classes, and configuration settings required for model training and validation.
### What are some benefits of using mosaicing during training with the COCO8-Seg dataset?
Using **mosaicing** during training helps increase the diversity and variety of objects and scenes in each training batch. This technique combines multiple images into a single composite image, enhancing the model's ability to generalize to different object sizes, aspect ratios, and contexts within the scene. Mosaicing is beneficial for improving a model's robustness and [accuracy](https://www.ultralytics.com/glossary/accuracy), especially when working with small datasets like COCO8-Seg. For an example of mosaiced images, see the [Sample Images and Annotations](#sample-images-and-annotations) section.

---
comments: true
description: Explore the extensive Roboflow Crack Segmentation Dataset, perfect for transportation and public safety studies or self-driving car model development.
keywords: Roboflow, Crack Segmentation Dataset, Ultralytics, transportation safety, public safety, self-driving cars, computer vision, road safety, infrastructure maintenance, dataset
---
# Roboflow Universe Crack Segmentation Dataset
<a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-crack-segmentation-dataset.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Crack Segmentation Dataset In Colab"></a>
The [Roboflow](https://roboflow.com/?ref=ultralytics) [Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics) stands out as an extensive resource designed specifically for individuals involved in transportation and public safety studies. It is equally beneficial for those working on the development of self-driving car models or simply exploring [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications for recreational purposes.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/C4mc40YKm-g"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Crack segmentation using Ultralytics YOLOv9
</p>
Comprising a total of 4029 static images captured from diverse road and wall scenarios, this dataset emerges as a valuable asset for tasks related to crack segmentation. Whether you are delving into the intricacies of transportation research or seeking to enhance the [accuracy](https://www.ultralytics.com/glossary/accuracy) of your self-driving car models, this dataset provides a rich and varied collection of images to support your endeavors.
## Dataset Structure
The division of data within the Crack Segmentation Dataset is outlined as follows:
- **Training set**: Consists of 3717 images with corresponding annotations.
- **Testing set**: Comprises 112 images along with their respective annotations.
- **Validation set**: Includes 200 images with their corresponding annotations.
## Applications
Crack segmentation finds practical applications in [infrastructure maintenance](https://www.ultralytics.com/blog/using-ai-for-crack-detection-and-segmentation), aiding in the identification and assessment of structural damage. It also plays a crucial role in enhancing road safety by enabling automated systems to detect and address pavement cracks for timely repairs.
In industrial settings, crack detection using deep learning models like [Ultralytics YOLO](https://docs.ultralytics.com/models/) helps guarantee building integrity in construction, prevents costly downtimes in manufacturing, and makes road and pavement inspections safer and more effective. The ability to automatically identify and classify cracks based on severity levels allows maintenance teams to prioritize repairs efficiently.
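As an illustration of such an automated inspection step, the sketch below runs a crack-segmentation model on a pavement image and uses the total predicted mask area as a crude severity proxy (the model path, image path, and the idea of area-based severity are hypothetical, not part of the dataset itself):

```python
from ultralytics import YOLO

# Load a model trained on the crack segmentation dataset (placeholder path)
model = YOLO("path/to/crack-seg-best.pt")

results = model("path/to/pavement.jpg")

for result in results:
    if result.masks is None:
        print("No cracks detected")
        continue
    # Fraction of image pixels covered by predicted crack masks (rough severity proxy)
    coverage = float(result.masks.data.sum()) / result.masks.data[0].numel()
    print(f"{len(result.masks)} crack regions, {coverage:.2%} of the image covered")
```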
## Dataset YAML
A YAML (Yet Another Markup Language) file is employed to outline the configuration of the dataset, encompassing details about paths, classes, and other pertinent information. Specifically, for the Crack Segmentation dataset, the `crack-seg.yaml` file is managed and accessible at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/crack-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/crack-seg.yaml).
!!! example "ultralytics/cfg/datasets/crack-seg.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/crack-seg.yaml"
```
## Usage
To train an Ultralytics YOLO11n model on the Crack Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="crack-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=crack-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Crack Segmentation dataset comprises a varied collection of images and videos captured from multiple perspectives. Below are instances of data from the dataset, accompanied by their respective annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/crack-segmentation-sample.avif)
- This image presents an example of image [instance segmentation](https://docs.ultralytics.com/guides/instance-segmentation-and-tracking/), featuring annotated bounding boxes with masks outlining identified objects. The dataset includes a diverse array of images taken in different locations, environments, and densities, making it a comprehensive resource for developing models designed for this particular task.
- The example underscores the diversity and complexity found in the Crack segmentation dataset, emphasizing the crucial role of high-quality data in computer vision tasks.
## Citations and Acknowledgments
If you incorporate the crack segmentation dataset into your research or development endeavors, kindly reference the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{ crack-bphdr_dataset,
title = { crack Dataset },
type = { Open Source Dataset },
author = { University },
howpublished = { \url{ https://universe.roboflow.com/university-bswxt/crack-bphdr } },
url = { https://universe.roboflow.com/university-bswxt/crack-bphdr },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2022 },
month = { dec },
note = { visited on 2024-01-23 },
}
```
We would like to acknowledge the Roboflow team for creating and maintaining the Crack Segmentation dataset as a valuable resource for road safety and research projects. For more information about the Crack segmentation dataset and its creators, visit the [Crack Segmentation Dataset Page](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics).
## FAQ
### What is the Roboflow Crack Segmentation Dataset?
The [Roboflow Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics) is a comprehensive collection of 4029 static images designed specifically for transportation and public safety studies. It is ideal for tasks such as self-driving car model development and infrastructure maintenance. The dataset includes training, testing, and validation sets, aiding in accurate crack detection and segmentation.
### How do I train a model using the Crack Segmentation Dataset with Ultralytics YOLO11?
To train an Ultralytics YOLO11 model on the Crack Segmentation dataset, use the following code snippets. Detailed instructions and further parameters can be found on the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="crack-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=crack-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
### Why should I use the Crack Segmentation Dataset for my self-driving car project?
The Crack Segmentation Dataset is exceptionally suited for self-driving car projects due to its diverse collection of 4029 road and wall images, which provide a varied range of scenarios. This diversity enhances the accuracy and robustness of models trained for crack detection, crucial for maintaining road safety and ensuring timely infrastructure repairs. The dataset's comprehensive annotations make it ideal for [developing models](https://docs.ultralytics.com/guides/model-training-tips/) that can identify potential hazards on roadways.
### What unique features does Ultralytics YOLO offer for crack segmentation?
Ultralytics YOLO offers advanced real-time [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification capabilities that make it ideal for crack segmentation tasks. Its ability to handle large datasets and complex scenarios ensures high accuracy and efficiency. For example, the model [Training](../../modes/train.md), [Predict](../../modes/predict.md), and [Export](../../modes/export.md) modes cover comprehensive functionalities from training to deployment. Additionally, YOLO's [anchor-free detection](https://www.ultralytics.com/blog/benefits-ultralytics-yolo11-being-anchor-free-detector) approach improves performance on irregular shapes like cracks.
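After training, you can run the model's Predict mode on new imagery. The snippet below is a minimal sketch; the weights path and image path are placeholders for your own trained crack-segmentation model and test image.

```python
from ultralytics import YOLO

# Load your trained crack-segmentation weights (placeholder path)
model = YOLO("path/to/best.pt")

# Run segmentation inference on a road or wall image (placeholder path)
results = model.predict("path/to/road_surface.jpg", imgsz=640, conf=0.25)

# Overlay the predicted crack masks on the image
results[0].show()
```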
### How do I cite the Roboflow Crack Segmentation Dataset in my research paper?
If you incorporate the Crack Segmentation Dataset into your research, please use the following BibTeX reference:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{ crack-bphdr_dataset,
title = { crack Dataset },
type = { Open Source Dataset },
author = { University },
howpublished = { \url{ https://universe.roboflow.com/university-bswxt/crack-bphdr } },
url = { https://universe.roboflow.com/university-bswxt/crack-bphdr },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2022 },
month = { dec },
note = { visited on 2024-01-23 },
}
```
This citation format ensures proper accreditation to the creators of the dataset and acknowledges its use in your research.

View File

@ -0,0 +1,232 @@
---
comments: true
description: Explore the supported dataset formats for Ultralytics YOLO and learn how to prepare and use datasets for training object segmentation models.
keywords: Ultralytics, YOLO, instance segmentation, dataset formats, auto-annotation, COCO, segmentation models, training data
---
# Instance Segmentation Datasets Overview
Instance segmentation is a computer vision task that involves identifying and delineating individual objects within an image. This guide provides an overview of dataset formats supported by Ultralytics YOLO for instance segmentation tasks, along with instructions on how to prepare, convert, and use these datasets for training your models.
## Supported Dataset Formats
### Ultralytics YOLO format
The dataset label format used for training YOLO segmentation models is as follows:
1. One text file per image: Each image in the dataset has a corresponding text file with the same name as the image file and the ".txt" extension.
2. One row per object: Each row in the text file corresponds to one object instance in the image.
3. Object information per row: Each row contains the following information about the object instance:
- Object class index: An integer representing the class of the object (e.g., 0 for person, 1 for car, etc.).
- Object bounding coordinates: The bounding coordinates around the mask area, normalized to be between 0 and 1.
The format for a single row in the segmentation dataset file is as follows:
```
<class-index> <x1> <y1> <x2> <y2> ... <xn> <yn>
```
In this format, `<class-index>` is the index of the class for the object, and `<x1> <y1> <x2> <y2> ... <xn> <yn>` are the bounding coordinates of the object's segmentation mask. The coordinates are separated by spaces.
Here is an example of the YOLO dataset format for a single image with two objects made up of a 3-point segment and a 5-point segment.
```
0 0.681 0.485 0.670 0.487 0.676 0.487
1 0.504 0.000 0.501 0.004 0.498 0.004 0.493 0.010 0.492 0.0104
```
!!! tip
- The length of each row does **not** have to be equal.
- Each segmentation label must have a **minimum of 3 xy points**: `<class-index> <x1> <y1> <x2> <y2> <x3> <y3>`
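As an illustration of the normalization step, here is a minimal sketch that converts a pixel-space polygon into a single YOLO segmentation label row; the image size, class index, and polygon points are made up for the example.

```python
# Illustrative values only: image size, class index, and polygon are not from a real dataset
img_w, img_h = 1280, 720
class_index = 0
polygon_px = [(640, 200), (700, 240), (660, 300), (600, 260)]  # (x, y) pixel coordinates

# Normalize each coordinate to the 0-1 range expected by the YOLO format
coords = " ".join(f"{x / img_w:.6f} {y / img_h:.6f}" for x, y in polygon_px)
label_row = f"{class_index} {coords}"

# One row per object is written to the image's corresponding .txt label file
with open("image_name.txt", "w") as f:
    f.write(label_row + "\n")
```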
### Dataset YAML format
The Ultralytics framework uses a YAML file format to define the dataset and model configuration for training segmentation models. Here is an example of the YAML format used for defining a segmentation dataset:
```yaml
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco8-seg # dataset root dir (absolute or relative; if relative, it's relative to default datasets_dir)
train: images/train # train images (relative to 'path') 4 images
val: images/val # val images (relative to 'path') 4 images
test: # test images (optional)
# Classes (80 COCO classes)
names:
0: person
1: bicycle
2: car
# ...
77: teddy bear
78: hair drier
79: toothbrush
```
The `train` and `val` fields specify the paths to the directories containing the training and validation images, respectively.
`names` is a dictionary of class names. The order of the names should match the order of the object class indices in the YOLO dataset files.
## Usage
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Supported Datasets
Ultralytics YOLO supports various datasets for instance segmentation tasks. Here's a list of the most commonly used ones:
- [COCO](coco.md): A comprehensive dataset for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and captioning, featuring over 200K labeled images across a wide range of categories.
- [COCO8-seg](coco8-seg.md): A compact, 8-image subset of COCO designed for quick testing of segmentation model training, ideal for CI checks and workflow validation in the `ultralytics` repository.
- [COCO128-seg](coco.md): A smaller dataset for [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) tasks, containing a subset of 128 COCO images with segmentation annotations.
- [Carparts-seg](carparts-seg.md): A specialized dataset focused on the segmentation of car parts, ideal for automotive applications. It includes a variety of vehicles with detailed annotations of individual car components.
- [Crack-seg](crack-seg.md): A dataset tailored for the segmentation of cracks in various surfaces. Essential for infrastructure maintenance and quality control, it provides detailed imagery for training models to identify structural weaknesses.
- [Package-seg](package-seg.md): A dataset dedicated to the segmentation of different types of packaging materials and shapes. It's particularly useful for logistics and warehouse automation, aiding in the development of systems for package handling and sorting.
### Adding your own dataset
If you have your own dataset and would like to use it for training segmentation models with Ultralytics YOLO format, ensure that it follows the format specified above under "Ultralytics YOLO format". Convert your annotations to the required format and specify the paths, number of classes, and class names in the YAML configuration file.
## Port or Convert Label Formats
### COCO Dataset Format to YOLO Format
You can easily convert labels from the popular COCO dataset format to the YOLO format using the following code snippet:
!!! example
=== "Python"
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/", use_segments=True)
```
This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format.
Remember to double-check if the dataset you want to use is compatible with your model and follows the necessary format conventions. Properly formatted datasets are crucial for training successful object detection models.
## Auto-Annotation
Auto-annotation is an essential feature that allows you to generate a segmentation dataset using a pre-trained detection model. It enables you to quickly and accurately annotate a large number of images without the need for manual labeling, saving time and effort.
### Generate Segmentation Dataset Using a Detection Model
To auto-annotate your dataset using the Ultralytics framework, you can use the `auto_annotate` function as shown below:
!!! example
=== "Python"
```python
from ultralytics.data.annotator import auto_annotate
auto_annotate(data="path/to/images", det_model="yolo11x.pt", sam_model="sam_b.pt")
```
{% include "macros/sam-auto-annotate.md" %}
The `auto_annotate` function takes the path to your images, along with optional arguments for specifying the pre-trained detection model (e.g., [YOLO11](../../models/yolo11.md), [YOLOv8](../../models/yolov8.md), or other [models](../../models/index.md)), the segmentation model (e.g., [SAM](../../models/sam.md), [SAM2](../../models/sam-2.md), or [MobileSAM](../../models/mobile-sam.md)), the device to run the models on, and the output directory for saving the annotated results.
By leveraging the power of pre-trained models, auto-annotation can significantly reduce the time and effort required for creating high-quality segmentation datasets. This feature is particularly useful for researchers and developers working with large image collections, as it allows them to focus on model development and evaluation rather than manual annotation.
### Visualize Dataset Annotations
Before training your model, it's often helpful to visualize your dataset annotations to ensure they're correct. Ultralytics provides a utility function for this purpose:
```python
from ultralytics.data.utils import visualize_image_annotations
label_map = { # Define the label map with all annotated class labels.
0: "person",
1: "car",
}
# Visualize
visualize_image_annotations(
"path/to/image.jpg", # Input image path.
"path/to/annotations.txt", # Annotation file path for the image.
label_map,
)
```
This function draws bounding boxes, labels objects with class names, and adjusts text color for better readability, helping you identify and correct any annotation errors before training.
### Converting Segmentation Masks to YOLO Format
If you have segmentation masks in binary format, you can convert them to the YOLO segmentation format using:
```python
from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
# For datasets like COCO with 80 classes
convert_segment_masks_to_yolo_seg(masks_dir="path/to/masks_dir", output_dir="path/to/output_dir", classes=80)
```
This utility converts binary mask images into the YOLO segmentation format and saves them in the specified output directory.
## FAQ
### What dataset formats does Ultralytics YOLO support for instance segmentation?
Ultralytics YOLO supports several dataset formats for instance segmentation, with the primary format being its own Ultralytics YOLO format. Each image in your dataset needs a corresponding text file with one row per object, listing the class index and the normalized coordinates of the object's mask polygon. For more detailed instructions on the YOLO dataset format, visit the [Instance Segmentation Datasets Overview](#instance-segmentation-datasets-overview).
### How can I convert COCO dataset annotations to the YOLO format?
Converting COCO format annotations to YOLO format is straightforward using Ultralytics tools. You can use the `convert_coco` function from the `ultralytics.data.converter` module:
```python
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir="path/to/coco/annotations/", use_segments=True)
```
This script converts your COCO dataset annotations to the required YOLO format, making it suitable for training your YOLO models. For more details, refer to [Port or Convert Label Formats](#coco-dataset-format-to-yolo-format).
### How do I prepare a YAML file for training Ultralytics YOLO models?
To prepare a YAML file for training YOLO models with Ultralytics, you need to define the dataset paths and class names. Here's an example YAML configuration:
```yaml
path: ../datasets/coco8-seg # dataset root dir
train: images/train # train images (relative to 'path')
val: images/val # val images (relative to 'path')
names:
0: person
1: bicycle
2: car
# ...
```
Ensure you update the paths and class names according to your dataset. For more information, check the [Dataset YAML Format](#dataset-yaml-format) section.
### What is the auto-annotation feature in Ultralytics YOLO?
Auto-annotation in Ultralytics YOLO allows you to generate segmentation annotations for your dataset using a pre-trained detection model. This significantly reduces the need for manual labeling. You can use the `auto_annotate` function as follows:
```python
from ultralytics.data.annotator import auto_annotate
auto_annotate(data="path/to/images", det_model="yolo11x.pt", sam_model="sam_b.pt") # or sam_model="mobile_sam.pt"
```
This function automates the annotation process, making it faster and more efficient. For more details, explore the [Auto-Annotate Reference](https://docs.ultralytics.com/reference/data/annotator/#ultralytics.data.annotator.auto_annotate).

View File

@ -0,0 +1,178 @@
---
comments: true
description: Explore the Roboflow Package Segmentation Dataset. Optimize logistics and enhance vision models with curated images for package identification and sorting.
keywords: Roboflow, Package Segmentation Dataset, computer vision, package identification, logistics, warehouse automation, segmentation models, training data
---
# Roboflow Universe Package Segmentation Dataset
<a href="https://colab.research.google.com/github/ultralytics/notebooks/blob/main/notebooks/how-to-train-ultralytics-yolo-on-package-segmentation-dataset.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Package Segmentation Dataset In Colab"></a>
The [Roboflow](https://roboflow.com/?ref=ultralytics) [Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics) is a curated collection of images specifically tailored for tasks related to package segmentation in the field of [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv). This dataset is designed to assist researchers, developers, and enthusiasts working on projects related to package identification, sorting, and handling.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/im7xBCnPURg"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to do Package Segmentation using Ultralytics YOLO11 | Industrial Packages 🎉
</p>
Containing a diverse set of images showcasing various packages in different contexts and environments, the dataset serves as a valuable resource for training and evaluating segmentation models. Whether you are engaged in logistics, warehouse automation, or any application requiring precise package analysis, the Package Segmentation Dataset provides a targeted and comprehensive set of images to enhance the performance of your computer vision algorithms.
## Dataset Structure
The distribution of data in the Package Segmentation Dataset is structured as follows:
- **Training set**: Encompasses 1920 images accompanied by their corresponding annotations.
- **Testing set**: Consists of 89 images, each paired with its respective annotations.
- **Validation set**: Comprises 188 images, each with corresponding annotations.
## Applications
Package segmentation, facilitated by the Package Segmentation Dataset, is crucial for optimizing logistics, enhancing last-mile delivery, improving manufacturing quality control, and contributing to smart city solutions. From e-commerce to security applications, this dataset is a key resource, fostering innovation in computer vision for diverse and efficient package analysis applications.
### Smart Warehouses and Logistics
In modern warehouses, [vision AI solutions](https://www.ultralytics.com/solutions) can streamline operations by automating package identification and sorting. Computer vision models trained on this dataset can quickly detect and segment packages in real-time, even in challenging environments with dim lighting or cluttered spaces. This leads to faster processing times, reduced errors, and improved overall efficiency in logistics operations.
### Quality Control and Damage Detection
Package segmentation models can be used to identify damaged packages by analyzing their shape and appearance. By detecting irregularities or deformations in package outlines, these models help ensure that only intact packages proceed through the supply chain, reducing customer complaints and return rates.
## Dataset YAML
A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Package Segmentation dataset, the `package-seg.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/package-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/package-seg.yaml).
!!! example "ultralytics/cfg/datasets/package-seg.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/package-seg.yaml"
```
## Usage
To train the Ultralytics YOLO11n model on the Package Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="package-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=package-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
## Sample Data and Annotations
The Package Segmentation dataset comprises a varied collection of images captured from multiple perspectives. Below are instances of data from the dataset, accompanied by their respective annotations:
![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/dataset-sample-image-1.avif)
- This image shows an example of [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), featuring annotated bounding boxes with masks outlining recognized packages. The dataset incorporates a diverse collection of images taken in different locations, environments, and densities. It serves as a comprehensive resource for developing models specific to this task.
- The example emphasizes the diversity and complexity present in the dataset, underscoring the significance of high-quality data for computer vision tasks involving package segmentation.
## Benefits of Using YOLO11 for Package Segmentation
[Ultralytics YOLO11](https://docs.ultralytics.com/models/yolo11/) offers several advantages for package segmentation tasks:
1. **Speed and Accuracy Balance**: YOLO11m achieves higher mAP with 22% fewer parameters than YOLOv8m, making it ideal for real-time package detection in fast-paced logistics environments.
2. **Adaptability**: Models trained with YOLO11 can adapt to various warehouse conditions, from dim lighting to cluttered spaces.
3. **Scalability**: During peak periods like holiday seasons, YOLO11 models can efficiently scale to handle increased package volumes without compromising performance.
4. **Integration Capabilities**: YOLO11 can be easily integrated with existing warehouse management systems to create end-to-end automated solutions, as sketched below.
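One common integration path is exporting a trained model to a portable format that downstream warehouse software can consume. The following is a sketch under the assumption that you already have trained weights (the path is a placeholder); it uses the standard Ultralytics Export mode.

```python
from ultralytics import YOLO

# Placeholder path to your trained package-segmentation weights
model = YOLO("path/to/best.pt")

# Export to ONNX so the model can be served from non-Python runtimes
onnx_path = model.export(format="onnx", imgsz=640)
print(f"Exported model saved to {onnx_path}")
```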
## Citations and Acknowledgments
If you integrate the Package Segmentation dataset into your research or development initiatives, please cite the following source:
!!! quote ""
=== "BibTeX"
```bibtex
@misc{ factory_package_dataset,
title = { factory_package Dataset },
type = { Open Source Dataset },
author = { factorypackage },
howpublished = { \url{ https://universe.roboflow.com/factorypackage/factory_package } },
url = { https://universe.roboflow.com/factorypackage/factory_package },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2024 },
month = { jan },
note = { visited on 2024-01-24 },
}
```
We express our gratitude to the Roboflow team for their efforts in creating and maintaining the Package Segmentation dataset, a valuable asset for logistics and research projects. For additional details about the Package Segmentation dataset and its creators, please visit the [Package Segmentation Dataset Page](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics).
## FAQ
### What is the Roboflow Package Segmentation Dataset and how can it help in computer vision projects?
The [Roboflow Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics) is a curated collection of images tailored for tasks involving package segmentation. It includes diverse images of packages in various contexts, making it invaluable for training and evaluating segmentation models. This dataset is particularly useful for applications in logistics, warehouse automation, and any project requiring precise package analysis. It helps optimize logistics and enhance vision models for accurate package identification and sorting.
### How do I train an Ultralytics YOLO11 model on the Package Segmentation Dataset?
You can train an Ultralytics YOLO11n model using both Python and CLI methods. Use the snippets below:
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-seg.pt") # load a pretrained model
# Train the model
results = model.train(data="package-seg.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo segment train data=package-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640
```
Refer to the model [Training](../../modes/train.md) page for more details.
### What are the components of the Package Segmentation Dataset, and how is it structured?
The dataset is structured into three main components:
- **Training set**: Contains 1920 images with annotations.
- **Testing set**: Comprises 89 images with corresponding annotations.
- **Validation set**: Includes 188 images with annotations.
This structure ensures a balanced dataset for thorough model training, validation, and testing, enhancing the performance of segmentation algorithms.
### Why should I use Ultralytics YOLO11 with the Package Segmentation Dataset?
Ultralytics YOLO11 provides state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed for real-time object detection and segmentation tasks. Using it with the Package Segmentation Dataset allows you to leverage YOLO11's capabilities for precise package segmentation. This combination is especially beneficial for industries like logistics and warehouse automation, where accurate package identification is critical. For more information, check out our [page on YOLO11 segmentation](https://docs.ultralytics.com/models/yolo11/).
### How can I access and use the package-seg.yaml file for the Package Segmentation Dataset?
The `package-seg.yaml` file is hosted on Ultralytics' GitHub repository and contains essential information about the dataset's paths, classes, and configuration. You can download it from [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/package-seg.yaml). This file is crucial for configuring your models to utilize the dataset efficiently.
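As a minimal sketch of how the YAML is consumed, you can reference it by name in a validation or training call and Ultralytics will resolve (and, if needed, download) the dataset it defines:

```python
from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")

# Evaluate on the Package Segmentation validation split defined in package-seg.yaml
metrics = model.val(data="package-seg.yaml", imgsz=640)
print(metrics.seg.map)  # mask mAP50-95
```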
For more insights and practical examples, explore our [Usage](https://docs.ultralytics.com/usage/python/) section.

View File

@ -0,0 +1,142 @@
---
comments: true
description: Learn how to use Multi-Object Tracking with YOLO. Explore dataset formats, tracking algorithms, and implementation examples using Python or CLI for real-time object tracking.
keywords: YOLO, Multi-Object Tracking, Tracking Datasets, Python Tracking Example, CLI Tracking Example, Object Detection, Ultralytics, AI, Machine Learning, BoT-SORT, ByteTrack
---
# Multi-object Tracking Datasets Overview
Multi-object tracking is a critical component in video analytics that identifies objects and maintains unique IDs for each detected object across video frames. Ultralytics YOLO provides powerful tracking capabilities that can be applied to various domains including surveillance, sports analytics, and traffic monitoring.
## Dataset Format (Coming Soon)
Multi-object tracking does not require standalone training; it works directly with pretrained detection, segmentation, or pose models. Support for training trackers on their own is coming soon.
## Available Trackers
Ultralytics YOLO supports the following tracking algorithms:
- [BoT-SORT](https://github.com/NirAharon/BoT-SORT) - Use `botsort.yaml` to enable this tracker (default)
- [ByteTrack](https://github.com/ifzhang/ByteTrack) - Use `bytetrack.yaml` to enable this tracker
## Usage
!!! example
=== "Python"
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt")
results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True)
```
=== "CLI"
```bash
yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show
```
## Persisting Tracks Between Frames
For continuous tracking across video frames, you can use the `persist=True` parameter:
!!! example
=== "Python"
```python
import cv2
from ultralytics import YOLO
# Load the YOLO model
model = YOLO("yolo11n.pt")
# Open the video file
cap = cv2.VideoCapture("path/to/video.mp4")
while cap.isOpened():
success, frame = cap.read()
if success:
# Run tracking with persistence between frames
results = model.track(frame, persist=True)
# Visualize the results
annotated_frame = results[0].plot()
cv2.imshow("Tracking", annotated_frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
break
cap.release()
cv2.destroyAllWindows()
```
## FAQ
### How do I use Multi-Object Tracking with Ultralytics YOLO?
To use Multi-Object Tracking with Ultralytics YOLO, you can start by using the Python or CLI examples provided. Here is how you can get started:
!!! example
=== "Python"
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt") # Load the YOLO11 model
results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True)
```
=== "CLI"
```bash
yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show
```
These commands load the YOLO11 model and use it for tracking objects in the given video source with specific confidence (`conf`) and [Intersection over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (`iou`) thresholds. For more details, refer to the [track mode documentation](../../modes/track.md).
### What are the upcoming features for training trackers in Ultralytics?
Ultralytics is continuously enhancing its AI models. An upcoming feature will enable the training of standalone trackers. Until then, multi-object tracking leverages pretrained detection, segmentation, or pose models without requiring standalone training. Stay updated by following our [blog](https://www.ultralytics.com/blog) or checking the [upcoming features](../../reference/trackers/track.md).
### Why should I use Ultralytics YOLO for multi-object tracking?
Ultralytics YOLO is a state-of-the-art [object detection](https://www.ultralytics.com/glossary/object-detection) model known for its real-time performance and high [accuracy](https://www.ultralytics.com/glossary/accuracy). Using YOLO for multi-object tracking provides several advantages:
- **Real-time tracking:** Achieve efficient and high-speed tracking ideal for dynamic environments.
- **Flexibility with pre-trained models:** No need to train from scratch; simply use pre-trained detection, segmentation, or Pose models.
- **Ease of use:** Simple API integration with both Python and CLI makes setting up tracking pipelines straightforward.
- **Extensive documentation and community support:** Ultralytics provides comprehensive documentation and an active community forum to troubleshoot issues and enhance your tracking models.
For more details on setting up and using YOLO for tracking, visit our [track usage guide](../../modes/track.md).
### Can I use custom datasets for multi-object tracking with Ultralytics YOLO?
Yes, you can use custom datasets for multi-object tracking with Ultralytics YOLO. While support for standalone tracker training is an upcoming feature, you can already use pre-trained models on your custom datasets. Prepare your datasets in the appropriate format compatible with YOLO and follow the documentation to integrate them.
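For example, once you have trained a detector on your own data (or are using a pretrained model), tracking works the same way; the weights and video paths below are placeholders.

```python
from ultralytics import YOLO

# Placeholder path to a model trained on your custom dataset
model = YOLO("path/to/custom_best.pt")

# Track objects in a video using the custom model with the ByteTrack tracker
results = model.track(source="path/to/video.mp4", tracker="bytetrack.yaml", show=True)
```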
### How do I interpret the results from the Ultralytics YOLO tracking model?
After running a tracking job with Ultralytics YOLO, the results include various data points such as tracked object IDs, their bounding boxes, and the confidence scores. Here's a brief overview of how to interpret these results:
- **Tracked IDs:** Each object is assigned a unique ID, which helps in tracking it across frames.
- **Bounding boxes:** These indicate the location of tracked objects within the frame.
- **Confidence scores:** These reflect the model's confidence in detecting the tracked object.
For detailed guidance on interpreting and visualizing these results, refer to the [results handling guide](../../reference/engine/results.md).
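As a minimal sketch (assuming a local video path), the per-frame results expose these values through the `boxes` attribute:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
results = model.track(source="path/to/video.mp4")

# Inspect the detections of the first frame
boxes = results[0].boxes
if boxes.id is not None:  # track IDs are only present when tracking succeeds
    track_ids = boxes.id.int().tolist()  # unique ID per tracked object
    xyxy = boxes.xyxy.tolist()  # bounding boxes in pixel coordinates
    confs = boxes.conf.tolist()  # detection confidence scores
    print(list(zip(track_ids, confs)))
```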
### How can I customize the tracker configuration?
You can customize the tracker by creating a modified version of the tracker configuration file. Copy an existing tracker config file from [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers), modify the parameters as needed, and specify this file when running the tracker:
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt")
results = model.track(source="video.mp4", tracker="custom_tracker.yaml")
```

View File

@ -0,0 +1,330 @@
---
comments: true
description: Learn to create line graphs, bar plots, and pie charts using Python with guided instructions and code snippets. Maximize your data visualization skills!
keywords: Ultralytics, YOLO11, data visualization, line graphs, bar plots, pie charts, Python, analytics, tutorial, guide
---
# Analytics using Ultralytics YOLO11
## Introduction
This guide provides a comprehensive overview of three fundamental types of [data visualizations](https://www.ultralytics.com/glossary/data-visualization): line graphs, bar plots, and pie charts. Each section includes step-by-step instructions and code snippets on how to create these visualizations using Python.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/tVuLIMt4DMY"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to generate Analytical Graphs using Ultralytics | Line Graphs, Bar Plots, Area and Pie Charts
</p>
### Visual Samples
| Line Graph | Bar Plot | Pie Chart |
| :------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------: |
| ![Line Graph](https://github.com/ultralytics/docs/releases/download/0/line-graph.avif) | ![Bar Plot](https://github.com/ultralytics/docs/releases/download/0/bar-plot.avif) | ![Pie Chart](https://github.com/ultralytics/docs/releases/download/0/pie-chart.avif) |
### Why Graphs are Important
- Line graphs are ideal for tracking changes over short and long periods and for comparing changes for multiple groups over the same period.
- Bar plots, on the other hand, are suitable for comparing quantities across different categories and showing relationships between a category and its numerical value.
- Lastly, pie charts are effective for illustrating proportions among categories and showing parts of a whole.
!!! example "Analytics using Ultralytics YOLO"
=== "CLI"
```bash
yolo solutions analytics show=True
# Pass the source
yolo solutions analytics source="path/to/video/file.mp4"
# Generate the pie chart
yolo solutions analytics analytics_type="pie" show=True
# Generate the bar plots
yolo solutions analytics analytics_type="bar" show=True
# Generate the area plots
yolo solutions analytics analytics_type="area" show=True
```
=== "Python"
```python
import cv2
from ultralytics import solutions
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
# Video writer
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(
"analytics_output.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
fps,
(1280, 720), # this is fixed
)
# Initialize analytics object
analytics = solutions.Analytics(
show=True, # display the output
analytics_type="line", # pass the analytics type, could be "pie", "bar" or "area".
model="yolo11n.pt", # path to the YOLO11 model file
# classes=[0, 2], # display analytics for specific detection classes
)
# Process video
frame_count = 0
while cap.isOpened():
success, im0 = cap.read()
if success:
frame_count += 1
results = analytics(im0, frame_count) # update analytics graph every frame
# print(results) # access the output
out.write(results.plot_im) # write the video file
else:
break
cap.release()
out.release()
cv2.destroyAllWindows() # destroy all opened windows
```
### `Analytics` Arguments
Here's a table outlining the Analytics arguments:
{% from "macros/solutions-args.md" import param_table %}
{{ param_table(["model", "analytics_type"]) }}
You can also leverage different [`track`](../modes/track.md) arguments in the `Analytics` solution.
{% from "macros/track-args.md" import param_table %}
{{ param_table(["tracker", "conf", "iou", "classes", "verbose", "device"]) }}
Additionally, the following visualization arguments are supported:
{% from "macros/visualization-args.md" import param_table %}
{{ param_table(["show", "line_width"]) }}
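For instance, the solution, track, and visualization arguments from the tables above can be combined in a single constructor call; the class indices below assume the COCO label map and are illustrative only.

```python
from ultralytics import solutions

analytics = solutions.Analytics(
    model="yolo11n.pt",  # solution argument: model weights
    analytics_type="bar",  # solution argument: chart type
    conf=0.4,  # track argument: confidence threshold
    classes=[0, 2],  # track argument: restrict to persons and cars (COCO indices, illustrative)
    show=True,  # visualization argument: display the output
    line_width=2,  # visualization argument: annotation line width
)
```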
## Conclusion
Understanding when and how to use different types of visualizations is crucial for effective data analysis. Line graphs, bar plots, and pie charts are fundamental tools that can help you convey your data's story more clearly and effectively. The Ultralytics YOLO11 Analytics solution provides a streamlined way to generate these visualizations from your [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking results, making it easier to extract meaningful insights from your visual data.
## FAQ
### How do I create a line graph using Ultralytics YOLO11 Analytics?
To create a line graph using Ultralytics YOLO11 Analytics, follow these steps:
1. Load a YOLO11 model and open your video file.
2. Initialize the `Analytics` class with the type set to "line."
3. Iterate through video frames, updating the line graph with relevant data, such as object counts per frame.
4. Save the output video displaying the line graph.
Example:
```python
import cv2
from ultralytics import solutions
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(
"ultralytics_analytics.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
fps,
(1280, 720), # this is fixed
)
analytics = solutions.Analytics(
analytics_type="line",
show=True,
)
frame_count = 0
while cap.isOpened():
success, im0 = cap.read()
if success:
frame_count += 1
results = analytics(im0, frame_count) # update analytics graph every frame
out.write(results.plot_im) # write the video file
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
```
For further details on configuring the `Analytics` class, visit the [Analytics using Ultralytics YOLO11](#analytics-using-ultralytics-yolo11) section.
### What are the benefits of using Ultralytics YOLO11 for creating bar plots?
Using Ultralytics YOLO11 for creating bar plots offers several benefits:
1. **Real-time Data Visualization**: Seamlessly integrate [object detection](https://www.ultralytics.com/glossary/object-detection) results into bar plots for dynamic updates.
2. **Ease of Use**: Simple API and functions make it straightforward to implement and visualize data.
3. **Customization**: Customize titles, labels, colors, and more to fit your specific requirements.
4. **Efficiency**: Efficiently handle large amounts of data and update plots in real-time during video processing.
Use the following example to generate a bar plot:
```python
import cv2
from ultralytics import solutions
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(
"ultralytics_analytics.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
fps,
(1280, 720), # this is fixed
)
analytics = solutions.Analytics(
analytics_type="bar",
show=True,
)
frame_count = 0
while cap.isOpened():
success, im0 = cap.read()
if success:
frame_count += 1
results = analytics(im0, frame_count) # update analytics graph every frame
out.write(results.plot_im) # write the video file
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
```
To learn more, visit the [Bar Plot](#visual-samples) section in the guide.
### Why should I use Ultralytics YOLO11 for creating pie charts in my data visualization projects?
Ultralytics YOLO11 is an excellent choice for creating pie charts because:
1. **Integration with Object Detection**: Directly integrate object detection results into pie charts for immediate insights.
2. **User-Friendly API**: Simple to set up and use with minimal code.
3. **Customizable**: Various customization options for colors, labels, and more.
4. **Real-time Updates**: Handle and visualize data in real-time, which is ideal for video analytics projects.
Here's a quick example:
```python
import cv2
from ultralytics import solutions
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(
"ultralytics_analytics.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
fps,
(1280, 720), # this is fixed
)
analytics = solutions.Analytics(
analytics_type="pie",
show=True,
)
frame_count = 0
while cap.isOpened():
success, im0 = cap.read()
if success:
frame_count += 1
results = analytics(im0, frame_count) # update analytics graph every frame
out.write(results.plot_im) # write the video file
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
```
For more information, refer to the [Pie Chart](#visual-samples) section in the guide.
### Can Ultralytics YOLO11 be used to track objects and dynamically update visualizations?
Yes, Ultralytics YOLO11 can be used to track objects and dynamically update visualizations. It supports tracking multiple objects in real-time and can update various visualizations like line graphs, bar plots, and pie charts based on the tracked objects' data.
Example for tracking and updating a line graph:
```python
import cv2
from ultralytics import solutions
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
out = cv2.VideoWriter(
"ultralytics_analytics.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
fps,
(1280, 720), # this is fixed
)
analytics = solutions.Analytics(
analytics_type="line",
show=True,
)
frame_count = 0
while cap.isOpened():
success, im0 = cap.read()
if success:
frame_count += 1
results = analytics(im0, frame_count) # update analytics graph every frame
out.write(results.plot_im) # write the video file
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
```
To learn about the complete functionality, see the [Tracking](../modes/track.md) section.
### What makes Ultralytics YOLO11 different from other object detection solutions like [OpenCV](https://www.ultralytics.com/glossary/opencv) and [TensorFlow](https://www.ultralytics.com/glossary/tensorflow)?
Ultralytics YOLO11 stands out from other object detection solutions like OpenCV and TensorFlow for multiple reasons:
1. **State-of-the-art [Accuracy](https://www.ultralytics.com/glossary/accuracy)**: YOLO11 provides superior accuracy in object detection, segmentation, and classification tasks.
2. **Ease of Use**: User-friendly API allows for quick implementation and integration without extensive coding.
3. **Real-time Performance**: Optimized for high-speed inference, suitable for real-time applications.
4. **Diverse Applications**: Supports various tasks including multi-object tracking, custom model training, and exporting to different formats like ONNX, TensorRT, and CoreML.
5. **Comprehensive Documentation**: Extensive [documentation](https://docs.ultralytics.com/) and [blog resources](https://www.ultralytics.com/blog) to guide users through every step.
For more detailed comparisons and use cases, explore our [Ultralytics Blog](https://www.ultralytics.com/blog/ai-use-cases-transforming-your-future).

View File

@ -0,0 +1,228 @@
---
comments: true
description: Learn how to run YOLO11 on AzureML. Quickstart instructions for terminal and notebooks to harness Azure's cloud computing for efficient model training.
keywords: YOLO11, AzureML, machine learning, cloud computing, quickstart, terminal, notebooks, model training, Python SDK, AI, Ultralytics
---
# YOLO11 🚀 on AzureML
## What is Azure?
[Azure](https://azure.microsoft.com/) is Microsoft's [cloud computing](https://www.ultralytics.com/glossary/cloud-computing) platform, designed to help organizations move their workloads to the cloud from on-premises data centers. With the full spectrum of cloud services including those for computing, databases, analytics, [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml), and networking, users can pick and choose from these services to develop and scale new applications, or run existing applications, in the public cloud.
## What is Azure Machine Learning (AzureML)?
Azure Machine Learning, commonly referred to as AzureML, is a fully managed cloud service that enables data scientists and developers to efficiently embed predictive analytics into their applications, helping organizations use massive data sets and bring all the benefits of the cloud to machine learning. AzureML offers a variety of services and capabilities aimed at making machine learning accessible, easy to use, and scalable. It provides capabilities like automated machine learning, drag-and-drop model training, as well as a robust Python SDK so that developers can make the most out of their machine learning models.
## How Does AzureML Benefit YOLO Users?
For users of YOLO (You Only Look Once), AzureML provides a robust, scalable, and efficient platform to both train and deploy machine learning models. Whether you are looking to run quick prototypes or scale up to handle more extensive data, AzureML's flexible and user-friendly environment offers various tools and services to fit your needs. You can leverage AzureML to:
- Easily manage large datasets and computational resources for training.
- Utilize built-in tools for data preprocessing, feature selection, and model training.
- Collaborate more efficiently with capabilities for MLOps (Machine Learning Operations), including but not limited to monitoring, auditing, and versioning of models and data.
In the subsequent sections, you will find a quickstart guide detailing how to run YOLO11 object detection models using AzureML, either from a compute terminal or a notebook.
## Prerequisites
Before you can get started, make sure you have access to an AzureML workspace. If you don't have one, you can create a new [AzureML workspace](https://learn.microsoft.com/azure/machine-learning/concept-workspace?view=azureml-api-2) by following Azure's official documentation. This workspace acts as a centralized place to manage all AzureML resources.
## Create a compute instance
From your AzureML workspace, select Compute > Compute instances > New, select the instance with the resources you need.
<p align="center">
<img width="1280" src="https://github.com/ultralytics/docs/releases/download/0/create-compute-arrow.avif" alt="Create Azure Compute Instance">
</p>
## Quickstart from Terminal
Start your compute and open a Terminal:
<p align="center">
<img width="480" src="https://github.com/ultralytics/docs/releases/download/0/open-terminal.avif" alt="Open Terminal">
</p>
### Create virtualenv
Create your conda virtual environment and install pip in it. Python 3.13.1 currently has dependency issues in AzureML, so the example below pins Python 3.12:
```bash
conda create --name yolo11env -y python=3.12
conda activate yolo11env
conda install pip -y
```
Install the required dependencies:
```bash
cd ultralytics
pip install -r requirements.txt
pip install ultralytics
pip install "onnx>=1.12.0"
```
### Perform YOLO11 tasks
Predict:
```bash
yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
```
Train a detection model for 10 [epochs](https://www.ultralytics.com/glossary/epoch) with an initial learning_rate of 0.01:
```bash
yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
```
You can find more [instructions to use the Ultralytics CLI here](../quickstart.md#use-ultralytics-with-cli).
## Quickstart from a Notebook
### Create a new IPython kernel
Open the compute Terminal.
<p align="center">
<img width="480" src="https://github.com/ultralytics/docs/releases/download/0/open-terminal.avif" alt="Open Terminal">
</p>
From your compute terminal, create a new ipykernel with a supported Python version (Python 3.13.1 currently has dependency issues with some packages in AzureML); your notebook will use this kernel to manage its dependencies:
```bash
conda create --name yolo11env -y python=3.12
conda activate yolo11env
conda install pip -y
conda install ipykernel -y
python -m ipykernel install --user --name yolo11env --display-name "yolo11env"
```
Close your terminal and create a new notebook. From your Notebook, you can select the new kernel.
Then you can open a Notebook cell and install the required dependencies:
```bash
%%bash
source activate yolo11env
cd ultralytics
pip install -r requirements.txt
pip install ultralytics
pip install "onnx>=1.12.0"
```
Note that we need to run `source activate yolo11env` in every `%%bash` cell to make sure each cell uses the environment we want.
Run some predictions using the [Ultralytics CLI](../quickstart.md#use-ultralytics-with-cli):
```bash
%%bash
source activate yolo11env
yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
```
Or with the [Ultralytics Python interface](../quickstart.md#use-ultralytics-with-python), for example to train the model:
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n.pt") # load an official YOLO11n model
# Use the model
model.train(data="coco8.yaml", epochs=3) # train the model
metrics = model.val() # evaluate model performance on the validation set
results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
path = model.export(format="onnx") # export the model to ONNX format
```
You can use either the Ultralytics CLI or Python interface for running YOLO11 tasks, as described in the terminal section above.
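If you ran the export step above, one quick sanity check (a sketch, assuming the exported file sits next to your script) is to load the ONNX file back through the same API and run a prediction with it:

```python
from ultralytics import YOLO

# Load the exported ONNX model (path produced by model.export above)
onnx_model = YOLO("yolo11n.onnx")

# Run a prediction with the exported model to confirm it works end to end
results = onnx_model("https://ultralytics.com/images/bus.jpg")
results[0].show()
```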
By following these steps, you should be able to get YOLO11 running quickly on AzureML for quick trials. For more advanced uses, you may refer to the full AzureML documentation linked at the beginning of this guide.
## Explore More with AzureML
This guide serves as an introduction to get you up and running with YOLO11 on AzureML. However, it only scratches the surface of what AzureML can offer. To delve deeper and unlock the full potential of AzureML for your machine learning projects, consider exploring the following resources:
- [Create a Data Asset](https://learn.microsoft.com/azure/machine-learning/how-to-create-data-assets): Learn how to set up and manage your data assets effectively within the AzureML environment.
- [Initiate an AzureML Job](https://learn.microsoft.com/azure/machine-learning/how-to-train-model): Get a comprehensive understanding of how to kickstart your machine learning training jobs on AzureML.
- [Register a Model](https://learn.microsoft.com/azure/machine-learning/how-to-manage-models): Familiarize yourself with model management practices including registration, versioning, and deployment.
- [Train YOLO11 with AzureML Python SDK](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azure-machine-learning-python-sdk-8268696be8ba): Explore a step-by-step guide on using the AzureML Python SDK to train your YOLO11 models.
- [Train YOLO11 with AzureML CLI](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azureml-and-the-az-cli-73d3c870ba8e): Discover how to utilize the command-line interface for streamlined training and management of YOLO11 models on AzureML.
## FAQ
### How do I run YOLO11 on AzureML for model training?
Running YOLO11 on AzureML for model training involves several steps:
1. **Create a Compute Instance**: From your AzureML workspace, navigate to Compute > Compute instances > New, and select the required instance.
2. **Setup Environment**: Start your compute instance, open a terminal, and create a conda environment with a supported Python version (Python 3.13.1 is not supported yet):
```bash
conda create --name yolo11env -y python=3.12
conda activate yolo11env
conda install pip -y
pip install ultralytics "onnx>=1.12.0"
```
3. **Run YOLO11 Tasks**: Use the Ultralytics CLI to train your model:
```bash
yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
```
For more details, you can refer to the [instructions to use the Ultralytics CLI](../quickstart.md#use-ultralytics-with-cli).
### What are the benefits of using AzureML for YOLO11 training?
AzureML provides a robust and efficient ecosystem for training YOLO11 models:
- **Scalability**: Easily scale your compute resources as your data and model complexity grows.
- **MLOps Integration**: Utilize features like versioning, monitoring, and auditing to streamline ML operations.
- **Collaboration**: Share and manage resources within teams, enhancing collaborative workflows.
These advantages make AzureML an ideal platform for projects ranging from quick prototypes to large-scale deployments. For more tips, check out [AzureML Jobs](https://learn.microsoft.com/azure/machine-learning/how-to-train-model).
### How do I troubleshoot common issues when running YOLO11 on AzureML?
Troubleshooting common issues with YOLO11 on AzureML can involve the following steps:
- **Dependency Issues**: Ensure all required packages are installed. Refer to the `requirements.txt` file for dependencies.
- **Environment Setup**: Verify that your conda environment is correctly activated before running commands.
- **Resource Allocation**: Make sure your compute instances have sufficient resources to handle the training workload.
For additional guidance, review our [YOLO Common Issues](https://docs.ultralytics.com/guides/yolo-common-issues/) documentation.
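As a quick first step, the built-in environment check prints your Ultralytics, Python, and Torch versions along with the detected hardware, which often pinpoints dependency or resource problems immediately:

```python
import ultralytics

# Print environment details: versions, OS, and CPU/GPU availability
ultralytics.checks()
```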
### Can I use both the Ultralytics CLI and Python interface on AzureML?
Yes, AzureML allows you to use both the Ultralytics CLI and the Python interface seamlessly:
- **CLI**: Ideal for quick tasks and running standard scripts directly from the terminal.
```bash
yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
```
- **Python Interface**: Useful for more complex tasks requiring custom coding and integration within notebooks.
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt")
model.train(data="coco8.yaml", epochs=3)
```
Refer to the quickstart guides for more detailed instructions [here](../quickstart.md#use-ultralytics-with-cli) and [here](../quickstart.md#use-ultralytics-with-python).
### What is the advantage of using Ultralytics YOLO11 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models?
Ultralytics YOLO11 offers several unique advantages over competing object detection models:
- **Speed**: Faster inference and training times compared to models like Faster R-CNN and SSD.
- **[Accuracy](https://www.ultralytics.com/glossary/accuracy)**: High accuracy in detection tasks with features like anchor-free design and enhanced augmentation strategies.
- **Ease of Use**: Intuitive API and CLI for quick setup, making it accessible both to beginners and experts.
To explore more about YOLO11's features, visit the [Ultralytics YOLO](https://www.ultralytics.com/yolo) page for detailed insights.

View File

@ -0,0 +1,192 @@
---
comments: true
description: Learn to set up a Conda environment for Ultralytics projects. Follow our comprehensive guide for easy installation and initialization.
keywords: Ultralytics, Conda, setup, installation, environment, guide, machine learning, data science
---
# Conda Quickstart Guide for Ultralytics
<p align="center">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-conda-package-visual.avif" alt="Ultralytics Conda Package Visual">
</p>
This guide provides a comprehensive introduction to setting up a Conda environment for your Ultralytics projects. Conda is an open-source package and environment management system that offers an excellent alternative to pip for installing packages and dependencies. Its isolated environments make it particularly well-suited for data science and [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) endeavors. For more details, visit the Ultralytics Conda package on [Anaconda](https://anaconda.org/conda-forge/ultralytics) and check out the Ultralytics feedstock repository for package updates on [GitHub](https://github.com/conda-forge/ultralytics-feedstock/).
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics)
[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/ultralytics.svg)](https://anaconda.org/conda-forge/ultralytics)
[![Conda Recipe](https://img.shields.io/badge/recipe-ultralytics-green.svg)](https://anaconda.org/conda-forge/ultralytics)
[![Conda Platforms](https://img.shields.io/conda/pn/conda-forge/ultralytics.svg)](https://anaconda.org/conda-forge/ultralytics)
## What You Will Learn
- Setting up a Conda environment
- Installing Ultralytics via Conda
- Initializing Ultralytics in your environment
- Using Ultralytics Docker images with Conda
---
## Prerequisites
- You should have Anaconda or Miniconda installed on your system. If not, download and install it from [Anaconda](https://www.anaconda.com/) or [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/).
---
## Setting up a Conda Environment
First, let's create a new Conda environment. Open your terminal and run the following command:
```bash
conda create --name ultralytics-env python=3.11 -y
```
Activate the new environment:
```bash
conda activate ultralytics-env
```
---
## Installing Ultralytics
You can install the Ultralytics package from the conda-forge channel. Execute the following command:
```bash
conda install -c conda-forge ultralytics
```
### Note on CUDA Environment
If you're working in a CUDA-enabled environment, it's a good practice to install `ultralytics`, `pytorch`, and `pytorch-cuda` together to resolve any conflicts:
```bash
conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=11.8 ultralytics
```
---
## Using Ultralytics
With Ultralytics installed, you can now start using its robust features for [object detection](https://www.ultralytics.com/glossary/object-detection), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and more. For example, to predict an image, you can run:
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt") # initialize model
results = model("path/to/image.jpg") # perform inference
results[0].show() # display results for the first image
```
---
## Ultralytics Conda Docker Image
If you prefer using Docker, Ultralytics offers Docker images with a Conda environment included. You can pull these images from [DockerHub](https://hub.docker.com/r/ultralytics/ultralytics).
Pull the latest Ultralytics image:
```bash
# Set image name as a variable
t=ultralytics/ultralytics:latest-conda
# Pull the latest Ultralytics image from Docker Hub
sudo docker pull $t
```
Run the image:
```bash
# Run the Ultralytics image in a container with GPU support
sudo docker run -it --ipc=host --gpus all $t # all GPUs
sudo docker run -it --ipc=host --gpus '"device=2,3"' $t # specify GPUs
```
## Speeding Up Installation with Libmamba
If you're looking to [speed up the package installation](https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community) process in Conda, you can opt to use `libmamba`, a fast, cross-platform, and dependency-aware package manager that serves as an alternative solver to Conda's default.
### How to Enable Libmamba
To enable `libmamba` as the solver for Conda, you can perform the following steps:
1. First, install the `conda-libmamba-solver` package. This can be skipped if your Conda version is 4.11 or above, as `libmamba` is included by default.
```bash
conda install conda-libmamba-solver
```
2. Next, configure Conda to use `libmamba` as the solver:
```bash
conda config --set solver libmamba
```
And that's it! Your Conda installation will now use `libmamba` as the solver, which should result in a faster package installation process.
---
Congratulations! You have successfully set up a Conda environment, installed the Ultralytics package, and are now ready to explore its rich functionalities. Feel free to dive deeper into the [Ultralytics documentation](../index.md) for more advanced tutorials and examples.
## FAQ
### What is the process for setting up a Conda environment for Ultralytics projects?
Setting up a Conda environment for Ultralytics projects is straightforward and ensures smooth package management. First, create a new Conda environment using the following command:
```bash
conda create --name ultralytics-env python=3.11 -y
```
Then, activate the new environment with:
```bash
conda activate ultralytics-env
```
Finally, install Ultralytics from the conda-forge channel:
```bash
conda install -c conda-forge ultralytics
```
### Why should I use Conda over pip for managing dependencies in Ultralytics projects?
Conda is a robust package and environment management system that offers several advantages over pip. It manages dependencies efficiently and ensures that all necessary libraries are compatible. Conda's isolated environments prevent conflicts between packages, which is crucial in data science and machine learning projects. Additionally, Conda supports binary package distribution, speeding up the installation process.
### Can I use Ultralytics YOLO in a CUDA-enabled environment for faster performance?
Yes, you can enhance performance by utilizing a CUDA-enabled environment. Ensure that you install `ultralytics`, `pytorch`, and `pytorch-cuda` together to avoid conflicts:
```bash
conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=11.8 ultralytics
```
This setup enables GPU acceleration, crucial for intensive tasks like [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model training and inference. For more information, visit the [Ultralytics installation guide](../quickstart.md).
### What are the benefits of using Ultralytics Docker images with a Conda environment?
Using Ultralytics Docker images ensures a consistent and reproducible environment, eliminating "it works on my machine" issues. These images include a pre-configured Conda environment, simplifying the setup process. You can pull and run the latest Ultralytics Docker image with the following commands:
```bash
sudo docker pull ultralytics/ultralytics:latest-conda
sudo docker run -it --ipc=host --gpus all ultralytics/ultralytics:latest-conda
```
This approach is ideal for deploying applications in production or running complex workflows without manual configuration. Learn more about [Ultralytics Conda Docker Image](../quickstart.md).
### How can I speed up Conda package installation in my Ultralytics environment?
You can speed up the package installation process by using `libmamba`, a fast dependency solver for Conda. First, install the `conda-libmamba-solver` package:
```bash
conda install conda-libmamba-solver
```
Then configure Conda to use `libmamba` as the solver:
```bash
conda config --set solver libmamba
```
This setup provides faster and more efficient package management. For more tips on optimizing your environment, read about [libmamba installation](https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community).

---
comments: true
description: Learn how to boost your Raspberry Pi's ML performance using Coral Edge TPU with Ultralytics YOLO11. Follow our detailed setup and installation guide.
keywords: Coral Edge TPU, Raspberry Pi, YOLO11, Ultralytics, TensorFlow Lite, ML inference, machine learning, AI, installation guide, setup tutorial
---
# Coral Edge TPU on a Raspberry Pi with Ultralytics YOLO11 🚀
<p align="center">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/edge-tpu-usb-accelerator-and-pi.avif" alt="Raspberry Pi single board computer with USB Edge TPU accelerator">
</p>
## What is a Coral Edge TPU?
The Coral Edge TPU is a compact device that adds an Edge TPU coprocessor to your system. It enables low-power, high-performance ML inference for [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) Lite models. Read more at the [Coral Edge TPU home page](https://coral.ai/products/accelerator).
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/w4yHORvDBw0"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Run Inference on Raspberry Pi using Google Coral Edge TPU
</p>
## Boost Raspberry Pi Model Performance with Coral Edge TPU
Many people want to run their models on an embedded or mobile device such as a Raspberry Pi, since these devices are very power efficient and can be used in many different applications. However, inference performance on these devices is usually poor even when using formats like [ONNX](../integrations/onnx.md) or [OpenVINO](../integrations/openvino.md). The Coral Edge TPU is a great solution to this problem, since it can be used with a Raspberry Pi and greatly accelerate inference performance.
## Edge TPU on Raspberry Pi with TensorFlow Lite (New)⭐
The [existing guide](https://coral.ai/docs/accelerator/get-started/) by Coral on how to use the Edge TPU with a Raspberry Pi is outdated, and the current Coral Edge TPU runtime builds no longer work with current TensorFlow Lite runtime versions. In addition, Google appears to have abandoned the Coral project entirely, with no updates between 2021 and 2025. This guide will show you how to get the Edge TPU working with the latest versions of the TensorFlow Lite runtime and an updated Coral Edge TPU runtime on a Raspberry Pi single board computer (SBC).
## Prerequisites
- [Raspberry Pi 4B](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/) (2GB or more recommended) or [Raspberry Pi 5](https://www.raspberrypi.com/products/raspberry-pi-5/) (Recommended)
- [Raspberry Pi OS](https://www.raspberrypi.com/software/) Bullseye/Bookworm (64-bit) with desktop (Recommended)
- [Coral USB Accelerator](https://coral.ai/products/accelerator/)
- A non-ARM based platform for exporting an Ultralytics [PyTorch](https://www.ultralytics.com/glossary/pytorch) model
## Installation Walkthrough
This guide assumes that you already have a working Raspberry Pi OS install and have installed `ultralytics` and all dependencies. To get `ultralytics` installed, visit the [quickstart guide](../quickstart.md) to get set up before continuing here.
### Installing the Edge TPU runtime
First, we need to install the Edge TPU runtime. There are many different versions available, so you need to choose the right version for your operating system.
The high frequency version runs the Edge TPU at a higher clock speed, which improves performance. However, it may cause the Edge TPU to thermally throttle, so it is recommended to have some sort of cooling mechanism in place.
| Raspberry Pi OS | High frequency mode | Version to download |
| --------------- | :-----------------: | ------------------------------------------ |
| Bullseye 32bit | No | `libedgetpu1-std_ ... .bullseye_armhf.deb` |
| Bullseye 64bit | No | `libedgetpu1-std_ ... .bullseye_arm64.deb` |
| Bullseye 32bit | Yes | `libedgetpu1-max_ ... .bullseye_armhf.deb` |
| Bullseye 64bit | Yes | `libedgetpu1-max_ ... .bullseye_arm64.deb` |
| Bookworm 32bit | No | `libedgetpu1-std_ ... .bookworm_armhf.deb` |
| Bookworm 64bit | No | `libedgetpu1-std_ ... .bookworm_arm64.deb` |
| Bookworm 32bit | Yes | `libedgetpu1-max_ ... .bookworm_armhf.deb` |
| Bookworm 64bit | Yes | `libedgetpu1-max_ ... .bookworm_arm64.deb` |
[Download the latest version from here](https://github.com/feranick/libedgetpu/releases).
After downloading the file, you can install it with the following command:
```bash
sudo dpkg -i path/to/package.deb
```
After installing the runtime, plug your Coral Edge TPU into a USB 3.0 port on your Raspberry Pi. Do this only after installing the runtime because, according to the official guide, a new `udev` rule needs to take effect first.
???+ warning "Important"
If you already have the Coral Edge TPU runtime installed, uninstall it using the following command.
```bash
# If you installed the standard version
sudo apt remove libedgetpu1-std
# If you installed the high frequency version
sudo apt remove libedgetpu1-max
```
## Export to Edge TPU
To use the Edge TPU, you need to convert your model into a compatible format. It is recommended that you run the export on Google Colab, an x86_64 Linux machine, the official [Ultralytics Docker container](docker-quickstart.md), or [Ultralytics HUB](../hub/quickstart.md), since the Edge TPU compiler is not available on ARM. See the [Export Mode](../modes/export.md) for the available arguments.
!!! example "Exporting the model"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/model.pt") # Load an official model or custom model
# Export the model
model.export(format="edgetpu")
```
=== "CLI"
```bash
yolo export model=path/to/model.pt format=edgetpu # Export an official model or custom model
```
The exported model will be saved in the `<model_name>_saved_model/` folder with the name `<model_name>_full_integer_quant_edgetpu.tflite`. It is important that your model ends with the suffix `_edgetpu.tflite`; otherwise, Ultralytics will not recognize that you are using an Edge TPU model.
## Running the model
Before you can actually run the model, you will need to install the correct libraries.
If `tensorflow` is installed, uninstall it with the following command:
```bash
pip uninstall tensorflow tensorflow-aarch64
```
Then install/update `tflite-runtime`:
```bash
pip install -U tflite-runtime
```
Now you can run inference using the following code:
!!! example "Running the model"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/<model_name>_full_integer_quant_edgetpu.tflite") # Load an official model or custom model
# Run Prediction
model.predict("path/to/source.png")
```
=== "CLI"
```bash
yolo predict model=path/to/<model_name>_full_integer_quant_edgetpu.tflite source=path/to/source.png # Load an official model or custom model
```
Find comprehensive information on the [Predict](../modes/predict.md) page for full prediction mode details.
!!! note "Inference with multiple Edge TPUs"
If you have multiple Edge TPUs, you can use the following code to select a specific TPU.
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/<model_name>_full_integer_quant_edgetpu.tflite") # Load an official model or custom model
# Run Prediction
model.predict("path/to/source.png") # Inference defaults to the first TPU
model.predict("path/to/source.png", device="tpu:0") # Select the first TPU
model.predict("path/to/source.png", device="tpu:1") # Select the second TPU
```
## Benchmarks
!!! tip "Benchmarks"
Tested with Raspberry Pi OS Bookworm 64-bit and a USB Coral Edge TPU.
!!! note
Only the inference time is shown; pre-/post-processing is not included.
=== "Raspberry Pi 4B 2GB"
| Image Size | Model | Standard Inference Time (ms) | High Frequency Inference Time (ms) |
|------------|---------|------------------------------|------------------------------------|
| 320 | YOLOv8n | 32.2 | 26.7 |
| 320 | YOLOv8s | 47.1 | 39.8 |
| 512 | YOLOv8n | 73.5 | 60.7 |
| 512 | YOLOv8s | 149.6 | 125.3 |
=== "Raspberry Pi 5 8GB"
| Image Size | Model | Standard Inference Time (ms) | High Frequency Inference Time (ms) |
|------------|---------|------------------------------|------------------------------------|
| 320 | YOLOv8n | 22.2 | 16.7 |
| 320 | YOLOv8s | 40.1 | 32.2 |
| 512 | YOLOv8n | 53.5 | 41.6 |
| 512 | YOLOv8s | 132.0 | 103.3 |
On average:
- The Raspberry Pi 5 is 22% faster with the standard mode than the Raspberry Pi 4B.
- The Raspberry Pi 5 is 30.2% faster with the high frequency mode than the Raspberry Pi 4B.
- The high frequency mode is 28.4% faster than the standard mode.
## FAQ
### What is a Coral Edge TPU and how does it enhance Raspberry Pi's performance with Ultralytics YOLO11?
The Coral Edge TPU is a compact device designed to add an Edge TPU coprocessor to your system. This coprocessor enables low-power, high-performance [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) inference, particularly optimized for TensorFlow Lite models. When using a Raspberry Pi, the Edge TPU accelerates ML model inference, significantly boosting performance, especially for Ultralytics YOLO11 models. You can read more about the Coral Edge TPU on their [home page](https://coral.ai/products/accelerator).
### How do I install the Coral Edge TPU runtime on a Raspberry Pi?
To install the Coral Edge TPU runtime on your Raspberry Pi, download the appropriate `.deb` package for your Raspberry Pi OS version from [this link](https://github.com/feranick/libedgetpu/releases). Once downloaded, use the following command to install it:
```bash
sudo dpkg -i path/to/package.deb
```
Make sure to uninstall any previous Coral Edge TPU runtime versions by following the steps outlined in the [Installation Walkthrough](#installation-walkthrough) section.
### Can I export my Ultralytics YOLO11 model to be compatible with Coral Edge TPU?
Yes, you can export your Ultralytics YOLO11 model to be compatible with the Coral Edge TPU. It is recommended to perform the export on Google Colab, an x86_64 Linux machine, or using the [Ultralytics Docker container](docker-quickstart.md). You can also use [Ultralytics HUB](../hub/quickstart.md) for exporting. Here is how you can export your model using Python and CLI:
!!! example "Exporting the model"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/model.pt") # Load an official model or custom model
# Export the model
model.export(format="edgetpu")
```
=== "CLI"
```bash
yolo export model=path/to/model.pt format=edgetpu # Export an official model or custom model
```
For more information, refer to the [Export Mode](../modes/export.md) documentation.
### What should I do if TensorFlow is already installed on my Raspberry Pi, but I want to use tflite-runtime instead?
If you have TensorFlow installed on your Raspberry Pi and need to switch to `tflite-runtime`, you'll need to uninstall TensorFlow first using:
```bash
pip uninstall tensorflow tensorflow-aarch64
```
Then, install or update `tflite-runtime` with the following command:
```bash
pip install -U tflite-runtime
```
For detailed instructions, refer to the [Running the Model](#running-the-model) section.
### How do I run inference with an exported YOLO11 model on a Raspberry Pi using the Coral Edge TPU?
After exporting your YOLO11 model to an Edge TPU-compatible format, you can run inference using the following code snippets:
!!! example "Running the model"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("path/to/edgetpu_model.tflite") # Load an official model or custom model
# Run Prediction
model.predict("path/to/source.png")
```
=== "CLI"
```bash
yolo predict model=path/to/edgetpu_model.tflite source=path/to/source.png # Load an official model or custom model
```
Comprehensive details on full prediction mode features can be found on the [Predict Page](../modes/predict.md).

---
comments: true
description: Data collection and annotation are vital steps in any computer vision project. Explore the tools, techniques, and best practices for collecting and annotating data.
keywords: What is Data Annotation, Data Annotation Tools, Annotating Data, Avoiding Bias in Data Collection, Ethical Data Collection, Annotation Strategies
---
# Data Collection and Annotation Strategies for Computer Vision
## Introduction
The key to success in any [computer vision project](./steps-of-a-cv-project.md) starts with effective data collection and annotation strategies. The quality of the data directly impacts model performance, so it's important to understand the best practices related to data collection and data annotation.
Every consideration regarding the data should closely align with [your project's goals](./defining-project-goals.md). Changes in your annotation strategies could shift the project's focus or effectiveness and vice versa. With this in mind, let's take a closer look at the best ways to approach data collection and annotation.
## Setting Up Classes and Collecting Data
Collecting images and video for a computer vision project involves defining the number of classes, sourcing data, and considering ethical implications. Before you start gathering data, you need to be clear about the following:
### Choosing the Right Classes for Your Project
One of the first questions when starting a computer vision project is how many classes to include. You need to determine the class membership, which involves the different categories or labels that you want your model to recognize and differentiate. The number of classes should be determined by the specific goals of your project.
For example, if you want to monitor traffic, your classes might include "car," "truck," "bus," "motorcycle," and "bicycle." On the other hand, for tracking items in a store, your classes could be "fruits," "vegetables," "beverages," and "snacks." Defining classes based on your project goals helps keep your dataset relevant and focused.
When you define your classes, another important distinction to make is whether to choose coarse or fine class counts. 'Count' refers to the number of distinct classes you are interested in. This decision influences the granularity of your data and the complexity of your model. Here are the considerations for each approach:
- **Coarse Class-Count**: These are broader, more inclusive categories, such as "vehicle" and "non-vehicle." They simplify annotation and require fewer computational resources but provide less detailed information, potentially limiting the model's effectiveness in complex scenarios.
- **Fine Class-Count**: More categories with finer distinctions, such as "sedan," "SUV," "pickup truck," and "motorcycle." They capture more detailed information, improving model accuracy and performance. However, they are more time-consuming and labor-intensive to annotate and require more computational resources.
Starting with more specific classes can be very helpful, especially in complex projects where details are important. More specific classes let you collect more detailed data, gain deeper insights, and establish clearer distinctions between categories. Not only does this improve the accuracy of the model, but it also makes it easier to adjust the model later if needed, saving both time and resources.
### Sources of Data
You can use public datasets or gather your own custom data. Public datasets like those on [Kaggle](https://www.kaggle.com/datasets) and [Google Dataset Search Engine](https://datasetsearch.research.google.com/) offer well-annotated, standardized data, making them great starting points for training and validating models.
Custom data collection, on the other hand, allows you to customize your dataset to your specific needs. You might capture images and videos with cameras or drones, scrape the web for images, or use existing internal data from your organization. Custom data gives you more control over its quality and relevance. Combining both public and custom data sources helps create a diverse and comprehensive dataset.
### Avoiding Bias in Data Collection
Bias occurs when certain groups or scenarios are underrepresented or overrepresented in your dataset. It leads to a model that performs well on some data but poorly on others. It's crucial to avoid [bias in AI](https://www.ultralytics.com/glossary/bias-in-ai) so that your computer vision model can perform well in a variety of scenarios.
Here is how you can avoid bias while collecting data:
- **Diverse Sources**: Collect data from many sources to capture different perspectives and scenarios.
- **Balanced Representation**: Include balanced representation from all relevant groups. For example, consider different ages, genders, and ethnicities.
- **Continuous Monitoring**: Regularly review and update your dataset to identify and address any emerging biases.
- **Bias Mitigation Techniques**: Use methods like oversampling underrepresented classes, [data augmentation](https://www.ultralytics.com/glossary/data-augmentation), and fairness-aware algorithms (a minimal oversampling sketch appears below).
Following these practices helps create a more robust and fair model that can generalize well in real-world applications.
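As a concrete illustration of the oversampling technique mentioned above, here is a minimal Python sketch; the class names and file lists are hypothetical placeholders.

```python
import random

# Hypothetical per-class image lists; "bicycle" is underrepresented
dataset = {
    "car": [f"car_{i}.jpg" for i in range(500)],
    "bicycle": [f"bike_{i}.jpg" for i in range(50)],
}

# Randomly duplicate minority-class samples until every class matches the largest one
target = max(len(images) for images in dataset.values())
balanced = {
    cls: images + random.choices(images, k=target - len(images))
    for cls, images in dataset.items()
}
print({cls: len(images) for cls, images in balanced.items()})  # {'car': 500, 'bicycle': 500}
```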
## What is Data Annotation?
Data annotation is the process of labeling data to make it usable for training [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models. In computer vision, this means labeling images or videos with the information that a model needs to learn from. Without properly annotated data, models cannot accurately learn the relationships between inputs and outputs.
### Types of Data Annotation
Depending on the specific requirements of a [computer vision task](../tasks/index.md), there are different types of data annotation. Here are some examples:
- **Bounding Boxes**: Rectangular boxes drawn around objects in an image, used primarily for object detection tasks. These boxes are defined by their top-left and bottom-right coordinates.
- **Polygons**: Detailed outlines for objects, allowing for more precise annotation than bounding boxes. Polygons are used in tasks like [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), where the shape of the object is important.
- **Masks**: Binary masks where each pixel is either part of an object or the background. Masks are used in [semantic segmentation](https://www.ultralytics.com/glossary/semantic-segmentation) tasks to provide pixel-level detail.
- **Keypoints**: Specific points marked within an image to identify locations of interest. Keypoints are used in tasks like [pose estimation](../tasks/pose.md) and facial landmark detection.
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/types-of-data-annotation.avif" alt="Types of Data Annotation">
</p>
### Common Annotation Formats
After selecting a type of annotation, it's important to choose the appropriate format for storing and sharing annotations.
Commonly used formats include [COCO](../datasets/detect/coco.md), which supports various annotation types like [object detection](https://www.ultralytics.com/glossary/object-detection), keypoint detection, stuff segmentation, [panoptic segmentation](https://www.ultralytics.com/glossary/panoptic-segmentation), and image captioning, stored in JSON. [Pascal VOC](../datasets/detect/voc.md) uses XML files and is popular for object detection tasks. YOLO, on the other hand, creates a .txt file for each image, containing annotations like object class, coordinates, height, and width, making it suitable for object detection.
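To make the YOLO format concrete, here is a minimal parsing sketch; each line of a label file holds a class index followed by normalized box center coordinates and dimensions. The file name used is a hypothetical placeholder.

```python
from pathlib import Path


def parse_yolo_label(path):
    """Parse a YOLO-format detection label file into a list of box annotations."""
    annotations = []
    for line in Path(path).read_text().splitlines():
        class_id, x_center, y_center, width, height = line.split()
        annotations.append(
            {
                "class_id": int(class_id),
                # All values are normalized to [0, 1] relative to the image size
                "x_center": float(x_center),
                "y_center": float(y_center),
                "width": float(width),
                "height": float(height),
            }
        )
    return annotations


# A label file might contain a line like "0 0.5 0.5 0.25 0.4"
print(parse_yolo_label("image1.txt"))  # "image1.txt" is a placeholder path
```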
### Techniques of Annotation
Now, assuming you've chosen a type of annotation and format, it's time to establish clear and objective labeling rules. These rules are like a roadmap for consistency and [accuracy](https://www.ultralytics.com/glossary/accuracy) throughout the annotation process. Key aspects of these rules include:
- **Clarity and Detail**: Make sure your instructions are clear. Use examples and illustrations to show annotators exactly what's expected.
- **Consistency**: Keep your annotations uniform. Set standard criteria for annotating different types of data, so all annotations follow the same rules.
- **Reducing Bias**: Stay neutral. Train yourself to be objective and minimize personal biases to ensure fair annotations.
- **Efficiency**: Work smarter, not harder. Use tools and workflows that automate repetitive tasks, making the annotation process faster and more efficient.
Regularly reviewing and updating your labeling rules will help keep your annotations accurate, consistent, and aligned with your project goals.
### Popular Annotation Tools
Once you are ready to annotate, there are several open-source tools available to help streamline the data annotation process. Here are some useful options:
- **[Label Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool that supports a wide range of annotation tasks and includes features for managing projects and quality control.
- **[CVAT](https://github.com/cvat-ai/cvat)**: A powerful tool that supports various annotation formats and customizable workflows, making it suitable for complex projects.
- **[Labelme](https://github.com/wkentaro/labelme)**: A simple and easy-to-use tool that allows for quick annotation of images with polygons, making it ideal for straightforward tasks.
- **[LabelImg](https://github.com/HumanSignal/labelImg)**: An easy-to-use graphical image annotation tool that's particularly good for creating bounding box annotations in YOLO format.
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/labelme-instance-segmentation-annotation.avif" alt="LabelMe Overview">
</p>
These open-source tools are budget-friendly and provide a range of features to meet different annotation needs.
### Some More Things to Consider Before Annotating Data
Before you dive into annotating your data, there are a few more things to keep in mind. You should be aware of accuracy, [precision](https://www.ultralytics.com/glossary/precision), outliers, and quality control to avoid labeling your data in a counterproductive manner.
#### Understanding Accuracy and Precision
It's important to understand the difference between accuracy and precision and how it relates to annotation. Accuracy refers to how close the annotated data is to the true values. It helps us measure how closely the labels reflect real-world scenarios. Precision indicates the consistency of annotations. It checks if you are giving the same label to the same object or feature throughout the dataset. High accuracy and precision lead to better-trained models by reducing noise and improving the model's ability to generalize from the [training data](https://www.ultralytics.com/glossary/training-data).
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/example-of-precision.avif" alt="Example of Precision">
</p>
#### Identifying Outliers
Outliers are data points that deviate quite a bit from other observations in the dataset. With respect to annotations, an outlier could be an incorrectly labeled image or an annotation that doesn't fit with the rest of the dataset. Outliers are concerning because they can distort the model's learning process, leading to inaccurate predictions and poor generalization.
You can use various methods to detect and correct outliers:
- **Statistical Techniques**: To detect outliers in numerical features like pixel values, [bounding box](https://www.ultralytics.com/glossary/bounding-box) coordinates, or object sizes, you can use methods such as box plots, histograms, or z-scores (see the sketch after this list).
- **Visual Techniques**: To spot anomalies in categorical features like object classes, colors, or shapes, use visual methods like plotting images, labels, or heat maps.
- **Algorithmic Methods**: Use tools like clustering (e.g., K-means clustering, [DBSCAN](https://www.ultralytics.com/glossary/dbscan-density-based-spatial-clustering-of-applications-with-noise)) and [anomaly detection](https://www.ultralytics.com/glossary/anomaly-detection) algorithms to identify outliers based on data distribution patterns.
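For example, a z-score check over bounding box areas can surface annotations that deviate strongly from the rest. Here is a minimal sketch with hypothetical values:

```python
import numpy as np

# Hypothetical bounding box areas (in pixels) gathered from a dataset's annotations
areas = np.array([5200, 4800, 5100, 4950, 150000, 5300, 4700])

# Flag annotations whose area deviates strongly from the mean (|z| > 2)
z_scores = (areas - areas.mean()) / areas.std()
outliers = np.where(np.abs(z_scores) > 2)[0]
print(f"Potential outlier annotations at indices: {outliers}")  # [4]
```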
#### Quality Control of Annotated Data
Just like other technical projects, quality control is a must for annotated data. It is a good practice to regularly check annotations to make sure they are accurate and consistent. This can be done in a few different ways:
- Reviewing samples of annotated data
- Using automated tools to spot common errors
- Having another person double-check the annotations
If you are working with multiple people, consistency between different annotators is important. Good inter-annotator agreement means that the guidelines are clear and everyone is following them the same way. It keeps everyone on the same page and the annotations consistent.
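A simple percent-agreement check is one way to quantify consistency between annotators; more robust statistics such as Cohen's kappa also account for chance agreement. Here is a minimal sketch with hypothetical labels:

```python
def percent_agreement(labels_a, labels_b):
    """Fraction of items that two annotators labeled identically."""
    matches = sum(a == b for a, b in zip(labels_a, labels_b))
    return matches / len(labels_a)


# Hypothetical class labels assigned by two annotators to the same 8 images
annotator_a = ["car", "truck", "car", "bus", "car", "truck", "bus", "car"]
annotator_b = ["car", "truck", "car", "car", "car", "truck", "bus", "car"]
print(f"Agreement: {percent_agreement(annotator_a, annotator_b):.0%}")  # Agreement: 88%
```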
While reviewing, if you find errors, correct them and update the guidelines to avoid future mistakes. Provide feedback to annotators and offer regular training to help reduce errors. Having a strong process for handling errors keeps your dataset accurate and reliable.
## Efficient Data Labeling Strategies
To make the process of data labeling smoother and more effective, consider implementing these strategies:
- **Clear Annotation Guidelines**: Provide detailed instructions with examples to ensure all annotators interpret tasks consistently. For instance, when labeling birds, specify whether to include the entire bird or just specific parts.
- **Regular Quality Checks**: Set benchmarks and use specific metrics to review work, maintaining high standards through continuous feedback.
- **Use Pre-annotation Tools**: Many modern annotation platforms offer AI-assisted pre-annotation features that can significantly speed up the process by automatically generating initial annotations that humans can then refine.
- **Implement Active Learning**: This approach prioritizes labeling the most informative samples first, which can reduce the total number of annotations needed while maintaining model performance (see the sketch after this list).
- **Batch Processing**: Group similar images together for annotation to maintain consistency and improve efficiency.
These strategies can help maintain high-quality annotations while reducing the time and resources required for the labeling process.
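As an illustration of the active learning idea from the list above, here is a minimal uncertainty-sampling sketch; the image names and confidence scores are hypothetical.

```python
# Hypothetical maximum confidence score per unlabeled image, e.g. from a detector's predictions
confidences = {
    "img_001.jpg": 0.91,
    "img_002.jpg": 0.38,
    "img_003.jpg": 0.55,
    "img_004.jpg": 0.72,
}

# The lowest-confidence images are the most informative to annotate next
queue = sorted(confidences, key=confidences.get)
print("Annotation priority:", queue)  # img_002, img_003, img_004, img_001
```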
## Share Your Thoughts with the Community
Bouncing your ideas and queries off other [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) enthusiasts can help accelerate your projects. Here are some great ways to learn, troubleshoot, and network:
### Where to Find Help and Support
- **GitHub Issues:** Visit the YOLO11 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face.
- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
### Official Documentation
- **Ultralytics YOLO11 Documentation:** Refer to the [official YOLO11 documentation](./index.md) for thorough guides and valuable insights on numerous computer vision tasks and projects.
## Conclusion
By following the best practices for collecting and annotating data, avoiding bias, and using the right tools and techniques, you can significantly improve your model's performance. Engaging with the community and using available resources will keep you informed and help you troubleshoot issues effectively. Remember, quality data is the foundation of a successful project, and the right strategies will help you build robust and reliable models.
## FAQ
### What is the best way to avoid bias in data collection for computer vision projects?
Avoiding bias in data collection ensures that your computer vision model performs well across various scenarios. To minimize bias, consider collecting data from diverse sources to capture different perspectives and scenarios. Ensure balanced representation among all relevant groups, such as different ages, genders, and ethnicities. Regularly review and update your dataset to identify and address any emerging biases. Techniques such as oversampling underrepresented classes, data augmentation, and fairness-aware algorithms can also help mitigate bias. By employing these strategies, you maintain a robust and fair dataset that enhances your model's generalization capability.
### How can I ensure high consistency and accuracy in data annotation?
Ensuring high consistency and accuracy in data annotation involves establishing clear and objective labeling guidelines. Your instructions should be detailed, with examples and illustrations to clarify expectations. Consistency is achieved by setting standard criteria for annotating various data types, ensuring all annotations follow the same rules. To reduce personal biases, train annotators to stay neutral and objective. Regular reviews and updates of labeling rules help maintain accuracy and alignment with project goals. Using automated tools to check for consistency and getting feedback from other annotators also contribute to maintaining high-quality annotations.
### How many images do I need for training Ultralytics YOLO models?
For effective [transfer learning](https://www.ultralytics.com/glossary/transfer-learning) and object detection with Ultralytics YOLO models, start with a minimum of a few hundred annotated objects per class. If training for just one class, begin with at least 100 annotated images and train for approximately 100 [epochs](https://www.ultralytics.com/glossary/epoch). More complex tasks might require thousands of images per class to achieve high reliability and performance. Quality annotations are crucial, so ensure your data collection and annotation processes are rigorous and aligned with your project's specific goals. Explore detailed training strategies in the [YOLO11 training guide](../modes/train.md).
### What are some popular tools for data annotation?
Several popular open-source tools can streamline the data annotation process:
- **[Label Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool supporting various annotation tasks, project management, and quality control features.
- **[CVAT](https://www.cvat.ai/)**: Offers multiple annotation formats and customizable workflows, making it suitable for complex projects.
- **[Labelme](https://github.com/wkentaro/labelme)**: Ideal for quick and straightforward image annotation with polygons.
- **[LabelImg](https://github.com/HumanSignal/labelImg)**: Perfect for creating bounding box annotations in YOLO format with a simple interface.
These tools can help enhance the efficiency and accuracy of your annotation workflows. For extensive feature lists and guides, refer to our [data annotation tools documentation](../datasets/index.md).
### What types of data annotation are commonly used in computer vision?
Different types of data annotation cater to various computer vision tasks:
- **Bounding Boxes**: Used primarily for object detection, these are rectangular boxes around objects in an image.
- **Polygons**: Provide more precise object outlines suitable for instance segmentation tasks.
- **Masks**: Offer pixel-level detail, used in semantic segmentation to differentiate objects from the background.
- **Keypoints**: Identify specific points of interest within an image, useful for tasks like pose estimation and facial landmark detection.
Selecting the appropriate annotation type depends on your project's requirements. Learn more about how to implement these annotations and their formats in our [data annotation guide](#what-is-data-annotation).

---
comments: true
description: Learn how to deploy Ultralytics YOLO11 on NVIDIA Jetson devices using TensorRT and DeepStream SDK. Explore performance benchmarks and maximize AI capabilities.
keywords: Ultralytics, YOLO11, NVIDIA Jetson, JetPack, AI deployment, embedded systems, deep learning, TensorRT, DeepStream SDK, computer vision
---
# Ultralytics YOLO11 on NVIDIA Jetson using DeepStream SDK and TensorRT
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/wWmXKIteRLA"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Run Multiple Streams with DeepStream SDK on Jetson Nano using Ultralytics YOLO11
</p>
This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLO11 on [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) devices using DeepStream SDK and TensorRT. Here we use TensorRT to maximize the inference performance on the Jetson platform.
<img width="1024" src="https://github.com/ultralytics/docs/releases/download/0/deepstream-nvidia-jetson.avif" alt="DeepStream on NVIDIA Jetson">
!!! note
This guide has been tested with the [NVIDIA Jetson Orin Nano Super Developer Kit](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/nano-super-developer-kit) running the latest stable JetPack release of [JP6.1](https://developer.nvidia.com/embedded/jetpack-sdk-61), the [Seeed Studio reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html), which is based on the NVIDIA Jetson Orin NX 16GB running the JetPack release of [JP5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513), and the [Seeed Studio reComputer J1020 v2](https://www.seeedstudio.com/reComputer-J1020-v2-p-5498.html), which is based on the NVIDIA Jetson Nano 4GB running the JetPack release of [JP4.6.4](https://developer.nvidia.com/jetpack-sdk-464). It is expected to work across the entire NVIDIA Jetson hardware lineup, including the latest and legacy devices.
## What is NVIDIA DeepStream?
[NVIDIA's DeepStream SDK](https://developer.nvidia.com/deepstream-sdk) is a complete streaming analytics toolkit based on GStreamer for AI-based multi-sensor processing, video, audio, and image understanding. It's ideal for vision AI developers, software partners, startups, and OEMs building IVA (Intelligent Video Analytics) apps and services. You can now create stream-processing pipelines that incorporate [neural networks](https://www.ultralytics.com/glossary/neural-network-nn) and other complex processing tasks like tracking, video encoding/decoding, and video rendering. These pipelines enable real-time analytics on video, image, and sensor data. DeepStream's multi-platform support gives you a faster, easier way to develop vision AI applications and services on-premise, at the edge, and in the cloud.
## Prerequisites
Before you start to follow this guide:
- Visit our documentation, [Quick Start Guide: NVIDIA Jetson with Ultralytics YOLO11](nvidia-jetson.md) to set up your NVIDIA Jetson device with Ultralytics YOLO11
- Install [DeepStream SDK](https://developer.nvidia.com/deepstream-getting-started) according to the JetPack version
- For JetPack 4.6.4, install [DeepStream 6.0.1](https://docs.nvidia.com/metropolis/deepstream/6.0.1/dev-guide/text/DS_Quickstart.html)
- For JetPack 5.1.3, install [DeepStream 6.3](https://docs.nvidia.com/metropolis/deepstream/6.3/dev-guide/text/DS_Quickstart.html)
- For JetPack 6.1, install [DeepStream 7.1](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Installation.html)
!!! tip
In this guide we have used the Debian package method of installing DeepStream SDK to the Jetson device. You can also visit the [DeepStream SDK on Jetson (Archived)](https://developer.nvidia.com/embedded/deepstream-on-jetson-downloads-archived) to access legacy versions of DeepStream.
## DeepStream Configuration for YOLO11
Here we are using the [marcoslucianops/DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo) GitHub repository, which includes NVIDIA DeepStream SDK support for YOLO models. We appreciate marcoslucianops' efforts and contributions!
1. Install Ultralytics with necessary dependencies
```bash
cd ~
pip install -U pip
git clone https://github.com/ultralytics/ultralytics
cd ultralytics
pip install -e ".[export]" onnxslim
```
2. Clone the DeepStream-Yolo repository
```bash
cd ~
git clone https://github.com/marcoslucianops/DeepStream-Yolo
```
3. Copy the `export_yoloV8.py` file from the `DeepStream-Yolo/utils` directory to the `ultralytics` folder
```bash
cp ~/DeepStream-Yolo/utils/export_yoloV8.py ~/ultralytics
cd ultralytics
```
!!! note
`export_yoloV8.py` works for both YOLOv8 and YOLO11 models.
4. Download Ultralytics YOLO11 detection model (.pt) of your choice from [YOLO11 releases](https://github.com/ultralytics/assets/releases). Here we use [yolo11s.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt).
```bash
wget https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt
```
!!! note
You can also use a [custom trained YOLO11 model](https://docs.ultralytics.com/modes/train/).
5. Convert model to ONNX
```bash
python3 export_yoloV8.py -w yolo11s.pt
```
!!! note "Pass the below arguments to the above command"
For DeepStream 6.0.1, use opset 12 or lower. The default opset is 16.
```bash
--opset 12
```
To change the inference size (default: 640)
```bash
-s SIZE
--size SIZE
-s HEIGHT WIDTH
--size HEIGHT WIDTH
```
Example for 1280:
```bash
-s 1280
or
-s 1280 1280
```
To simplify the ONNX model (DeepStream >= 6.0)
```bash
--simplify
```
To use dynamic batch-size (DeepStream >= 6.1)
```bash
--dynamic
```
To use static batch-size (example for batch-size = 4)
```bash
--batch 4
```
6. Copy the generated `.onnx` model file and `labels.txt` file to the `DeepStream-Yolo` folder
```bash
cp yolo11s.pt.onnx labels.txt ~/DeepStream-Yolo
cd ~/DeepStream-Yolo
```
7. Set the CUDA version according to the JetPack version installed
For JetPack 4.6.4:
```bash
export CUDA_VER=10.2
```
For JetPack 5.1.3:
```bash
export CUDA_VER=11.4
```
For JetPack 6.1:
```bash
export CUDA_VER=12.6
```
8. Compile the library
```bash
make -C nvdsinfer_custom_impl_Yolo clean && make -C nvdsinfer_custom_impl_Yolo
```
9. Edit the `config_infer_primary_yoloV8.txt` file according to your model (for YOLO11s with 80 classes)
```bash
[property]
...
onnx-file=yolo11s.pt.onnx
...
num-detected-classes=80
...
```
10. Edit the `deepstream_app_config` file
```bash
...
[primary-gie]
...
config-file=config_infer_primary_yoloV8.txt
```
11. You can also change the video source in the `deepstream_app_config` file. Here, a default video file is loaded:
```bash
...
[source0]
...
uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
```
### Run Inference
```bash
deepstream-app -c deepstream_app_config.txt
```
!!! note
Generating the TensorRT engine file before inference starts can take a long time, so please be patient.
<div align=center><img width=1000 src="https://github.com/ultralytics/docs/releases/download/0/yolov8-with-deepstream.avif" alt="YOLO11 with deepstream"></div>
!!! tip
If you want to convert the model to FP16 precision, simply set `model-engine-file=model_b1_gpu0_fp16.engine` and `network-mode=2` inside `config_infer_primary_yoloV8.txt`
## INT8 Calibration
If you want to use INT8 precision for inference, you need to follow the steps below
!!! note
Currently, INT8 does not work with TensorRT 10.x. This section of the guide has been tested with TensorRT 8.x, which is expected to work.
1. Set `OPENCV` environment variable
```bash
export OPENCV=1
```
2. Compile the library
```bash
make -C nvdsinfer_custom_impl_Yolo clean && make -C nvdsinfer_custom_impl_Yolo
```
3. For the COCO dataset, download [val2017](http://images.cocodataset.org/zips/val2017.zip), extract it, and move it to the `DeepStream-Yolo` folder
4. Make a new directory for calibration images
```bash
mkdir calibration
```
5. Run the following to select 1000 random images from the COCO dataset for calibration
```bash
for jpg in $(ls -1 val2017/*.jpg | sort -R | head -1000); do \
cp ${jpg} calibration/; \
done
```
!!! note
NVIDIA recommends at least 500 images to get good [accuracy](https://www.ultralytics.com/glossary/accuracy). In this example, 1000 images are chosen for better accuracy (more images = more accuracy). You can adjust the number via **head -1000**; for example, use **head -2000** for 2000 images. This process can take a long time.
6. Create the `calibration.txt` file with all selected images
```bash
realpath calibration/*jpg > calibration.txt
```
7. Set environment variables
```bash
export INT8_CALIB_IMG_PATH=calibration.txt
export INT8_CALIB_BATCH_SIZE=1
```
!!! note
Higher INT8_CALIB_BATCH_SIZE values result in better accuracy and faster calibration. Set it according to your GPU memory.
8. Update the `config_infer_primary_yoloV8.txt` file
From
```bash
...
model-engine-file=model_b1_gpu0_fp32.engine
#int8-calib-file=calib.table
...
network-mode=0
...
```
To
```bash
...
model-engine-file=model_b1_gpu0_int8.engine
int8-calib-file=calib.table
...
network-mode=1
...
```
### Run Inference
```bash
deepstream-app -c deepstream_app_config.txt
```
## MultiStream Setup
To set up multiple streams under a single DeepStream application, you can make the following changes to the `deepstream_app_config.txt` file
1. Change the rows and columns to build a grid display according to the number of streams you want to have. For example, for 4 streams, we can add 2 rows and 2 columns.
```bash
[tiled-display]
rows=2
columns=2
```
2. Set `num-sources=4` and add the `uri` of all 4 streams
```bash
[source0]
enable=1
type=3
uri=<path_to_video>
uri=<path_to_video>
uri=<path_to_video>
uri=<path_to_video>
num-sources=4
```
### Run Inference
```bash
deepstream-app -c deepstream_app_config.txt
```
<div align=center><img width=1000 src="https://github.com/ultralytics/docs/releases/download/0/multistream-setup.avif" alt="Multistream setup"></div>
## Benchmark Results
The following benchmarks summarize how YOLO11 models perform at different TensorRT precision levels with an input size of 640x640 on the NVIDIA Jetson Orin NX 16GB.
### Comparison Chart
<div align=center><img width=1000 src="https://github.com/ultralytics/assets/releases/download/v0.0.0/jetson-deepstream-benchmarks.avif" alt="Jetson DeepStream Benchmarks Chart"></div>
### Detailed Comparison Table
!!! tip "Performance"
=== "YOLO11n"
| Format | Status | Inference time (ms/im) |
|-----------------|--------|------------------------|
| TensorRT (FP32) | ✅ | 8.64 |
| TensorRT (FP16) | ✅ | 5.27 |
| TensorRT (INT8) | ✅ | 4.54 |
=== "YOLO11s"
| Format | Status | Inference time (ms/im) |
|-----------------|--------|------------------------|
| TensorRT (FP32) | ✅ | 14.53 |
| TensorRT (FP16) | ✅ | 7.91 |
| TensorRT (INT8) | ✅ | 6.05 |
=== "YOLO11m"
| Format | Status | Inference time (ms/im) |
|-----------------|--------|------------------------|
| TensorRT (FP32) | ✅ | 32.05 |
| TensorRT (FP16) | ✅ | 15.55 |
| TensorRT (INT8) | ✅ | 10.43 |
=== "YOLO11l"
| Format | Status | Inference time (ms/im) |
|-----------------|--------|------------------------|
| TensorRT (FP32) | ✅ | 39.68 |
| TensorRT (FP16) | ✅ | 19.88 |
| TensorRT (INT8) | ✅ | 13.64 |
=== "YOLO11x"
| Format | Status | Inference time (ms/im) |
|-----------------|--------|------------------------|
| TensorRT (FP32) | ✅ | 80.65 |
| TensorRT (FP16) | ✅ | 39.06 |
| TensorRT (INT8) | ✅ | 22.83 |
## Acknowledgements
This guide was initially created by our friends at Seeed Studio, Lakshantha and Elaine.
## FAQ
### How do I set up Ultralytics YOLO11 on an NVIDIA Jetson device?
To set up Ultralytics YOLO11 on an [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) device, you first need to install the [DeepStream SDK](https://developer.nvidia.com/deepstream-getting-started) compatible with your JetPack version. Follow the step-by-step guide in our [Quick Start Guide](nvidia-jetson.md) to configure your NVIDIA Jetson for YOLO11 deployment.
### What is the benefit of using TensorRT with YOLO11 on NVIDIA Jetson?
Using TensorRT with YOLO11 optimizes the model for inference, significantly reducing latency and improving throughput on NVIDIA Jetson devices. TensorRT provides high-performance, low-latency [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) inference through layer fusion, precision calibration, and kernel auto-tuning. This leads to faster and more efficient execution, particularly useful for real-time applications like video analytics and autonomous machines.
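As a minimal sketch of what this looks like with the Ultralytics API (the image path is a placeholder, and the export should be run on the target Jetson device, since TensorRT engines are device-specific):

```python
from ultralytics import YOLO

# Export a YOLO11 model to a TensorRT engine with FP16 precision
model = YOLO("yolo11n.pt")
model.export(format="engine", half=True)  # creates yolo11n.engine

# Run inference with the optimized engine
trt_model = YOLO("yolo11n.engine")
results = trt_model("path/to/image.jpg")
```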
### Can I run Ultralytics YOLO11 with DeepStream SDK across different NVIDIA Jetson hardware?
Yes, the guide for deploying Ultralytics YOLO11 with the DeepStream SDK and TensorRT is compatible across the entire NVIDIA Jetson lineup. This includes devices like the Jetson Orin NX 16GB with [JetPack 5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513) and the Jetson Nano 4GB with [JetPack 4.6.4](https://developer.nvidia.com/jetpack-sdk-464). Refer to the section [DeepStream Configuration for YOLO11](#deepstream-configuration-for-yolo11) for detailed steps.
### How can I convert a YOLO11 model to ONNX for DeepStream?
To convert a YOLO11 model to ONNX format for deployment with DeepStream, use the `utils/export_yoloV8.py` script from the [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo) repository.
Here's an example command:
```bash
python3 utils/export_yoloV8.py -w yolo11s.pt --opset 12 --simplify
```
For more details on model conversion, check out our [model export section](../modes/export.md).
### What are the performance benchmarks for YOLO on NVIDIA Jetson Orin NX?
The performance of YOLO11 models on NVIDIA Jetson Orin NX 16GB varies based on TensorRT precision levels. For example, YOLO11s models achieve:
- **FP32 Precision**: 14.53 ms/im, 68.8 FPS
- **FP16 Precision**: 7.91 ms/im, 126.4 FPS
- **INT8 Precision**: 6.05 ms/im, 165.3 FPS
These benchmarks underscore the efficiency and capability of using TensorRT-optimized YOLO11 models on NVIDIA Jetson hardware. For further details, see our [Benchmark Results](#benchmark-results) section.

---
comments: true
description: Learn how to define clear goals and objectives for your computer vision project with our practical guide. Includes tips on problem statements, measurable objectives, and key decisions.
keywords: computer vision, project planning, problem statement, measurable objectives, dataset preparation, model selection, YOLO11, Ultralytics
---
# A Practical Guide for Defining Your Computer Vision Project
## Introduction
The first step in any computer vision project is defining what you want to achieve. It's crucial to have a clear roadmap from the start, which includes everything from data collection to deploying your model.
If you need a quick refresher on the basics of a computer vision project, take a moment to read our guide on [the key steps in a computer vision project](./steps-of-a-cv-project.md). It'll give you a solid overview of the whole process. Once you're caught up, come back here to dive into how exactly you can define and refine the goals for your project.
Now, let's get to the heart of defining a clear problem statement for your project and exploring the key decisions you'll need to make along the way.
## Defining A Clear Problem Statement
Setting clear goals and objectives for your project is the first big step toward finding the most effective solutions. Let's understand how you can clearly define your project's problem statement:
- **Identify the Core Issue:** Pinpoint the specific challenge your computer vision project aims to solve.
- **Determine the Scope:** Define the boundaries of your problem.
- **Consider End Users and Stakeholders:** Identify who will be affected by the solution.
- **Analyze Project Requirements and Constraints:** Assess available resources (time, budget, personnel) and identify any technical or regulatory constraints.
### Example of a Business Problem Statement
Let's walk through an example.
Consider a computer vision project where you want to [estimate the speed of vehicles](./speed-estimation.md) on a highway. The core issue is that current speed monitoring methods are inefficient and error-prone due to outdated radar systems and manual processes. The project aims to develop a real-time computer vision system that can replace legacy [speed estimation](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) systems.
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/speed-estimation-using-yolov8.avif" alt="Speed Estimation Using YOLO11">
</p>
Primary users include traffic management authorities and law enforcement, while secondary stakeholders are highway planners and the public benefiting from safer roads. Key requirements involve evaluating budget, time, and personnel, as well as addressing technical needs like high-resolution cameras and real-time data processing. Additionally, regulatory constraints on privacy and [data security](https://www.ultralytics.com/glossary/data-security) must be considered.
### Setting Measurable Objectives
Setting measurable objectives is key to the success of a computer vision project. These goals should be clear, achievable, and time-bound.
For example, if you are developing a system to estimate vehicle speeds on a highway, you could consider the following measurable objectives:
- To achieve at least 95% [accuracy](https://www.ultralytics.com/glossary/accuracy) in speed detection within six months, using a dataset of 10,000 vehicle images.
- The system should be able to process real-time video feeds at 30 frames per second with minimal delay.
By setting specific and quantifiable goals, you can effectively track progress, identify areas for improvement, and ensure the project stays on course.
## The Connection Between The Problem Statement and The Computer Vision Tasks
Your problem statement helps you conceptualize which computer vision task can solve your issue.
For example, if your problem is monitoring vehicle speeds on a highway, the relevant computer vision task is object tracking. [Object tracking](../modes/track.md) is suitable because it allows the system to continuously follow each vehicle in the video feed, which is crucial for accurately calculating their speeds.
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/example-of-object-tracking.avif" alt="Example of Object Tracking">
</p>
Other tasks, like [object detection](../tasks/detect.md), are not suitable as they don't provide continuous location or movement information. Once you've identified the appropriate computer vision task, it guides several critical aspects of your project, like model selection, dataset preparation, and model training approaches.
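As a minimal sketch of what tracking looks like with Ultralytics YOLO (the video path is a placeholder):

```python
from ultralytics import YOLO

# Track vehicles across frames; persistent track IDs are what make
# downstream speed calculation possible
model = YOLO("yolo11n.pt")
results = model.track(source="path/to/highway_video.mp4", show=True)
```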
## Which Comes First: Model Selection, Dataset Preparation, or Model Training Approach?
The order of model selection, dataset preparation, and training approach depends on the specifics of your project. Here are a few tips to help you decide:
- **Clear Understanding of the Problem**: If your problem and objectives are well-defined, start with model selection. Then, prepare your dataset and decide on the training approach based on the model's requirements.
- **Example**: Start by selecting a model for a traffic monitoring system that estimates vehicle speeds. Choose an object tracking model, gather and annotate highway videos, and then train the model with techniques for real-time video processing.
- **Unique or Limited Data**: If your project is constrained by unique or limited data, begin with dataset preparation. For instance, if you have a rare dataset of medical images, annotate and prepare the data first. Then, select a model that performs well on such data, followed by choosing a suitable training approach.
- **Example**: Prepare the data first for a facial recognition system with a small dataset. Annotate it, then select a model that works well with limited data, such as a pre-trained model for [transfer learning](https://www.ultralytics.com/glossary/transfer-learning). Finally, decide on a training approach, including [data augmentation](https://www.ultralytics.com/glossary/data-augmentation), to expand the dataset (see the fine-tuning sketch after this list).
- **Need for Experimentation**: In projects where experimentation is crucial, start with the training approach. This is common in research projects where you might initially test different training techniques. Refine your model selection after identifying a promising method and prepare the dataset based on your findings.
- **Example**: In a project exploring new methods for detecting manufacturing defects, start with experimenting on a small data subset. Once you find a promising technique, select a model tailored to those findings and prepare a comprehensive dataset.
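For the limited-data scenario above, a minimal fine-tuning sketch might look like the following, assuming a custom dataset YAML (a placeholder) and illustrative hyperparameter values:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # start from pre-trained weights (transfer learning)

model.train(
    data="path/to/custom_data.yaml",  # placeholder: your dataset config
    epochs=100,  # illustrative value
    imgsz=640,
    freeze=10,  # freeze the first 10 layers to preserve pre-trained features
    degrees=10.0,  # augmentation: random rotation to expand a small dataset
    fliplr=0.5,  # augmentation: horizontal flips
)
```

Freezing early layers keeps the general features learned during pre-training intact while the later layers adapt to the new classes.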
## Common Discussion Points in the Community
Next, let's look at a few common discussion points in the community regarding computer vision tasks and project planning.
### What Are the Different Computer Vision Tasks?
The most popular computer vision tasks include [image classification](https://www.ultralytics.com/glossary/image-classification), [object detection](https://www.ultralytics.com/glossary/object-detection), and [image segmentation](https://www.ultralytics.com/glossary/image-segmentation).
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/image-classification-vs-object-detection-vs-image-segmentation.avif" alt="Overview of Computer Vision Tasks">
</p>
For a detailed explanation of various tasks, please take a look at the Ultralytics Docs page on [YOLO11 Tasks](../tasks/index.md).
### Can a Pre-trained Model Remember Classes It Knew Before Custom Training?
No, pre-trained models don't "remember" classes in the traditional sense. They learn patterns from massive datasets, and during custom training (fine-tuning), these patterns are adjusted for your specific task. The model's capacity is limited, and focusing on new information can overwrite some of what the model previously learned.
<p align="center">
<img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/overview-of-transfer-learning.avif" alt="Overview of Transfer Learning">
</p>
If you want to use the classes the model was pre-trained on, a practical approach is to use two models: one retains the original performance, and the other is fine-tuned for your specific task. This way, you can combine the outputs of both models. There are other options like freezing layers, using the pre-trained model as a feature extractor, and task-specific branching, but these are more complex solutions and require more expertise.
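A minimal sketch of the two-model approach might look like this, where the fine-tuned weights path is a placeholder and the combination step is a simple concatenation of detections:

```python
from ultralytics import YOLO

base = YOLO("yolo11n.pt")  # retains the original pre-trained classes
custom = YOLO("path/to/custom.pt")  # placeholder: your fine-tuned weights

image = "path/to/image.jpg"  # placeholder
base_results = base(image)[0]
custom_results = custom(image)[0]

# Combine detections; class names come from each model's own names map
detections = [(base_results.names[int(b.cls)], float(b.conf)) for b in base_results.boxes]
detections += [(custom_results.names[int(b.cls)], float(b.conf)) for b in custom_results.boxes]
print(detections)
```

In practice you may also want to deduplicate overlapping boxes from the two models, for example with an IoU threshold.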
### How Do Deployment Options Affect My Computer Vision Project?
[Model deployment options](./model-deployment-options.md) critically impact the performance of your computer vision project. For instance, the deployment environment must handle the computational load of your model. Here are some practical examples:
- **Edge Devices**: Deploying on edge devices like smartphones or IoT devices requires lightweight models due to their limited computational resources. Example technologies include [TensorFlow Lite](../integrations/tflite.md) and [ONNX Runtime](../integrations/onnx.md), which are optimized for such environments (see the export sketch after this list).
- **Cloud Servers**: Cloud deployments can handle more complex models with larger computational demands. Cloud platforms like [AWS](../integrations/amazon-sagemaker.md), Google Cloud, and Azure offer robust hardware options that can scale based on the project's needs.
- **On-Premise Servers**: For scenarios requiring high [data privacy](https://www.ultralytics.com/glossary/data-privacy) and security, deploying on-premise might be necessary. This involves significant upfront hardware investment but allows full control over the data and infrastructure.
- **Hybrid Solutions**: Some projects might benefit from a hybrid approach, where some processing is done on the edge, while more complex analyses are offloaded to the cloud. This can balance performance needs with cost and latency considerations.
Each deployment option offers different benefits and challenges, and the choice depends on specific project requirements like performance, cost, and security.
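As referenced in the edge-device item above, converting a model for lightweight deployment is a single export call per format. This sketch uses the documented `onnx` and `tflite` export formats:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# ONNX for ONNX Runtime deployments (servers and many edge devices)
model.export(format="onnx")

# TensorFlow Lite for mobile and embedded targets
model.export(format="tflite")
```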
## Connecting with the Community
Connecting with other computer vision enthusiasts can be incredibly helpful for your projects by providing support, solutions, and new ideas. Here are some great ways to learn, troubleshoot, and network:
### Community Support Channels
- **GitHub Issues:** Head over to the YOLO11 GitHub repository. You can use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers can assist with specific problems you encounter.
- **Ultralytics Discord Server:** Become part of the [Ultralytics Discord server](https://discord.com/invite/ultralytics). Connect with fellow users and developers, seek support, exchange knowledge, and discuss ideas.
### Comprehensive Guides and Documentation
- **Ultralytics YOLO11 Documentation:** Explore the [official YOLO11 documentation](./index.md) for in-depth guides and valuable tips on various computer vision tasks and projects.
## Conclusion
Defining a clear problem and setting measurable goals are key to a successful computer vision project. We've highlighted the importance of being clear and focused from the start. Having specific goals helps you avoid oversights. Also, staying connected with others in the community through platforms like [GitHub](https://github.com/ultralytics/ultralytics) or [Discord](https://discord.com/invite/ultralytics) is important for learning and staying current. In short, good planning and community engagement are a huge part of successful computer vision projects.
## FAQ
### How do I define a clear problem statement for my Ultralytics computer vision project?
To define a clear problem statement for your Ultralytics computer vision project, follow these steps:
1. **Identify the Core Issue:** Pinpoint the specific challenge your project aims to solve.
2. **Determine the Scope:** Clearly outline the boundaries of your problem.
3. **Consider End Users and Stakeholders:** Identify who will be affected by your solution.
4. **Analyze Project Requirements and Constraints:** Assess available resources and any technical or regulatory limitations.
Providing a well-defined problem statement ensures that the project remains focused and aligned with your objectives. For a detailed guide, refer to our [practical guide](#defining-a-clear-problem-statement).
### Why should I use Ultralytics YOLO11 for speed estimation in my computer vision project?
Ultralytics YOLO11 is ideal for speed estimation because of its real-time object tracking capabilities, high accuracy, and robust performance in detecting and monitoring vehicle speeds. It overcomes inefficiencies and inaccuracies of traditional radar systems by leveraging cutting-edge computer vision technology. Check out our blog on [speed estimation using YOLO11](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) for more insights and practical examples.
### How do I set effective measurable objectives for my computer vision project with Ultralytics YOLO11?
Set effective and measurable objectives using the SMART criteria:
- **Specific:** Define clear and detailed goals.
- **Measurable:** Ensure objectives are quantifiable.
- **Achievable:** Set realistic targets within your capabilities.
- **Relevant:** Align objectives with your overall project goals.
- **Time-bound:** Set deadlines for each objective.
For example, "Achieve 95% accuracy in speed detection within six months using a 10,000 vehicle image dataset." This approach helps track progress and identifies areas for improvement. Read more about [setting measurable objectives](#setting-measurable-objectives).
### How do deployment options affect the performance of my Ultralytics YOLO models?
Deployment options critically impact the performance of your Ultralytics YOLO models. Here are key options:
- **Edge Devices:** Export models to lightweight runtimes like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) Lite or ONNX Runtime for deployment on devices with limited resources.
- **Cloud Servers:** Utilize robust cloud platforms like AWS, Google Cloud, or Azure for handling complex models.
- **On-Premise Servers:** High data privacy and security needs may require on-premise deployments.
- **Hybrid Solutions:** Combine edge and cloud approaches for balanced performance and cost-efficiency.
For more information, refer to our [detailed guide on model deployment options](./model-deployment-options.md).
### What are the most common challenges in defining the problem for a computer vision project with Ultralytics?
Common challenges include:
- Vague or overly broad problem statements.
- Unrealistic objectives.
- Lack of stakeholder alignment.
- Insufficient understanding of technical constraints.
- Underestimating data requirements.
Address these challenges through thorough initial research, clear communication with stakeholders, and iterative refinement of the problem statement and objectives. Learn more about these challenges in our [Computer Vision Project guide](steps-of-a-cv-project.md).
---
comments: true
description: Learn how to calculate distances between objects using Ultralytics YOLO11 for accurate spatial positioning and scene understanding.
keywords: Ultralytics, YOLO11, distance calculation, computer vision, object tracking, spatial positioning
---
# Distance Calculation using Ultralytics YOLO11
## What is Distance Calculation?
Distance calculation is the process of measuring the gap between two objects within a specified space. With [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics), the distance between two user-selected objects is computed from their [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroids.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/LE8am1QoVn4"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Distance Calculation using Ultralytics YOLO11
</p>
## Visuals
| Distance Calculation using Ultralytics YOLO11 |
| :---------------------------------------------------------------------------------------------------------------------------: |
| ![Ultralytics YOLO11 Distance Calculation](https://github.com/ultralytics/docs/releases/download/0/distance-calculation.avif) |
## Advantages of Distance Calculation
- **Localization [Precision](https://www.ultralytics.com/glossary/precision):** Enhances accurate spatial positioning in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
- **Size Estimation:** Allows estimation of object size for better contextual understanding.
- **Scene Understanding:** Improves 3D scene comprehension for better decision-making in applications like [autonomous vehicles](https://www.ultralytics.com/glossary/autonomous-vehicles) and surveillance systems.
- **Collision Avoidance:** Enables systems to detect potential collisions by monitoring distances between moving objects.
- **Spatial Analysis:** Facilitates analysis of object relationships and interactions within the monitored environment.
???+ tip "Distance Calculation"
- Click on any two bounding boxes with Left Mouse click for distance calculation
- Mouse Right Click will delete all drawn points
- Mouse Left Click can be used to draw points
???+ warning "Distance Is an Estimate"

    The distance is an estimate and may not be fully accurate, because it is calculated from 2-dimensional
    image data, which lacks information about the objects' depth.
!!! example "Distance Calculation using Ultralytics YOLO"

    === "Python"

        ```python
        import cv2

        from ultralytics import solutions

        cap = cv2.VideoCapture("Path/to/video/file.mp4")
        assert cap.isOpened(), "Error reading video file"

        # Video writer
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
        video_writer = cv2.VideoWriter("distance_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

        # Initialize the distance calculation solution
        distancecalculator = solutions.DistanceCalculation(
            model="yolo11n.pt",  # path to the YOLO11 model file
            show=True,  # display the output
        )

        # Process video
        while cap.isOpened():
            success, im0 = cap.read()
            if not success:
                print("Video frame is empty or processing is complete.")
                break

            results = distancecalculator(im0)

            print(results)  # access the output
            video_writer.write(results.plot_im)  # write the processed frame

        cap.release()
        video_writer.release()
        cv2.destroyAllWindows()  # close all open display windows
        ```
### `DistanceCalculation()` Arguments
Here's a table with the `DistanceCalculation` arguments:
{% from "macros/solutions-args.md" import param_table %}
{{ param_table(["model"]) }}
You can also make use of various `track` arguments in the `DistanceCalculation` solution.
{% from "macros/track-args.md" import param_table %}
{{ param_table(["tracker", "conf", "iou", "classes", "verbose", "device"]) }}
Moreover, the following visualization arguments are available:
{% from "macros/visualization-args.md" import param_table %}
{{ param_table(["show", "line_width"]) }}
## Implementation Details
The `DistanceCalculation` class works by tracking objects across video frames and calculating the Euclidean distance between the centroids of selected bounding boxes. When you click on two objects, the solution:
1. Extracts the centroids (center points) of the selected bounding boxes
2. Calculates the Euclidean distance between these centroids in pixels
3. Displays the distance on the frame with a connecting line between the objects
The implementation uses the `mouse_event_for_distance` method to handle mouse interactions, allowing users to select objects and clear selections as needed. The `process` method handles the frame-by-frame processing, tracking objects, and calculating distances.
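The geometry behind step 2 is simple enough to show standalone. The sketch below computes the centroid-to-centroid distance for two boxes; the coordinate values are made up for the example:

```python
import math


def centroid(box):
    """Return the center point of an (x1, y1, x2, y2) bounding box."""
    x1, y1, x2, y2 = box
    return (x1 + x2) / 2, (y1 + y2) / 2


# Two example boxes in pixel coordinates (illustrative values)
box_a = (100, 150, 200, 300)
box_b = (400, 180, 520, 340)

(ax, ay), (bx, by) = centroid(box_a), centroid(box_b)
distance_px = math.hypot(bx - ax, by - ay)  # Euclidean distance in pixels
print(f"Distance between centroids: {distance_px:.1f} px")
```

Note that the result is in pixels; converting to real-world units requires a calibration step that maps pixel distances to physical distances.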
## Applications
Distance calculation with YOLO11 has numerous practical applications:
- **Retail Analytics:** Measure customer proximity to products and analyze store layout effectiveness
- **Industrial Safety:** Monitor safe distances between workers and machinery
- **Traffic Management:** Analyze vehicle spacing and detect tailgating
- **Sports Analysis:** Calculate distances between players, the ball, and key field positions
- **Healthcare:** Ensure proper distancing in waiting areas and monitor patient movement
- **Robotics:** Enable robots to maintain appropriate distances from obstacles and people
## FAQ
### How do I calculate distances between objects using Ultralytics YOLO11?
To calculate distances between objects using [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics), you need to identify the bounding box centroids of the detected objects. This process involves initializing the `DistanceCalculation` class from Ultralytics' `solutions` module and using the model's tracking outputs to calculate the distances.
### What are the advantages of using distance calculation with Ultralytics YOLO11?
Using distance calculation with Ultralytics YOLO11 offers several advantages:
- **Localization Precision:** Provides accurate spatial positioning for objects.
- **Size Estimation:** Helps estimate physical sizes, contributing to better contextual understanding.
- **Scene Understanding:** Enhances 3D scene comprehension, aiding improved decision-making in applications like autonomous driving and surveillance.
- **Real-time Processing:** Performs calculations on the fly, making it suitable for live video analysis.
- **Integration Capabilities:** Works seamlessly with other YOLO11 solutions like [object tracking](../modes/track.md) and [speed estimation](speed-estimation.md).
### Can I perform distance calculation in real-time video streams with Ultralytics YOLO11?
Yes, you can perform distance calculation in real-time video streams with Ultralytics YOLO11. The process involves capturing video frames using [OpenCV](https://www.ultralytics.com/glossary/opencv), running YOLO11 [object detection](https://www.ultralytics.com/glossary/object-detection), and using the `DistanceCalculation` class to calculate distances between objects in successive frames. For a detailed implementation, see the [video stream example](#distance-calculation-using-ultralytics-yolo11).
### How do I delete points drawn during distance calculation using Ultralytics YOLO11?
To delete points drawn during distance calculation with Ultralytics YOLO11, you can use a right mouse click. This action will clear all the points you have drawn. For more details, refer to the note section under the [distance calculation example](#distance-calculation-using-ultralytics-yolo11).
### What are the key arguments for initializing the DistanceCalculation class in Ultralytics YOLO11?
The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLO11 include:
- `model`: Path to the YOLO11 model file.
- `tracker`: Tracking algorithm to use (default is 'botsort.yaml').
- `conf`: Confidence threshold for detections.
- `show`: Flag to display the output.
For an exhaustive list and default values, see the [arguments of DistanceCalculation](#distancecalculation-arguments).