Skip to content

Commit e2b7bc0

Browse files
Ben Milanko and glenn-jocher
authored
YouTube Livestream Detection (#2752)
* Youtube livestream detection * dependency update to auto install pafy * Remove print * include youtube_dl in deps * PEP8 reformat * youtube url check fix * reduce lines * add comment * update check_requirements * stream framerate fix * Update README.md * cleanup * PEP8 * remove cap.retrieve() failure code Co-authored-by: Glenn Jocher <[email protected]>
1 parent 9029759 commit e2b7bc0

File tree

4 files changed

+30
-20
lines changed

4 files changed

+30
-20
lines changed

README.md

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam
9292
file.mp4 # video
9393
path/ # directory
9494
path/*.jpg # glob
95-
rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream
96-
rtmp://192.168.1.105/live/test # rtmp stream
97-
http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream
95+
'https://youtu.be/NUsoVlDFqZg' # YouTube video
96+
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
9897
```
9998

10099
To run inference on example images in `data/images`:

detect.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ def detect(save_img=False):
1919
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
2020
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
2121
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
22-
('rtsp://', 'rtmp://', 'http://'))
22+
('rtsp://', 'rtmp://', 'http://', 'https://'))
2323

2424
# Directories
2525
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run

utils/datasets.py

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@
2020
from torch.utils.data import Dataset
2121
from tqdm import tqdm
2222

23-
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
24-
clean_str
23+
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
24+
resample_segments, clean_str
2525
from utils.torch_utils import torch_distributed_zero_first
2626

2727
# Parameters
@@ -275,14 +275,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
275275
for i, s in enumerate(sources):
276276
# Start the thread to read frames from the video stream
277277
print(f'{i + 1}/{n}: {s}... ', end='')
278-
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
278+
url = eval(s) if s.isnumeric() else s
279+
if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video
280+
check_requirements(('pafy', 'youtube_dl'))
281+
import pafy
282+
url = pafy.new(url).getbest(preftype="mp4").url
283+
cap = cv2.VideoCapture(url)
279284
assert cap.isOpened(), f'Failed to open {s}'
280285
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
281286
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
282-
fps = cap.get(cv2.CAP_PROP_FPS) % 100
287+
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
288+
283289
_, self.imgs[i] = cap.read() # guarantee first frame
284290
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
285-
print(f' success ({w}x{h} at {fps:.2f} FPS).')
291+
print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
286292
thread.start()
287293
print('') # newline
288294

@@ -303,7 +309,7 @@ def update(self, index, cap):
303309
success, im = cap.retrieve()
304310
self.imgs[index] = im if success else self.imgs[index] * 0
305311
n = 0
306-
time.sleep(0.01) # wait time
312+
time.sleep(1 / self.fps) # wait time
307313

308314
def __iter__(self):
309315
self.count = -1
@@ -444,7 +450,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
444450
gb += self.imgs[i].nbytes
445451
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
446452
pbar.close()
447-
453+
448454
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
449455
# Cache dataset labels, check images and read shapes
450456
x = {} # dict
@@ -489,7 +495,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
489495
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
490496
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
491497
pbar.close()
492-
498+
493499
if nf == 0:
494500
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
495501

@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_
10341040
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
10351041
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
10361042

1043+
10371044
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
10381045
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
10391046
Usage: from utils.datasets import *; autosplit('../coco128')

utils/general.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -91,17 +91,20 @@ def check_git_status():
9191
print(e)
9292

9393

94-
def check_requirements(file='requirements.txt', exclude=()):
95-
# Check installed dependencies meet requirements
94+
def check_requirements(requirements='requirements.txt', exclude=()):
95+
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
9696
import pkg_resources as pkg
9797
prefix = colorstr('red', 'bold', 'requirements:')
98-
file = Path(file)
99-
if not file.exists():
100-
print(f"{prefix} {file.resolve()} not found, check failed.")
101-
return
98+
if isinstance(requirements, (str, Path)): # requirements.txt file
99+
file = Path(requirements)
100+
if not file.exists():
101+
print(f"{prefix} {file.resolve()} not found, check failed.")
102+
return
103+
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
104+
else: # list or tuple of packages
105+
requirements = [x for x in requirements if x not in exclude]
102106

103107
n = 0 # number of packages updates
104-
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
105108
for r in requirements:
106109
try:
107110
pkg.require(r)
@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()):
111114
print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
112115

113116
if n: # if packages updated
114-
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \
117+
source = file.resolve() if 'file' in locals() else requirements
118+
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
115119
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
116120
print(emojis(s)) # emoji-safe
117121

0 commit comments

Comments
 (0)