diff --git a/README.md b/README.md
index 103256f8a..2df72b749 100644
--- a/README.md
+++ b/README.md
@@ -479,7 +479,8 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
     --no-download-archive           Do not use archive file (default)
     --max-downloads NUMBER          Abort after downloading NUMBER files
     --break-on-existing             Stop the download process when encountering
-                                    a file that is in the archive
+                                    a file that is in the archive supplied with
+                                    the --download-archive option
     --no-break-on-existing          Do not stop the download process when
                                     encountering a file that is in the archive
                                     (default)
diff --git a/test/test_traversal.py b/test/test_traversal.py
index cc0228d27..d48606e99 100644
--- a/test/test_traversal.py
+++ b/test/test_traversal.py
@@ -490,7 +490,7 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en', 'name': 'en'},
         ], [..., {
             'id': 'name',
-            'ext': ['url', {lambda x: determine_ext(x, default_ext=None)}],
+            'ext': ['url', {determine_ext(default_ext=None)}],
             'url': 'url',
         }, all, {subs_list_to_dict(ext='ext')}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}],
diff --git a/test/test_utils.py b/test/test_utils.py
index 04f91547a..b5f35736b 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -2156,7 +2156,7 @@ Line 1
         assert callable(int_or_none(scale=10)), 'missing positional parameter should apply partially'
         assert int_or_none(10, scale=0.1) == 100, 'positionally passed argument should call function'
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
-        assert int_or_none(scale=0.1)(10) == 100, 'call after partial applicatino should call the function'
+        assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
 
         assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
         assert callable(join_nonempty()), 'varargs positional should apply partially'
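
The test_utils.py hunk above asserts the partial-application behaviour that the extractor hunks further down rely on. A minimal sketch of the idiom, assuming yt-dlp with this patch applied; the payload key name is invented for illustration only:

# Calling int_or_none with only keyword arguments returns a partially applied
# callable, so it can sit directly in a traverse_obj path in place of
# functools.partial(int_or_none, scale=1000).
from yt_dlp.utils import int_or_none
from yt_dlp.utils.traversal import traverse_obj

to_seconds = int_or_none(scale=1000)   # no positional value yet -> callable
assert callable(to_seconds) and to_seconds(195000) == 195

info = {'duration_ms': 195000}         # hypothetical API payload
assert traverse_obj(info, ('duration_ms', {int_or_none(scale=1000)})) == 195
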
diff --git a/yt_dlp/extractor/afreecatv.py b/yt_dlp/extractor/afreecatv.py
index 83e510d1a..6682a8981 100644
--- a/yt_dlp/extractor/afreecatv.py
+++ b/yt_dlp/extractor/afreecatv.py
@@ -154,7 +154,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'title': ('title', {str}),
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
-            'duration': ('total_file_duration', {functools.partial(int_or_none, scale=1000)}),
+            'duration': ('total_file_duration', {int_or_none(scale=1000)}),
             'thumbnail': ('thumb', {url_or_none}),
         })
 
@@ -178,7 +178,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
                 'title': f'{common_info.get("title") or "Untitled"} (part {file_num})',
                 'formats': formats,
                 **traverse_obj(file_element, {
-                    'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
+                    'duration': ('duration', {int_or_none(scale=1000)}),
                     'timestamp': ('file_start', {unified_timestamp}),
                 }),
             })
@@ -234,7 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
             'catch_list', lambda _, v: v['files'][0]['file'], {
                 'id': ('files', 0, 'file_info_key', {str}),
                 'url': ('files', 0, 'file', {url_or_none}),
-                'duration': ('files', 0, 'duration', {functools.partial(int_or_none, scale=1000)}),
+                'duration': ('files', 0, 'duration', {int_or_none(scale=1000)}),
                 'title': ('title', {str}),
                 'uploader': ('writer_nick', {str}),
                 'uploader_id': ('writer_id', {str}),
diff --git a/yt_dlp/extractor/allstar.py b/yt_dlp/extractor/allstar.py
index 5ea1c30e3..697d83c1e 100644
--- a/yt_dlp/extractor/allstar.py
+++ b/yt_dlp/extractor/allstar.py
@@ -71,7 +71,7 @@ class AllstarBaseIE(InfoExtractor):
             'thumbnails': (('clipImageThumb', 'clipImageSource'), {'url': {media_url_or_none}}),
             'duration': ('clipLength', {int_or_none}),
             'filesize': ('clipSizeBytes', {int_or_none}),
-            'timestamp': ('createdDate', {functools.partial(int_or_none, scale=1000)}),
+            'timestamp': ('createdDate', {int_or_none(scale=1000)}),
             'uploader': ('username', {str}),
             'uploader_id': ('user', '_id', {str}),
             'view_count': ('views', {int_or_none}),
diff --git a/yt_dlp/extractor/bandcamp.py b/yt_dlp/extractor/bandcamp.py
index 0abe05982..939c2800e 100644
--- a/yt_dlp/extractor/bandcamp.py
+++ b/yt_dlp/extractor/bandcamp.py
@@ -1,4 +1,3 @@
-import functools
 import json
 import random
 import re
@@ -10,7 +9,6 @@ from ..utils import (
     ExtractorError,
     extract_attributes,
     float_or_none,
-    get_element_html_by_id,
     int_or_none,
     parse_filesize,
     str_or_none,
@@ -21,7 +19,7 @@ from ..utils import (
     url_or_none,
     urljoin,
 )
-from ..utils.traversal import traverse_obj
+from ..utils.traversal import find_element, traverse_obj
 
 
 class BandcampIE(InfoExtractor):
@@ -45,6 +43,8 @@ class BandcampIE(InfoExtractor):
             'uploader_url': 'https://youtube-dl.bandcamp.com',
             'uploader_id': 'youtube-dl',
             'thumbnail': 'https://f4.bcbits.com/img/a3216802731_5.jpg',
+            'artists': ['youtube-dl "\'/\\ä↭'],
+            'album_artists': ['youtube-dl "\'/\\ä↭'],
         },
         'skip': 'There is a limit of 200 free downloads / month for the test song',
     }, {
@@ -271,6 +271,18 @@ class BandcampAlbumIE(BandcampIE):  # XXX: Do not subclass from concrete IE
                     'timestamp': 1311756226,
                     'upload_date': '20110727',
                     'uploader': 'Blazo',
+                    'thumbnail': 'https://f4.bcbits.com/img/a1721150828_5.jpg',
+                    'album_artists': ['Blazo'],
+                    'uploader_url': 'https://blazo.bandcamp.com',
+                    'release_date': '20110727',
+                    'release_timestamp': 1311724800.0,
+                    'track': 'Intro',
+                    'uploader_id': 'blazo',
+                    'track_number': 1,
+                    'album': 'Jazz Format Mixtape vol.1',
+                    'artists': ['Blazo'],
+                    'duration': 19.335,
+                    'track_id': '1353101989',
                 },
             },
             {
@@ -282,6 +294,18 @@ class BandcampAlbumIE(BandcampIE):  # XXX: Do not subclass from concrete IE
                     'timestamp': 1311757238,
                     'upload_date': '20110727',
                     'uploader': 'Blazo',
+                    'track': 'Kero One - Keep It Alive (Blazo remix)',
+                    'release_date': '20110727',
+                    'track_id': '38097443',
+                    'track_number': 2,
+                    'duration': 181.467,
+                    'uploader_url': 'https://blazo.bandcamp.com',
+                    'album': 'Jazz Format Mixtape vol.1',
+                    'uploader_id': 'blazo',
+                    'album_artists': ['Blazo'],
+                    'artists': ['Blazo'],
+                    'thumbnail': 'https://f4.bcbits.com/img/a1721150828_5.jpg',
+                    'release_timestamp': 1311724800.0,
                 },
             },
         ],
@@ -289,6 +313,7 @@ class BandcampAlbumIE(BandcampIE):  # XXX: Do not subclass from concrete IE
             'title': 'Jazz Format Mixtape vol.1',
             'id': 'jazz-format-mixtape-vol-1',
             'uploader_id': 'blazo',
+            'description': 'md5:38052a93217f3ffdc033cd5dbbce2989',
         },
         'params': {
             'playlistend': 2,
@@ -363,10 +388,10 @@ class BandcampWeeklyIE(BandcampIE):  # XXX: Do not subclass from concrete IE
     _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://bandcamp.com/?show=224',
-        'md5': 'b00df799c733cf7e0c567ed187dea0fd',
+        'md5': '61acc9a002bed93986b91168aa3ab433',
         'info_dict': {
             'id': '224',
-            'ext': 'opus',
+            'ext': 'mp3',
             'title': 'BC Weekly April 4th 2017 - Magic Moments',
             'description': 'md5:5d48150916e8e02d030623a48512c874',
             'duration': 5829.77,
@@ -376,7 +401,7 @@ class BandcampWeeklyIE(BandcampIE):  # XXX: Do not subclass from concrete IE
             'episode_id': '224',
         },
         'params': {
-            'format': 'opus-lo',
+            'format': 'mp3-128',
         },
     }, {
         'url': 'https://bandcamp.com/?blah/blah@&show=228',
@@ -484,7 +509,7 @@ class BandcampUserIE(InfoExtractor):
             or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage))
 
         yield from traverse_obj(webpage, (
-            {functools.partial(get_element_html_by_id, 'music-grid')}, {extract_attributes},
+            {find_element(id='music-grid', html=True)}, {extract_attributes},
             'data-client-items', {json.loads}, ..., 'page_url', {str}))
 
     def _real_extract(self, url):
@@ -493,4 +518,4 @@ class BandcampUserIE(InfoExtractor):
 
         return self.playlist_from_matches(
             self._yield_items(webpage), uploader, f'Discography of {uploader}',
-            getter=functools.partial(urljoin, url))
+            getter=urljoin(url))
diff --git a/yt_dlp/extractor/bbc.py b/yt_dlp/extractor/bbc.py
index 3af923f95..89fcf4425 100644
--- a/yt_dlp/extractor/bbc.py
+++ b/yt_dlp/extractor/bbc.py
@@ -1284,9 +1284,9 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
                 **traverse_obj(model, {
                     'title': ('title', {str}),
                     'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
-                    'description': ('synopses', ('long', 'medium', 'short'), {str}, {lambda x: x or None}, any),
+                    'description': ('synopses', ('long', 'medium', 'short'), {str}, filter, any),
                     'duration': ('versions', 0, 'duration', {int}),
-                    'timestamp': ('versions', 0, 'availableFrom', {functools.partial(int_or_none, scale=1000)}),
+                    'timestamp': ('versions', 0, 'availableFrom', {int_or_none(scale=1000)}),
                 }),
             }
 
@@ -1386,7 +1386,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
                     formats = traverse_obj(media_data, ('playlist', lambda _, v: url_or_none(v['url']), {
                         'url': ('url', {url_or_none}),
                         'ext': ('format', {str}),
-                        'tbr': ('bitrate', {functools.partial(int_or_none, scale=1000)}),
+                        'tbr': ('bitrate', {int_or_none(scale=1000)}),
                     }))
                     if formats:
                         entry = {
@@ -1398,7 +1398,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
                                 'title': ('title', {str}),
                                 'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
                                 'description': ('synopses', ('long', 'medium', 'short'), {str}, any),
-                                'timestamp': ('firstPublished', {functools.partial(int_or_none, scale=1000)}),
+                                'timestamp': ('firstPublished', {int_or_none(scale=1000)}),
                             }),
                         }
                         done = True
@@ -1428,7 +1428,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             if not entry.get('timestamp'):
                 entry['timestamp'] = traverse_obj(next_data, (
                     ..., 'contents', is_type('timestamp'), 'model',
-                    'timestamp', {functools.partial(int_or_none, scale=1000)}, any))
+                    'timestamp', {int_or_none(scale=1000)}, any))
             entries.append(entry)
             return self.playlist_result(
                 entries, playlist_id, playlist_title, playlist_description)
diff --git a/yt_dlp/extractor/bibeltv.py b/yt_dlp/extractor/bibeltv.py
index 666b51c56..ad00245de 100644
--- a/yt_dlp/extractor/bibeltv.py
+++ b/yt_dlp/extractor/bibeltv.py
@@ -1,4 +1,3 @@
-import functools
 
 from .common import InfoExtractor
 from ..utils import (
@@ -50,7 +49,7 @@ class BibelTVBaseIE(InfoExtractor):
             **traverse_obj(data, {
                 'title': 'title',
                 'description': 'description',
-                'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
+                'duration': ('duration', {int_or_none(scale=1000)}),
                 'timestamp': ('schedulingStart', {parse_iso8601}),
                 'season_number': 'seasonNumber',
                 'episode_number': 'episodeNumber',
diff --git a/yt_dlp/extractor/bilibili.py b/yt_dlp/extractor/bilibili.py
index 62f68fbc6..02ea67707 100644
--- a/yt_dlp/extractor/bilibili.py
+++ b/yt_dlp/extractor/bilibili.py
@@ -109,7 +109,7 @@ class BilibiliBaseIE(InfoExtractor):
 
         fragments = traverse_obj(play_info, ('durl', lambda _, v: url_or_none(v['url']), {
             'url': ('url', {url_or_none}),
-            'duration': ('length', {functools.partial(float_or_none, scale=1000)}),
+            'duration': ('length', {float_or_none(scale=1000)}),
             'filesize': ('size', {int_or_none}),
         }))
         if fragments:
@@ -124,7 +124,7 @@ class BilibiliBaseIE(InfoExtractor):
                     'quality': ('quality', {int_or_none}),
                     'format_id': ('quality', {str_or_none}),
                     'format_note': ('quality', {lambda x: format_names.get(x)}),
-                    'duration': ('timelength', {functools.partial(float_or_none, scale=1000)}),
+                    'duration': ('timelength', {float_or_none(scale=1000)}),
                 }),
                 **parse_resolution(format_names.get(play_info.get('quality'))),
             })
@@ -1585,7 +1585,7 @@ class BilibiliPlaylistIE(BilibiliSpaceListBaseIE):
                 'title': ('title', {str}),
                 'uploader': ('upper', 'name', {str}),
                 'uploader_id': ('upper', 'mid', {str_or_none}),
-                'timestamp': ('ctime', {int_or_none}, {lambda x: x or None}),
+                'timestamp': ('ctime', {int_or_none}, filter),
                 'thumbnail': ('cover', {url_or_none}),
             })),
         }
diff --git a/yt_dlp/extractor/bluesky.py b/yt_dlp/extractor/bluesky.py
index 42edd1107..0e58a0932 100644
--- a/yt_dlp/extractor/bluesky.py
+++ b/yt_dlp/extractor/bluesky.py
@@ -382,7 +382,7 @@ class BlueskyIE(InfoExtractor):
                 'age_limit': (
                     'labels', ..., 'val', {lambda x: 18 if x in ('sexual', 'porn', 'graphic-media') else None}, any),
                 'description': (*record_path, 'text', {str}, filter),
-                'title': (*record_path, 'text', {lambda x: x.replace('\n', '')}, {truncate_string(left=50)}),
+                'title': (*record_path, 'text', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}),
             }),
         })
         return entries
diff --git a/yt_dlp/extractor/bpb.py b/yt_dlp/extractor/bpb.py
index 7fe089944..d7bf58b36 100644
--- a/yt_dlp/extractor/bpb.py
+++ b/yt_dlp/extractor/bpb.py
@@ -1,35 +1,20 @@
-import functools
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     clean_html,
     extract_attributes,
-    get_element_text_and_html_by_tag,
-    get_elements_by_class,
     join_nonempty,
     js_to_json,
     mimetype2ext,
     unified_strdate,
     url_or_none,
     urljoin,
-    variadic,
 )
-from ..utils.traversal import traverse_obj
-
-
-def html_get_element(tag=None, cls=None):
-    assert tag or cls, 'One of tag or class is required'
-
-    if cls:
-        func = functools.partial(get_elements_by_class, cls, tag=tag)
-    else:
-        func = functools.partial(get_element_text_and_html_by_tag, tag)
-
-    def html_get_element_wrapper(html):
-        return variadic(func(html))[0]
-
-    return html_get_element_wrapper
+from ..utils.traversal import (
+    find_element,
+    traverse_obj,
+)
 
 
 class BpbIE(InfoExtractor):
@@ -41,12 +26,12 @@ class BpbIE(InfoExtractor):
         'info_dict': {
             'id': '297',
             'ext': 'mp4',
-            'creator': 'Kooperative Berlin',
-            'description': 'md5:f4f75885ba009d3e2b156247a8941ce6',
-            'release_date': '20160115',
+            'creators': ['Kooperative Berlin'],
+            'description': r're:Joachim Gauck, .*\n\nKamera: .*',
+            'release_date': '20150716',
             'series': 'Interview auf dem Geschichtsforum 1989 | 2009',
-            'tags': ['Friedliche Revolution', 'Erinnerungskultur', 'Vergangenheitspolitik', 'DDR 1949 - 1990', 'Freiheitsrecht', 'BStU', 'Deutschland'],
-            'thumbnail': 'https://www.bpb.de/cache/images/7/297_teaser_16x9_1240.jpg?8839D',
+            'tags': [],
+            'thumbnail': r're:https?://www\.bpb\.de/cache/images/7/297_teaser_16x9_1240\.jpg.*',
             'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
             'uploader': 'Bundeszentrale für politische Bildung',
         },
@@ -55,11 +40,12 @@ class BpbIE(InfoExtractor):
         'info_dict': {
             'id': '522184',
             'ext': 'mp4',
-            'creator': 'Institute for Strategic Dialogue Germany gGmbH (ISD)',
+            'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'],
             'description': 'md5:f83c795ff8f825a69456a9e51fc15903',
             'release_date': '20230621',
-            'tags': ['Desinformation', 'Ukraine', 'Russland', 'Geflüchtete'],
-            'thumbnail': 'https://www.bpb.de/cache/images/4/522184_teaser_16x9_1240.png?EABFB',
+            'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)',
+            'tags': [],
+            'thumbnail': r're:https://www\.bpb\.de/cache/images/4/522184_teaser_16x9_1240\.png.*',
             'title': 'md5:9b01ccdbf58dbf9e5c9f6e771a803b1c',
             'uploader': 'Bundeszentrale für politische Bildung',
         },
@@ -68,11 +54,12 @@ class BpbIE(InfoExtractor):
         'info_dict': {
             'id': '518789',
             'ext': 'mp4',
-            'creator': 'Institute for Strategic Dialogue Germany gGmbH (ISD)',
+            'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'],
             'description': 'md5:85228aed433e84ff0ff9bc582abd4ea8',
             'release_date': '20230302',
-            'tags': ['Desinformation', 'Ukraine', 'Russland', 'Geflüchtete'],
-            'thumbnail': 'https://www.bpb.de/cache/images/9/518789_teaser_16x9_1240.jpeg?56D0D',
+            'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)',
+            'tags': [],
+            'thumbnail': r're:https://www\.bpb\.de/cache/images/9/518789_teaser_16x9_1240\.jpeg.*',
             'title': 'md5:3e956f264bb501f6383f10495a401da4',
             'uploader': 'Bundeszentrale für politische Bildung',
         },
@@ -84,12 +71,12 @@ class BpbIE(InfoExtractor):
         'info_dict': {
             'id': '315813',
             'ext': 'mp3',
-            'creator': 'Axel Schröder',
+            'creators': ['Axel Schröder'],
             'description': 'md5:eda9d1af34e5912efef5baf54fba4427',
             'release_date': '20200921',
             'series': 'Auf Endlagersuche. Der deutsche Weg zu einem sicheren Atommülllager',
             'tags': ['Atomenergie', 'Endlager', 'hoch-radioaktiver Abfall', 'Endlagersuche', 'Atommüll', 'Atomendlager', 'Gorleben', 'Deutschland'],
-            'thumbnail': 'https://www.bpb.de/cache/images/3/315813_teaser_16x9_1240.png?92A94',
+            'thumbnail': r're:https://www\.bpb\.de/cache/images/3/315813_teaser_16x9_1240\.png.*',
             'title': 'Folge 1: Eine Einführung',
             'uploader': 'Bundeszentrale für politische Bildung',
         },
@@ -98,12 +85,12 @@ class BpbIE(InfoExtractor):
         'info_dict': {
             'id': '517806',
             'ext': 'mp3',
-            'creator': 'Bundeszentrale für politische Bildung',
+            'creators': ['Bundeszentrale für politische Bildung'],
             'description': 'md5:594689600e919912aade0b2871cc3fed',
             'release_date': '20230127',
             'series': 'Vorträge des Fachtags "Modernisierer. Grenzgänger. Anstifter. Sechs Jahrzehnte \'Neue Rechte\'"',
             'tags': ['Rechtsextremismus', 'Konservatismus', 'Konservativismus', 'neue Rechte', 'Rechtspopulismus', 'Schnellroda', 'Deutschland'],
-            'thumbnail': 'https://www.bpb.de/cache/images/6/517806_teaser_16x9_1240.png?7A7A0',
+            'thumbnail': r're:https://www\.bpb\.de/cache/images/6/517806_teaser_16x9_1240\.png.*',
             'title': 'Die Weltanschauung der "Neuen Rechten"',
             'uploader': 'Bundeszentrale für politische Bildung',
         },
@@ -147,7 +134,7 @@ class BpbIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        title_result = traverse_obj(webpage, ({html_get_element(cls='opening-header__title')}, {self._TITLE_RE.match}))
+        title_result = traverse_obj(webpage, ({find_element(cls='opening-header__title')}, {self._TITLE_RE.match}))
         json_lds = list(self._yield_json_ld(webpage, video_id, fatal=False))
 
         return {
@@ -156,15 +143,15 @@ class BpbIE(InfoExtractor):
             # This metadata could be interpreted otherwise, but it fits "series" the most
             'series': traverse_obj(title_result, ('series', {str.strip})) or None,
             'description': join_nonempty(*traverse_obj(webpage, [(
-                {html_get_element(cls='opening-intro')},
-                [{html_get_element(tag='bpb-accordion-item')}, {html_get_element(cls='text-content')}],
+                {find_element(cls='opening-intro')},
+                [{find_element(tag='bpb-accordion-item')}, {find_element(cls='text-content')}],
             ), {clean_html}]), delim='\n\n') or None,
-            'creator': self._html_search_meta('author', webpage),
+            'creators': traverse_obj(self._html_search_meta('author', webpage), all),
             'uploader': self._html_search_meta('publisher', webpage),
             'release_date': unified_strdate(self._html_search_meta('date', webpage)),
             'tags': traverse_obj(json_lds, (..., 'keywords', {lambda x: x.split(',')}, ...)),
             **traverse_obj(self._parse_vue_attributes('bpb-player', webpage, video_id), {
                 'formats': (':sources', ..., {self._process_source}),
-                'thumbnail': ('poster', {lambda x: urljoin(url, x)}),
+                'thumbnail': ('poster', {urljoin(url)}),
             }),
         }
diff --git a/yt_dlp/extractor/bravotv.py b/yt_dlp/extractor/bravotv.py
index ec72f0d88..0b2c44798 100644
--- a/yt_dlp/extractor/bravotv.py
+++ b/yt_dlp/extractor/bravotv.py
@@ -145,10 +145,9 @@ class BravoTVIE(AdobePassIE):
         tp_metadata = self._download_json(
             update_url_query(tp_url, {'format': 'preview'}), video_id, fatal=False)
 
-        seconds_or_none = lambda x: float_or_none(x, 1000)
         chapters = traverse_obj(tp_metadata, ('chapters', ..., {
-            'start_time': ('startTime', {seconds_or_none}),
-            'end_time': ('endTime', {seconds_or_none}),
+            'start_time': ('startTime', {float_or_none(scale=1000)}),
+            'end_time': ('endTime', {float_or_none(scale=1000)}),
         }))
         # prune pointless single chapters that span the entire duration from short videos
         if len(chapters) == 1 and not traverse_obj(chapters, (0, 'end_time')):
@@ -168,8 +167,8 @@ class BravoTVIE(AdobePassIE):
             **merge_dicts(traverse_obj(tp_metadata, {
                 'title': 'title',
                 'description': 'description',
-                'duration': ('duration', {seconds_or_none}),
-                'timestamp': ('pubDate', {seconds_or_none}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
+                'timestamp': ('pubDate', {float_or_none(scale=1000)}),
                 'season_number': (('pl1$seasonNumber', 'nbcu$seasonNumber'), {int_or_none}),
                 'episode_number': (('pl1$episodeNumber', 'nbcu$episodeNumber'), {int_or_none}),
                 'series': (('pl1$show', 'nbcu$show'), (None, ...), {str}),
diff --git a/yt_dlp/extractor/bundestag.py b/yt_dlp/extractor/bundestag.py
index 71f772665..3dacbbd24 100644
--- a/yt_dlp/extractor/bundestag.py
+++ b/yt_dlp/extractor/bundestag.py
@@ -8,11 +8,13 @@ from ..utils import (
     bug_reports_message,
     clean_html,
     format_field,
-    get_element_text_and_html_by_tag,
     int_or_none,
     url_or_none,
 )
-from ..utils.traversal import traverse_obj
+from ..utils.traversal import (
+    find_element,
+    traverse_obj,
+)
 
 
 class BundestagIE(InfoExtractor):
@@ -115,9 +117,8 @@ class BundestagIE(InfoExtractor):
             note='Downloading metadata overlay', fatal=False,
         ), {
             'title': (
-                {functools.partial(get_element_text_and_html_by_tag, 'h3')}, 0,
-                {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}),
-            'description': ({functools.partial(get_element_text_and_html_by_tag, 'p')}, 0, {clean_html}),
+                {find_element(tag='h3')}, {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}),
+            'description': ({find_element(tag='p')}, {clean_html}),
         }))
 
         return result
diff --git a/yt_dlp/extractor/caffeinetv.py b/yt_dlp/extractor/caffeinetv.py
index aa107f858..ea5134d2f 100644
--- a/yt_dlp/extractor/caffeinetv.py
+++ b/yt_dlp/extractor/caffeinetv.py
@@ -53,7 +53,7 @@ class CaffeineTVIE(InfoExtractor):
                 'like_count': ('like_count', {int_or_none}),
                 'view_count': ('view_count', {int_or_none}),
                 'comment_count': ('comment_count', {int_or_none}),
-                'tags': ('tags', ..., {str}, {lambda x: x or None}),
+                'tags': ('tags', ..., {str}, filter),
                 'uploader': ('user', 'name', {str}),
                 'uploader_id': (((None, 'user'), 'username'), {str}, any),
                 'is_live': ('is_live', {bool}),
@@ -62,7 +62,7 @@ class CaffeineTVIE(InfoExtractor):
                 'title': ('broadcast_title', {str}),
                 'duration': ('content_duration', {int_or_none}),
                 'timestamp': ('broadcast_start_time', {parse_iso8601}),
-                'thumbnail': ('preview_image_path', {lambda x: urljoin(url, x)}),
+                'thumbnail': ('preview_image_path', {urljoin(url)}),
             }),
             'age_limit': {
                 # assume Apple Store ratings: https://en.wikipedia.org/wiki/Mobile_software_content_rating_system
diff --git a/yt_dlp/extractor/cbc.py b/yt_dlp/extractor/cbc.py
index b44c23fa1..c0cf3da3d 100644
--- a/yt_dlp/extractor/cbc.py
+++ b/yt_dlp/extractor/cbc.py
@@ -453,8 +453,8 @@ class CBCPlayerIE(InfoExtractor):
 
         chapters = traverse_obj(data, (
             'media', 'chapters', lambda _, v: float(v['startTime']) is not None, {
-                'start_time': ('startTime', {functools.partial(float_or_none, scale=1000)}),
-                'end_time': ('endTime', {functools.partial(float_or_none, scale=1000)}),
+                'start_time': ('startTime', {float_or_none(scale=1000)}),
+                'end_time': ('endTime', {float_or_none(scale=1000)}),
                 'title': ('name', {str}),
             }))
         # Filter out pointless single chapters with start_time==0 and no end_time
@@ -465,8 +465,8 @@ class CBCPlayerIE(InfoExtractor):
             **traverse_obj(data, {
                 'title': ('title', {str}),
                 'description': ('description', {str.strip}),
-                'thumbnail': ('image', 'url', {url_or_none}, {functools.partial(update_url, query=None)}),
-                'timestamp': ('publishedAt', {functools.partial(float_or_none, scale=1000)}),
+                'thumbnail': ('image', 'url', {url_or_none}, {update_url(query=None)}),
+                'timestamp': ('publishedAt', {float_or_none(scale=1000)}),
                 'media_type': ('media', 'clipType', {str}),
                 'series': ('showName', {str}),
                 'season_number': ('media', 'season', {int_or_none}),
diff --git a/yt_dlp/extractor/cbsnews.py b/yt_dlp/extractor/cbsnews.py
index 972e11119..b01c0efd5 100644
--- a/yt_dlp/extractor/cbsnews.py
+++ b/yt_dlp/extractor/cbsnews.py
@@ -96,7 +96,7 @@ class CBSNewsBaseIE(InfoExtractor):
             **traverse_obj(item, {
                 'title': (None, ('fulltitle', 'title')),
                 'description': 'dek',
-                'timestamp': ('timestamp', {lambda x: float_or_none(x, 1000)}),
+                'timestamp': ('timestamp', {float_or_none(scale=1000)}),
                 'duration': ('duration', {float_or_none}),
                 'subtitles': ('captions', {get_subtitles}),
                 'thumbnail': ('images', ('hd', 'sd'), {url_or_none}),
diff --git a/yt_dlp/extractor/chzzk.py b/yt_dlp/extractor/chzzk.py
index b9c5e3ac0..aec77ac45 100644
--- a/yt_dlp/extractor/chzzk.py
+++ b/yt_dlp/extractor/chzzk.py
@@ -1,5 +1,3 @@
-import functools
-
 from .common import InfoExtractor
 from ..utils import (
     UserNotLive,
@@ -77,7 +75,7 @@ class CHZZKLiveIE(InfoExtractor):
             'thumbnails': thumbnails,
             **traverse_obj(live_detail, {
                 'title': ('liveTitle', {str}),
-                'timestamp': ('openDate', {functools.partial(parse_iso8601, delimiter=' ')}),
+                'timestamp': ('openDate', {parse_iso8601(delimiter=' ')}),
                 'concurrent_view_count': ('concurrentUserCount', {int_or_none}),
                 'view_count': ('accumulateCount', {int_or_none}),
                 'channel': ('channel', 'channelName', {str}),
@@ -176,7 +174,7 @@ class CHZZKVideoIE(InfoExtractor):
             **traverse_obj(video_meta, {
                 'title': ('videoTitle', {str}),
                 'thumbnail': ('thumbnailImageUrl', {url_or_none}),
-                'timestamp': ('publishDateAt', {functools.partial(float_or_none, scale=1000)}),
+                'timestamp': ('publishDateAt', {float_or_none(scale=1000)}),
                 'view_count': ('readCount', {int_or_none}),
                 'duration': ('duration', {int_or_none}),
                 'channel': ('channel', 'channelName', {str}),
diff --git a/yt_dlp/extractor/cineverse.py b/yt_dlp/extractor/cineverse.py
index c8c6c48c2..124c874e2 100644
--- a/yt_dlp/extractor/cineverse.py
+++ b/yt_dlp/extractor/cineverse.py
@@ -3,6 +3,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     filter_dict,
+    float_or_none,
     int_or_none,
     parse_age_limit,
     smuggle_url,
@@ -85,7 +86,7 @@ class CineverseIE(CineverseBaseIE):
                 'title': 'title',
                 'id': ('details', 'item_id'),
                 'description': ('details', 'description'),
-                'duration': ('duration', {lambda x: x / 1000}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
                 'cast': ('details', 'cast', {lambda x: x.split(', ')}),
                 'modified_timestamp': ('details', 'updated_by', 0, 'update_time', 'time', {int_or_none}),
                 'season_number': ('details', 'season', {int_or_none}),
diff --git a/yt_dlp/extractor/cnn.py b/yt_dlp/extractor/cnn.py
index cfcec9d1f..8148762c5 100644
--- a/yt_dlp/extractor/cnn.py
+++ b/yt_dlp/extractor/cnn.py
@@ -1,4 +1,3 @@
-import functools
 import json
 import re
 
@@ -199,7 +198,7 @@ class CNNIE(InfoExtractor):
                     'timestamp': ('data-publish-date', {parse_iso8601}),
                     'thumbnail': (
                         'data-poster-image-override', {json.loads}, 'big', 'uri', {url_or_none},
-                        {functools.partial(update_url, query='c=original')}),
+                        {update_url(query='c=original')}),
                     'display_id': 'data-video-slug',
                 }),
                 **traverse_obj(video_data, {
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index 7e6e6227d..01915acf2 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -1578,7 +1578,9 @@ class InfoExtractor:
         if default is not NO_DEFAULT:
             fatal = False
         for mobj in re.finditer(JSON_LD_RE, html):
-            json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
+            json_ld_item = self._parse_json(
+                mobj.group('json_ld'), video_id, fatal=fatal,
+                errnote=False if default is not NO_DEFAULT else None)
             for json_ld in variadic(json_ld_item):
                 if isinstance(json_ld, dict):
                     yield json_ld
diff --git a/yt_dlp/extractor/condenast.py b/yt_dlp/extractor/condenast.py
index 9c02cd342..0c84cfdab 100644
--- a/yt_dlp/extractor/condenast.py
+++ b/yt_dlp/extractor/condenast.py
@@ -12,6 +12,7 @@ from ..utils import (
     parse_iso8601,
     strip_or_none,
     try_get,
+    urljoin,
 )
 
 
@@ -112,8 +113,7 @@ class CondeNastIE(InfoExtractor):
         m_paths = re.finditer(
             r'(?s)<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', webpage)
         paths = orderedSet(m.group(1) for m in m_paths)
-        build_url = lambda path: urllib.parse.urljoin(base_url, path)
-        entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
+        entries = [self.url_result(urljoin(base_url, path), 'CondeNast') for path in paths]
         return self.playlist_result(entries, playlist_title=title)
 
     def _extract_video_params(self, webpage, display_id):
diff --git a/yt_dlp/extractor/crunchyroll.py b/yt_dlp/extractor/crunchyroll.py
index 1b124c655..8faed179b 100644
--- a/yt_dlp/extractor/crunchyroll.py
+++ b/yt_dlp/extractor/crunchyroll.py
@@ -456,7 +456,7 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
                 }),
             }),
             **traverse_obj(metadata, {
-                'duration': ('duration_ms', {lambda x: float_or_none(x, 1000)}),
+                'duration': ('duration_ms', {float_or_none(scale=1000)}),
                 'timestamp': ('upload_date', {parse_iso8601}),
                 'series': ('series_title', {str}),
                 'series_id': ('series_id', {str}),
@@ -484,7 +484,7 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
                 }),
             }),
             **traverse_obj(metadata, {
-                'duration': ('duration_ms', {lambda x: float_or_none(x, 1000)}),
+                'duration': ('duration_ms', {float_or_none(scale=1000)}),
                 'age_limit': ('maturity_ratings', -1, {parse_age_limit}),
             }),
         }
diff --git a/yt_dlp/extractor/dangalplay.py b/yt_dlp/extractor/dangalplay.py
index 50e4136b5..f7b243234 100644
--- a/yt_dlp/extractor/dangalplay.py
+++ b/yt_dlp/extractor/dangalplay.py
@@ -40,7 +40,7 @@ class DangalPlayBaseIE(InfoExtractor):
                 'id': ('content_id', {str}),
                 'title': ('display_title', {str}),
                 'episode': ('title', {str}),
-                'series': ('show_name', {str}, {lambda x: x or None}),
+                'series': ('show_name', {str}, filter),
                 'series_id': ('catalog_id', {str}),
                 'duration': ('duration', {int_or_none}),
                 'release_timestamp': ('release_date_uts', {int_or_none}),
diff --git a/yt_dlp/extractor/err.py b/yt_dlp/extractor/err.py
index 7896cdbdc..d4139c6f3 100644
--- a/yt_dlp/extractor/err.py
+++ b/yt_dlp/extractor/err.py
@@ -207,7 +207,7 @@ class ERRJupiterIE(InfoExtractor):
             **traverse_obj(data, {
                 'title': ('heading', {str}),
                 'alt_title': ('subHeading', {str}),
-                'description': (('lead', 'body'), {clean_html}, {lambda x: x or None}),
+                'description': (('lead', 'body'), {clean_html}, filter),
                 'timestamp': ('created', {int_or_none}),
                 'modified_timestamp': ('updated', {int_or_none}),
                 'release_timestamp': (('scheduleStart', 'publicStart'), {int_or_none}),
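
Several hunks in this patch (bilibili, caffeinetv, dangalplay, err, jiocinema, kika, lbry, lsm, mixch and others) replace the {lambda x: x or None} step with the builtin filter used directly as a traverse_obj path element. A minimal sketch of that idiom, assuming a yt-dlp build where traverse_obj special-cases filter as these hunks do; the dict is made up:

# filter drops falsy values from the path, so empty strings and zeros are
# treated as missing instead of being returned as-is.
from yt_dlp.utils.traversal import traverse_obj

meta = {'show_name': '', 'title': 'Pilot'}
assert traverse_obj(meta, ('show_name', {str}, filter)) is None   # '' is dropped
assert traverse_obj(meta, ('title', {str}, filter)) == 'Pilot'
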
diff --git a/yt_dlp/extractor/ilpost.py b/yt_dlp/extractor/ilpost.py
index 2868f0c62..da203cf5f 100644
--- a/yt_dlp/extractor/ilpost.py
+++ b/yt_dlp/extractor/ilpost.py
@@ -1,4 +1,3 @@
-import functools
 
 from .common import InfoExtractor
 from ..utils import (
@@ -63,7 +62,7 @@ class IlPostIE(InfoExtractor):
                 'url': ('podcast_raw_url', {url_or_none}),
                 'thumbnail': ('image', {url_or_none}),
                 'timestamp': ('timestamp', {int_or_none}),
-                'duration': ('milliseconds', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('milliseconds', {float_or_none(scale=1000)}),
                 'availability': ('free', {lambda v: 'public' if v else 'subscriber_only'}),
             }),
         }
diff --git a/yt_dlp/extractor/jiocinema.py b/yt_dlp/extractor/jiocinema.py
index 30d98ba79..94c85064e 100644
--- a/yt_dlp/extractor/jiocinema.py
+++ b/yt_dlp/extractor/jiocinema.py
@@ -326,11 +326,11 @@ class JioCinemaIE(JioCinemaBaseIE):
                 # fallback metadata
                 'title': ('name', {str}),
                 'description': ('fullSynopsis', {str}),
-                'series': ('show', 'name', {str}, {lambda x: x or None}),
+                'series': ('show', 'name', {str}, filter),
                 'season': ('tournamentName', {str}, {lambda x: x if x != 'Season 0' else None}),
-                'season_number': ('episode', 'season', {int_or_none}, {lambda x: x or None}),
+                'season_number': ('episode', 'season', {int_or_none}, filter),
                 'episode': ('fullTitle', {str}),
-                'episode_number': ('episode', 'episodeNo', {int_or_none}, {lambda x: x or None}),
+                'episode_number': ('episode', 'episodeNo', {int_or_none}, filter),
                 'age_limit': ('ageNemonic', {parse_age_limit}),
                 'duration': ('totalDuration', {float_or_none}),
                 'thumbnail': ('images', {url_or_none}),
@@ -338,10 +338,10 @@ class JioCinemaIE(JioCinemaBaseIE):
             **traverse_obj(metadata, ('result', 0, {
                 'title': ('fullTitle', {str}),
                 'description': ('fullSynopsis', {str}),
-                'series': ('showName', {str}, {lambda x: x or None}),
-                'season': ('seasonName', {str}, {lambda x: x or None}),
+                'series': ('showName', {str}, filter),
+                'season': ('seasonName', {str}, filter),
                 'season_number': ('season', {int_or_none}),
-                'season_id': ('seasonId', {str}, {lambda x: x or None}),
+                'season_id': ('seasonId', {str}, filter),
                 'episode': ('fullTitle', {str}),
                 'episode_number': ('episode', {int_or_none}),
                 'timestamp': ('uploadTime', {int_or_none}),
diff --git a/yt_dlp/extractor/kick.py b/yt_dlp/extractor/kick.py
index bd21e5950..1f001d421 100644
--- a/yt_dlp/extractor/kick.py
+++ b/yt_dlp/extractor/kick.py
@@ -1,4 +1,3 @@
-import functools
 
 from .common import InfoExtractor
 from ..networking import HEADRequest
@@ -137,7 +136,7 @@ class KickVODIE(KickBaseIE):
                 'uploader': ('livestream', 'channel', 'user', 'username', {str}),
                 'uploader_id': ('livestream', 'channel', 'user_id', {int}, {str_or_none}),
                 'timestamp': ('created_at', {parse_iso8601}),
-                'duration': ('livestream', 'duration', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('livestream', 'duration', {float_or_none(scale=1000)}),
                 'thumbnail': ('livestream', 'thumbnail', {url_or_none}),
                 'categories': ('livestream', 'categories', ..., 'name', {str}),
                 'view_count': ('views', {int_or_none}),
diff --git a/yt_dlp/extractor/kika.py b/yt_dlp/extractor/kika.py
index 852a4de3f..69f4a3ce0 100644
--- a/yt_dlp/extractor/kika.py
+++ b/yt_dlp/extractor/kika.py
@@ -119,7 +119,7 @@ class KikaIE(InfoExtractor):
                         'width': ('frameWidth', {int_or_none}),
                         'height': ('frameHeight', {int_or_none}),
                         # NB: filesize is 0 if unknown, bitrate is -1 if unknown
-                        'filesize': ('fileSize', {int_or_none}, {lambda x: x or None}),
+                        'filesize': ('fileSize', {int_or_none}, filter),
                         'abr': ('bitrateAudio', {int_or_none}, {lambda x: None if x == -1 else x}),
                         'vbr': ('bitrateVideo', {int_or_none}, {lambda x: None if x == -1 else x}),
                     }),
diff --git a/yt_dlp/extractor/laracasts.py b/yt_dlp/extractor/laracasts.py
index 4494c4b79..4a61d6ab1 100644
--- a/yt_dlp/extractor/laracasts.py
+++ b/yt_dlp/extractor/laracasts.py
@@ -32,7 +32,7 @@ class LaracastsBaseIE(InfoExtractor):
             VimeoIE, url_transparent=True,
             **traverse_obj(episode, {
                 'id': ('id', {int}, {str_or_none}),
-                'webpage_url': ('path', {lambda x: urljoin('https://laracasts.com', x)}),
+                'webpage_url': ('path', {urljoin('https://laracasts.com')}),
                 'title': ('title', {clean_html}),
                 'season_number': ('chapter', {int_or_none}),
                 'episode_number': ('position', {int_or_none}),
@@ -104,7 +104,7 @@ class LaracastsPlaylistIE(LaracastsBaseIE):
                 'description': ('body', {clean_html}),
                 'thumbnail': (('large_thumbnail', 'thumbnail'), {url_or_none}, any),
                 'duration': ('runTime', {parse_duration}),
-                'categories': ('taxonomy', 'name', {str}, {lambda x: x and [x]}),
+                'categories': ('taxonomy', 'name', {str}, all, filter),
                 'tags': ('topics', ..., 'name', {str}),
                 'modified_date': ('lastUpdated', {unified_strdate}),
             }),
diff --git a/yt_dlp/extractor/lbry.py b/yt_dlp/extractor/lbry.py
index 322852dd6..0445b7cbf 100644
--- a/yt_dlp/extractor/lbry.py
+++ b/yt_dlp/extractor/lbry.py
@@ -66,7 +66,7 @@ class LBRYBaseIE(InfoExtractor):
             'license': ('value', 'license', {str}),
             'timestamp': ('timestamp', {int_or_none}),
             'release_timestamp': ('value', 'release_time', {int_or_none}),
-            'tags': ('value', 'tags', ..., {lambda x: x or None}),
+            'tags': ('value', 'tags', ..., filter),
             'duration': ('value', stream_type, 'duration', {int_or_none}),
             'channel': ('signing_channel', 'value', 'title', {str}),
             'channel_id': ('signing_channel', 'claim_id', {str}),
diff --git a/yt_dlp/extractor/learningonscreen.py b/yt_dlp/extractor/learningonscreen.py
index dcf83144c..f4b51e66c 100644
--- a/yt_dlp/extractor/learningonscreen.py
+++ b/yt_dlp/extractor/learningonscreen.py
@@ -6,13 +6,11 @@ from ..utils import (
     ExtractorError,
     clean_html,
     extract_attributes,
-    get_element_by_class,
-    get_element_html_by_id,
     join_nonempty,
     parse_duration,
     unified_timestamp,
 )
-from ..utils.traversal import traverse_obj
+from ..utils.traversal import find_element, traverse_obj
 
 
 class LearningOnScreenIE(InfoExtractor):
@@ -32,28 +30,24 @@ class LearningOnScreenIE(InfoExtractor):
 
     def _real_initialize(self):
         if not self._get_cookies('https://learningonscreen.ac.uk/').get('PHPSESSID-BOB-LIVE'):
-            self.raise_login_required(
-                'Use --cookies for authentication. See '
-                ' https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp  '
-                'for how to manually pass cookies', method=None)
+            self.raise_login_required(method='session_cookies')
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         details = traverse_obj(webpage, (
-            {functools.partial(get_element_html_by_id, 'programme-details')}, {
-                'title': ({functools.partial(re.search, r'<h2>([^<]+)</h2>')}, 1, {clean_html}),
+            {find_element(id='programme-details', html=True)}, {
+                'title': ({find_element(tag='h2')}, {clean_html}),
                 'timestamp': (
-                    {functools.partial(get_element_by_class, 'broadcast-date')},
+                    {find_element(cls='broadcast-date')},
                     {functools.partial(re.match, r'([^<]+)')}, 1, {unified_timestamp}),
                 'duration': (
-                    {functools.partial(get_element_by_class, 'prog-running-time')},
-                    {clean_html}, {parse_duration}),
+                    {find_element(cls='prog-running-time')}, {clean_html}, {parse_duration}),
             }))
 
         title = details.pop('title', None) or traverse_obj(webpage, (
-            {functools.partial(get_element_html_by_id, 'add-to-existing-playlist')},
+            {find_element(id='add-to-existing-playlist', html=True)},
             {extract_attributes}, 'data-record-title', {clean_html}))
 
         entries = self._parse_html5_media_entries(
diff --git a/yt_dlp/extractor/listennotes.py b/yt_dlp/extractor/listennotes.py
index 61eae95ed..9d68e1830 100644
--- a/yt_dlp/extractor/listennotes.py
+++ b/yt_dlp/extractor/listennotes.py
@@ -6,12 +6,10 @@ from ..utils import (
     extract_attributes,
     get_element_by_class,
     get_element_html_by_id,
-    get_element_text_and_html_by_tag,
     parse_duration,
     strip_or_none,
-    traverse_obj,
-    try_call,
 )
+from ..utils.traversal import find_element, traverse_obj
 
 
 class ListenNotesIE(InfoExtractor):
@@ -22,14 +20,14 @@ class ListenNotesIE(InfoExtractor):
         'info_dict': {
             'id': 'KrDgvNb_u1n',
             'ext': 'mp3',
-            'title': 'md5:32236591a921adf17bbdbf0441b6c0e9',
-            'description': 'md5:c581ed197eeddcee55a67cdb547c8cbd',
-            'duration': 2148.0,
-            'channel': 'Thriving on Overload',
+            'title': r're:Tim O’Reilly on noticing things other people .{113}',
+            'description': r're:(?s)‘’We shape reality by what we notice and .{27459}',
+            'duration': 2215.0,
+            'channel': 'Amplifying Cognition',
             'channel_id': 'ed84wITivxF',
             'episode_id': 'e1312583fa7b4e24acfbb5131050be00',
-            'thumbnail': 'https://production.listennotes.com/podcasts/thriving-on-overload-ross-dawson-1wb_KospA3P-ed84wITivxF.300x300.jpg',
-            'channel_url': 'https://www.listennotes.com/podcasts/thriving-on-overload-ross-dawson-ed84wITivxF/',
+            'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/amplifying-cognition-ross-dawson-Iemft4Gdr0k-ed84wITivxF.300x300.jpg',
+            'channel_url': 'https://www.listennotes.com/podcasts/amplifying-cognition-ross-dawson-ed84wITivxF/',
             'cast': ['Tim O’Reilly', 'Cookie Monster', 'Lao Tzu', 'Wallace Steven', 'Eric Raymond', 'Christine Peterson', 'John Maynard Keyne', 'Ross Dawson'],
         },
     }, {
@@ -39,13 +37,13 @@ class ListenNotesIE(InfoExtractor):
             'id': 'lwEA3154JzG',
             'ext': 'mp3',
             'title': 'Episode 177: WireGuard with Jason Donenfeld',
-            'description': 'md5:24744f36456a3e95f83c1193a3458594',
+            'description': r're:(?s)Jason Donenfeld lead developer joins us this hour to discuss WireGuard, .{3169}',
             'duration': 3861.0,
             'channel': 'Ask Noah Show',
             'channel_id': '4DQTzdS5-j7',
             'episode_id': '8c8954b95e0b4859ad1eecec8bf6d3a4',
             'channel_url': 'https://www.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-4DQTzdS5-j7/',
-            'thumbnail': 'https://production.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-cfbRUw9Gs3F-4DQTzdS5-j7.300x300.jpg',
+            'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-gD7vG150cxf-4DQTzdS5-j7.300x300.jpg',
             'cast': ['noah showlink', 'noah show', 'noah dashboard', 'jason donenfeld'],
         },
     }]
@@ -70,7 +68,7 @@ class ListenNotesIE(InfoExtractor):
             'id': audio_id,
             'url': data['audio'],
             'title': (data.get('data-title')
-                      or try_call(lambda: get_element_text_and_html_by_tag('h1', webpage)[0])
+                      or traverse_obj(webpage, ({find_element(tag='h1')}, {clean_html}))
                       or self._html_search_meta(('og:title', 'title', 'twitter:title'), webpage, 'title')),
             'description': (self._clean_description(get_element_by_class('ln-text-p', webpage))
                             or strip_or_none(description)),
diff --git a/yt_dlp/extractor/lsm.py b/yt_dlp/extractor/lsm.py
index f5be08f97..56c06d745 100644
--- a/yt_dlp/extractor/lsm.py
+++ b/yt_dlp/extractor/lsm.py
@@ -114,7 +114,7 @@ class LSMLREmbedIE(InfoExtractor):
     def _real_extract(self, url):
         query = parse_qs(url)
         video_id = traverse_obj(query, (
-            ('show', 'id'), 0, {int_or_none}, {lambda x: x or None}, {str_or_none}), get_all=False)
+            ('show', 'id'), 0, {int_or_none}, filter, {str_or_none}), get_all=False)
         webpage = self._download_webpage(url, video_id)
 
         player_data, media_data = self._search_regex(
diff --git a/yt_dlp/extractor/magentamusik.py b/yt_dlp/extractor/magentamusik.py
index 5bfc0a154..24c46a152 100644
--- a/yt_dlp/extractor/magentamusik.py
+++ b/yt_dlp/extractor/magentamusik.py
@@ -57,6 +57,6 @@ class MagentaMusikIE(InfoExtractor):
                 'duration': ('runtimeInSeconds', {int_or_none}),
                 'location': ('countriesOfProduction', {list}, {lambda x: join_nonempty(*x, delim=', ')}),
                 'release_year': ('yearOfProduction', {int_or_none}),
-                'categories': ('mainGenre', {str}, {lambda x: x and [x]}),
+                'categories': ('mainGenre', {str}, all, filter),
             })),
         }
diff --git a/yt_dlp/extractor/mediastream.py b/yt_dlp/extractor/mediastream.py
index ae0fb2aed..d2a22f98f 100644
--- a/yt_dlp/extractor/mediastream.py
+++ b/yt_dlp/extractor/mediastream.py
@@ -17,7 +17,7 @@ class MediaStreamBaseIE(InfoExtractor):
     _BASE_URL_RE = r'https?://mdstrm\.com/(?:embed|live-stream)'
 
     def _extract_mediastream_urls(self, webpage):
-        yield from traverse_obj(list(self._yield_json_ld(webpage, None, fatal=False)), (
+        yield from traverse_obj(list(self._yield_json_ld(webpage, None, default={})), (
             lambda _, v: v['@type'] == 'VideoObject', ('embedUrl', 'contentUrl'),
             {lambda x: x if re.match(rf'{self._BASE_URL_RE}/\w+', x) else None}))
 
diff --git a/yt_dlp/extractor/mixch.py b/yt_dlp/extractor/mixch.py
index 9b7c7b89b..7832784b2 100644
--- a/yt_dlp/extractor/mixch.py
+++ b/yt_dlp/extractor/mixch.py
@@ -66,7 +66,7 @@ class MixchIE(InfoExtractor):
             note='Downloading comments', errnote='Failed to download comments'), (..., {
                 'author': ('name', {str}),
                 'author_id': ('user_id', {str_or_none}),
-                'id': ('message_id', {str}, {lambda x: x or None}),
+                'id': ('message_id', {str}, filter),
                 'text': ('body', {str}),
                 'timestamp': ('created', {int}),
             }))
diff --git a/yt_dlp/extractor/monstercat.py b/yt_dlp/extractor/monstercat.py
index 930c13e27..f17b91f5a 100644
--- a/yt_dlp/extractor/monstercat.py
+++ b/yt_dlp/extractor/monstercat.py
@@ -4,15 +4,11 @@ from .common import InfoExtractor
 from ..utils import (
     clean_html,
     extract_attributes,
-    get_element_by_class,
-    get_element_html_by_class,
-    get_element_text_and_html_by_tag,
     int_or_none,
     strip_or_none,
-    traverse_obj,
-    try_call,
     unified_strdate,
 )
+from ..utils.traversal import find_element, traverse_obj
 
 
 class MonstercatIE(InfoExtractor):
@@ -26,19 +22,21 @@ class MonstercatIE(InfoExtractor):
             'thumbnail': 'https://www.monstercat.com/release/742779548009/cover',
             'release_date': '20230711',
             'album': 'The Secret Language of Trees',
-            'album_artist': 'BT',
+            'album_artists': ['BT'],
         },
     }]
 
     def _extract_tracks(self, table, album_meta):
         for td in re.findall(r'<tr[^<]*>((?:(?!</tr>)[\w\W])+)', table):  # regex by chatgpt due to lack of get_elements_by_tag
-            title = clean_html(try_call(
-                lambda: get_element_by_class('d-inline-flex flex-column', td).partition(' <span')[0]))
-            ids = extract_attributes(try_call(lambda: get_element_html_by_class('btn-play cursor-pointer mr-small', td)) or '')
+            title = traverse_obj(td, (
+                {find_element(cls='d-inline-flex flex-column')},
+                {lambda x: x.partition(' <span')}, 0, {clean_html}))
+            ids = traverse_obj(td, (
+                {find_element(cls='btn-play cursor-pointer mr-small', html=True)}, {extract_attributes})) or {}
             track_id = ids.get('data-track-id')
             release_id = ids.get('data-release-id')
 
-            track_number = int_or_none(try_call(lambda: get_element_by_class('py-xsmall', td)))
+            track_number = traverse_obj(td, ({find_element(cls='py-xsmall')}, {int_or_none}))
             if not track_id or not release_id:
                 self.report_warning(f'Skipping track {track_number}, ID(s) not found')
                 self.write_debug(f'release_id={release_id!r} track_id={track_id!r}')
@@ -48,7 +46,7 @@ class MonstercatIE(InfoExtractor):
                 'title': title,
                 'track': title,
                 'track_number': track_number,
-                'artist': clean_html(try_call(lambda: get_element_by_class('d-block fs-xxsmall', td))),
+                'artists': traverse_obj(td, ({find_element(cls='d-block fs-xxsmall')}, {clean_html}, all)),
                 'url': f'https://www.monstercat.com/api/release/{release_id}/track-stream/{track_id}',
                 'id': track_id,
                 'ext': 'mp3',
@@ -57,20 +55,19 @@ class MonstercatIE(InfoExtractor):
     def _real_extract(self, url):
         url_id = self._match_id(url)
         html = self._download_webpage(url, url_id)
-        # wrap all `get_elements` in `try_call`, HTMLParser has problems with site's html
-        tracklist_table = try_call(lambda: get_element_by_class('table table-small', html)) or ''
-
-        title = try_call(lambda: get_element_text_and_html_by_tag('h1', html)[0])
-        date = traverse_obj(html, ({lambda html: get_element_by_class('font-italic mb-medium d-tablet-none d-phone-block',
-                            html).partition('Released ')}, 2, {strip_or_none}, {unified_strdate}))
+        # NB: HTMLParser may choke on this html; use {find_element} or try_call(lambda: get_element...)
+        tracklist_table = traverse_obj(html, {find_element(cls='table table-small')}) or ''
+        title = traverse_obj(html, ({find_element(tag='h1')}, {clean_html}))
 
         album_meta = {
             'title': title,
             'album': title,
             'thumbnail': f'https://www.monstercat.com/release/{url_id}/cover',
-            'album_artist': try_call(
-                lambda: get_element_by_class('h-normal text-uppercase mb-desktop-medium mb-smallish', html)),
-            'release_date': date,
+            'album_artists': traverse_obj(html, (
+                {find_element(cls='h-normal text-uppercase mb-desktop-medium mb-smallish')}, {clean_html}, all)),
+            'release_date': traverse_obj(html, (
+                {find_element(cls='font-italic mb-medium d-tablet-none d-phone-block')},
+                {lambda x: x.partition('Released ')}, 2, {strip_or_none}, {unified_strdate})),
         }
 
         return self.playlist_result(
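
The monstercat.py hunks above, like the bandcamp, bpb, bundestag, learningonscreen, listennotes and nekohacker ones, switch from the get_element_* helpers to the find_element traversal helper. A minimal sketch of how it composes inside traverse_obj, assuming yt-dlp with this patch applied; the HTML snippet, class name and attribute are invented:

# find_element(cls=...) yields the matching element's inner HTML (clean it up
# afterwards); html=True yields the full element so extract_attributes can
# read its attributes, mirroring the patterns used in the hunks above.
from yt_dlp.utils import clean_html, extract_attributes
from yt_dlp.utils.traversal import find_element, traverse_obj

html = '<div id="programme" data-id="42"><h1 class="title">Some <b>Title</b></h1></div>'

title = traverse_obj(html, ({find_element(cls='title')}, {clean_html}))           # 'Some Title'
data_id = traverse_obj(html, (
    {find_element(id='programme', html=True)}, {extract_attributes}, 'data-id'))  # '42'
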
diff --git a/yt_dlp/extractor/nebula.py b/yt_dlp/extractor/nebula.py
index cb8f6a67d..42ef25f17 100644
--- a/yt_dlp/extractor/nebula.py
+++ b/yt_dlp/extractor/nebula.py
@@ -86,7 +86,7 @@ class NebulaBaseIE(InfoExtractor):
 
     def _extract_video_metadata(self, episode):
         channel_url = traverse_obj(
-            episode, (('channel_slug', 'class_slug'), {lambda x: urljoin('https://nebula.tv/', x)}), get_all=False)
+            episode, (('channel_slug', 'class_slug'), {urljoin('https://nebula.tv/')}), get_all=False)
         return {
             'id': episode['id'].partition(':')[2],
             **traverse_obj(episode, {
diff --git a/yt_dlp/extractor/nekohacker.py b/yt_dlp/extractor/nekohacker.py
index 537158e87..7168a2080 100644
--- a/yt_dlp/extractor/nekohacker.py
+++ b/yt_dlp/extractor/nekohacker.py
@@ -6,12 +6,10 @@ from ..utils import (
     determine_ext,
     extract_attributes,
     get_element_by_class,
-    get_element_text_and_html_by_tag,
     parse_duration,
-    traverse_obj,
-    try_call,
     url_or_none,
 )
+from ..utils.traversal import find_element, traverse_obj
 
 
 class NekoHackerIE(InfoExtractor):
@@ -35,7 +33,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20221101',
                     'album': 'Nekoverse',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'Spaceship',
                     'track_number': 1,
                     'duration': 195.0,
@@ -53,7 +51,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20221101',
                     'album': 'Nekoverse',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'City Runner',
                     'track_number': 2,
                     'duration': 148.0,
@@ -71,7 +69,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20221101',
                     'album': 'Nekoverse',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'Nature Talk',
                     'track_number': 3,
                     'duration': 174.0,
@@ -89,7 +87,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20221101',
                     'album': 'Nekoverse',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'Crystal World',
                     'track_number': 4,
                     'duration': 199.0,
@@ -115,7 +113,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20210115',
                     'album': '進め!むじなカンパニー',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'md5:1a5fcbc96ca3c3265b1c6f9f79f30fd0',
                     'track_number': 1,
                 },
@@ -132,7 +130,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20210115',
                     'album': '進め!むじなカンパニー',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'むじな de なじむ feat. 六科なじむ (CV: 日高里菜 )',
                     'track_number': 2,
                 },
@@ -149,7 +147,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20210115',
                     'album': '進め!むじなカンパニー',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': '進め!むじなカンパニー (instrumental)',
                     'track_number': 3,
                 },
@@ -166,7 +164,7 @@ class NekoHackerIE(InfoExtractor):
                     'acodec': 'mp3',
                     'release_date': '20210115',
                     'album': '進め!むじなカンパニー',
-                    'artist': 'Neko Hacker',
+                    'artists': ['Neko Hacker'],
                     'track': 'むじな de なじむ (instrumental)',
                     'track_number': 4,
                 },
@@ -181,14 +179,18 @@
         playlist = get_element_by_class('playlist', webpage)
 
         if not playlist:
-            iframe = try_call(lambda: get_element_text_and_html_by_tag('iframe', webpage)[1]) or ''
-            iframe_src = url_or_none(extract_attributes(iframe).get('src'))
+            iframe_src = traverse_obj(webpage, (
+                {find_element(tag='iframe', html=True)}, {extract_attributes}, 'src', {url_or_none}))
             if not iframe_src:
                 raise ExtractorError('No playlist or embed found in webpage')
             elif re.match(r'https?://(?:\w+\.)?spotify\.com/', iframe_src):
                 raise ExtractorError('Spotify embeds are not supported', expected=True)
             return self.url_result(url, 'Generic')
 
+        player_params = self._search_json(
+            r'var srp_player_params_[\da-f]+\s*=', webpage, 'player params', playlist_id, default={})
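+        # the player params JSON exposes the album artwork, reused below as each track's thumbnail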
+
         entries = []
         for track_number, track in enumerate(re.findall(r'(<li[^>]+data-audiopath[^>]+>)', playlist), 1):
             entry = traverse_obj(extract_attributes(track), {
@@ -200,12 +201,12 @@ class NekoHackerIE(InfoExtractor):
                 'album': 'data-albumtitle',
                 'duration': ('data-tracktime', {parse_duration}),
                 'release_date': ('data-releasedate', {lambda x: re.match(r'\d{8}', x.replace('.', ''))}, 0),
-                'thumbnail': ('data-albumart', {url_or_none}),
             })
             entries.append({
                 **entry,
+                'thumbnail': url_or_none(player_params.get('artwork')),
                 'track_number': track_number,
-                'artist': 'Neko Hacker',
+                'artists': ['Neko Hacker'],
                 'vcodec': 'none',
                 'acodec': 'mp3' if entry['ext'] == 'mp3' else None,
             })
diff --git a/yt_dlp/extractor/neteasemusic.py b/yt_dlp/extractor/neteasemusic.py
index a759da214..900b8b2a3 100644
--- a/yt_dlp/extractor/neteasemusic.py
+++ b/yt_dlp/extractor/neteasemusic.py
@@ -36,10 +36,6 @@ class NetEaseMusicBaseIE(InfoExtractor):
     _API_BASE = 'http://music.163.com/api/'
     _GEO_BYPASS = False
 
-    @staticmethod
-    def _kilo_or_none(value):
-        return int_or_none(value, scale=1000)
-
     def _create_eapi_cipher(self, api_path, query_body, cookies):
         request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
 
@@ -101,7 +97,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
                 'vcodec': 'none',
                 **traverse_obj(song, {
                     'ext': ('type', {str}),
-                    'abr': ('br', {self._kilo_or_none}),
+                    'abr': ('br', {int_or_none(scale=1000)}),
                     'filesize': ('size', {int_or_none}),
                 }),
             })
@@ -282,9 +278,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             **lyric_data,
             **traverse_obj(info, {
                 'title': ('name', {str}),
-                'timestamp': ('album', 'publishTime', {self._kilo_or_none}),
+                'timestamp': ('album', 'publishTime', {int_or_none(scale=1000)}),
                 'thumbnail': ('album', 'picUrl', {url_or_none}),
-                'duration': ('duration', {self._kilo_or_none}),
+                'duration': ('duration', {int_or_none(scale=1000)}),
                 'album': ('album', 'name', {str}),
                 'average_rating': ('score', {int_or_none}),
             }),
@@ -440,7 +436,7 @@ class NetEaseMusicListIE(NetEaseMusicBaseIE):
             'tags': ('tags', ..., {str}),
             'uploader': ('creator', 'nickname', {str}),
             'uploader_id': ('creator', 'userId', {str_or_none}),
-            'timestamp': ('updateTime', {self._kilo_or_none}),
+            'timestamp': ('updateTime', {int_or_none(scale=1000)}),
         }))
         if traverse_obj(info, ('playlist', 'specialType')) == 10:
             metainfo['title'] = f'{metainfo.get("title")} {strftime_or_none(metainfo.get("timestamp"), "%Y-%m-%d")}'
@@ -517,10 +513,10 @@ class NetEaseMusicMvIE(NetEaseMusicBaseIE):
             'creators': traverse_obj(info, ('artists', ..., 'name')) or [info.get('artistName')],
             **traverse_obj(info, {
                 'title': ('name', {str}),
-                'description': (('desc', 'briefDesc'), {str}, {lambda x: x or None}),
+                'description': (('desc', 'briefDesc'), {str}, filter),
                 'upload_date': ('publishTime', {unified_strdate}),
                 'thumbnail': ('cover', {url_or_none}),
-                'duration': ('duration', {self._kilo_or_none}),
+                'duration': ('duration', {int_or_none(scale=1000)}),
                 'view_count': ('playCount', {int_or_none}),
                 'like_count': ('likeCount', {int_or_none}),
                 'comment_count': ('commentCount', {int_or_none}),
@@ -588,7 +584,7 @@ class NetEaseMusicProgramIE(NetEaseMusicBaseIE):
             'description': ('description', {str}),
             'creator': ('dj', 'brand', {str}),
             'thumbnail': ('coverUrl', {url_or_none}),
-            'timestamp': ('createTime', {self._kilo_or_none}),
+            'timestamp': ('createTime', {int_or_none(scale=1000)}),
         })
 
         if not self._yes_playlist(
@@ -598,7 +594,7 @@ class NetEaseMusicProgramIE(NetEaseMusicBaseIE):
             return {
                 'id': str(info['mainSong']['id']),
                 'formats': formats,
-                'duration': traverse_obj(info, ('mainSong', 'duration', {self._kilo_or_none})),
+                'duration': traverse_obj(info, ('mainSong', 'duration', {int_or_none(scale=1000)})),
                 **metainfo,
             }
 
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index 961dd0c5e..29fc1da1e 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -371,11 +371,11 @@ class NiconicoIE(InfoExtractor):
             'acodec': 'aac',
             'vcodec': 'h264',
             **traverse_obj(audio_quality, ('metadata', {
-                'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+                'abr': ('bitrate', {float_or_none(scale=1000)}),
                 'asr': ('samplingRate', {int_or_none}),
             })),
             **traverse_obj(video_quality, ('metadata', {
-                'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+                'vbr': ('bitrate', {float_or_none(scale=1000)}),
                 'height': ('resolution', 'height', {int_or_none}),
                 'width': ('resolution', 'width', {int_or_none}),
             })),
@@ -428,7 +428,7 @@ class NiconicoIE(InfoExtractor):
                 **audio_fmt,
                 **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
                     'format_id': ('id', {str}),
-                    'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
+                    'abr': ('bitRate', {float_or_none(scale=1000)}),
                     'asr': ('samplingRate', {int_or_none}),
                 }), get_all=False),
                 'acodec': 'aac',
diff --git a/yt_dlp/extractor/nubilesporn.py b/yt_dlp/extractor/nubilesporn.py
index c2079d8b0..47c7be61d 100644
--- a/yt_dlp/extractor/nubilesporn.py
+++ b/yt_dlp/extractor/nubilesporn.py
@@ -10,10 +10,10 @@ from ..utils import (
     get_element_html_by_class,
     get_elements_by_class,
     int_or_none,
-    try_call,
     unified_timestamp,
     urlencode_postdata,
 )
+from ..utils.traversal import find_element, find_elements, traverse_obj
 
 
 class NubilesPornIE(InfoExtractor):
@@ -70,9 +70,8 @@ class NubilesPornIE(InfoExtractor):
             url, get_element_by_class('watch-page-video-wrapper', page), video_id)[0]
 
         channel_id, channel_name = self._search_regex(
-            r'/video/website/(?P<id>\d+).+>(?P<name>\w+).com', get_element_html_by_class('site-link', page),
+            r'/video/website/(?P<id>\d+).+>(?P<name>\w+).com', get_element_html_by_class('site-link', page) or '',
             'channel', fatal=False, group=('id', 'name')) or (None, None)
-        channel_name = re.sub(r'([^A-Z]+)([A-Z]+)', r'\1 \2', channel_name)
 
         return {
             'id': video_id,
@@ -82,14 +81,15 @@
             'thumbnail': media_entries.get('thumbnail'),
             'description': clean_html(get_element_html_by_class('content-pane-description', page)),
             'timestamp': unified_timestamp(get_element_by_class('date', page)),
-            'channel': channel_name,
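+            # space out the CamelCase channel name, e.g. 'FooBar' -> 'Foo Bar'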
+            'channel': re.sub(r'([^A-Z]+)([A-Z]+)', r'\1 \2', channel_name) if channel_name else None,
             'channel_id': channel_id,
             'channel_url': format_field(channel_id, None, 'https://members.nubiles-porn.com/video/website/%s'),
             'like_count': int_or_none(get_element_by_id('likecount', page)),
             'average_rating': float_or_none(get_element_by_class('score', page)),
             'age_limit': 18,
-            'categories': try_call(lambda: list(map(clean_html, get_elements_by_class('btn', get_element_by_class('categories', page))))),
-            'tags': try_call(lambda: list(map(clean_html, get_elements_by_class('btn', get_elements_by_class('tags', page)[1])))),
+            'categories': traverse_obj(page, ({find_element(cls='categories')}, {find_elements(cls='btn')}, ..., {clean_html})),
+            'tags': traverse_obj(page, ({find_elements(cls='tags')}, 1, {find_elements(cls='btn')}, ..., {clean_html})),
             'cast': get_elements_by_class('content-pane-performer', page),
             'availability': 'needs_auth',
             'series': channel_name,
diff --git a/yt_dlp/extractor/nytimes.py b/yt_dlp/extractor/nytimes.py
index 5ec3cdd67..9ef57410a 100644
--- a/yt_dlp/extractor/nytimes.py
+++ b/yt_dlp/extractor/nytimes.py
@@ -235,7 +235,7 @@ class NYTimesArticleIE(NYTimesBaseIE):
         details = traverse_obj(block, {
             'id': ('sourceId', {str}),
             'uploader': ('bylines', ..., 'renderedRepresentation', {str}),
-            'duration': (None, (('duration', {lambda x: float_or_none(x, scale=1000)}), ('length', {int_or_none}))),
+            'duration': (None, (('duration', {float_or_none(scale=1000)}), ('length', {int_or_none}))),
             'timestamp': ('firstPublished', {parse_iso8601}),
             'series': ('podcastSeries', {str}),
         }, get_all=False)
diff --git a/yt_dlp/extractor/ondemandkorea.py b/yt_dlp/extractor/ondemandkorea.py
index 591b4147e..1921f3fd8 100644
--- a/yt_dlp/extractor/ondemandkorea.py
+++ b/yt_dlp/extractor/ondemandkorea.py
@@ -115,7 +115,7 @@ class OnDemandKoreaIE(InfoExtractor):
             **traverse_obj(data, {
                 'thumbnail': ('episode', 'images', 'thumbnail', {url_or_none}),
                 'release_date': ('episode', 'release_date', {lambda x: x.replace('-', '')}, {unified_strdate}),
-                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
                 'age_limit': ('age_rating', 'name', {lambda x: x.replace('R', '')}, {parse_age_limit}),
                 'series': ('episode', {if_series(key='program')}, 'title'),
                 'series_id': ('episode', {if_series(key='program')}, 'id', {str_or_none}),
diff --git a/yt_dlp/extractor/orf.py b/yt_dlp/extractor/orf.py
index 9c37a54d6..12c4a2104 100644
--- a/yt_dlp/extractor/orf.py
+++ b/yt_dlp/extractor/orf.py
@@ -1,5 +1,4 @@
 import base64
-import functools
 import re
 
 from .common import InfoExtractor
@@ -192,7 +191,7 @@ class ORFPodcastIE(InfoExtractor):
                 'ext': ('enclosures', 0, 'type', {mimetype2ext}),
                 'title': 'title',
                 'description': ('description', {clean_html}),
-                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
                 'series': ('podcast', 'title'),
             })),
         }
@@ -494,7 +493,7 @@ class ORFONIE(InfoExtractor):
         return traverse_obj(api_json, {
             'id': ('id', {int}, {str_or_none}),
             'age_limit': ('age_classification', {parse_age_limit}),
-            'duration': ('exact_duration', {functools.partial(float_or_none, scale=1000)}),
+            'duration': ('exact_duration', {float_or_none(scale=1000)}),
             'title': (('title', 'headline'), {str}),
             'description': (('description', 'teaser_text'), {str}),
             'media_type': ('video_type', {str}),
diff --git a/yt_dlp/extractor/parler.py b/yt_dlp/extractor/parler.py
index 9be288a7d..e5bb3be4e 100644
--- a/yt_dlp/extractor/parler.py
+++ b/yt_dlp/extractor/parler.py
@@ -1,5 +1,3 @@
-import functools
-
 from .common import InfoExtractor
 from .youtube import YoutubeIE
 from ..utils import (
@@ -83,7 +81,7 @@ class ParlerIE(InfoExtractor):
                 'timestamp': ('date_created', {unified_timestamp}),
                 'uploader': ('user', 'name', {strip_or_none}),
                 'uploader_id': ('user', 'username', {str}),
-                'uploader_url': ('user', 'username', {functools.partial(urljoin, 'https://parler.com/')}),
+                'uploader_url': ('user', 'username', {urljoin('https://parler.com/')}),
                 'view_count': ('views', {int_or_none}),
                 'comment_count': ('total_comments', {int_or_none}),
                 'repost_count': ('echos', {int_or_none}),
diff --git a/yt_dlp/extractor/pornbox.py b/yt_dlp/extractor/pornbox.py
index 9b89adbf9..0996e4d97 100644
--- a/yt_dlp/extractor/pornbox.py
+++ b/yt_dlp/extractor/pornbox.py
@@ -1,4 +1,2 @@
-import functools
-
 from .common import InfoExtractor
 from ..utils import (
@@ -105,7 +104,7 @@ class PornboxIE(InfoExtractor):
         get_quality = qualities(['web', 'vga', 'hd', '1080p', '4k', '8k'])
         metadata['formats'] = traverse_obj(stream_data, ('qualities', lambda _, v: v['src'], {
             'url': 'src',
-            'vbr': ('bitrate', {functools.partial(int_or_none, scale=1000)}),
+            'vbr': ('bitrate', {int_or_none(scale=1000)}),
             'format_id': ('quality', {str_or_none}),
             'quality': ('quality', {get_quality}),
             'width': ('size', {lambda x: int(x[:-1])}),
diff --git a/yt_dlp/extractor/pr0gramm.py b/yt_dlp/extractor/pr0gramm.py
index b0d6475fe..d5d6ecdfd 100644
--- a/yt_dlp/extractor/pr0gramm.py
+++ b/yt_dlp/extractor/pr0gramm.py
@@ -198,6 +198,6 @@ class Pr0grammIE(InfoExtractor):
                 'dislike_count': ('down', {int}),
                 'timestamp': ('created', {int}),
                 'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
-                'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)}),
+                'thumbnail': ('thumb', {urljoin('https://thumb.pr0gramm.com')}),
             }),
         }
diff --git a/yt_dlp/extractor/qdance.py b/yt_dlp/extractor/qdance.py
index 934ebbfd7..4f71657c3 100644
--- a/yt_dlp/extractor/qdance.py
+++ b/yt_dlp/extractor/qdance.py
@@ -140,7 +140,7 @@ class QDanceIE(InfoExtractor):
             'description': ('description', {str.strip}),
             'display_id': ('slug', {str}),
             'thumbnail': ('thumbnail', {url_or_none}),
-            'duration': ('durationInSeconds', {int_or_none}, {lambda x: x or None}),
+            'duration': ('durationInSeconds', {int_or_none}, filter),
             'availability': ('subscription', 'level', {extract_availability}),
             'is_live': ('type', {lambda x: x.lower() == 'live'}),
             'artist': ('acts', ..., {str}),
diff --git a/yt_dlp/extractor/qqmusic.py b/yt_dlp/extractor/qqmusic.py
index d0238692f..fb46e0d12 100644
--- a/yt_dlp/extractor/qqmusic.py
+++ b/yt_dlp/extractor/qqmusic.py
@@ -211,10 +211,10 @@ class QQMusicIE(QQMusicBaseIE):
             'formats': formats,
             **traverse_obj(info_data, {
                 'title': ('title', {str}),
-                'album': ('album', 'title', {str}, {lambda x: x or None}),
+                'album': ('album', 'title', {str}, filter),
                 'release_date': ('time_public', {lambda x: x.replace('-', '') or None}),
                 'creators': ('singer', ..., 'name', {str}),
-                'alt_title': ('subtitle', {str}, {lambda x: x or None}),
+                'alt_title': ('subtitle', {str}, filter),
                 'duration': ('interval', {int_or_none}),
             }),
             **traverse_obj(init_data, ('detail', {
diff --git a/yt_dlp/extractor/redge.py b/yt_dlp/extractor/redge.py
index 7cb91eea4..5ae09a096 100644
--- a/yt_dlp/extractor/redge.py
+++ b/yt_dlp/extractor/redge.py
@@ -1,4 +1,2 @@
-import functools
-
 from .common import InfoExtractor
 from ..networking import HEADRequest
@@ -118,7 +117,7 @@ class RedCDNLivxIE(InfoExtractor):
 
         time_scale = traverse_obj(ism_doc, ('@TimeScale', {int_or_none})) or 10000000
         duration = traverse_obj(
-            ism_doc, ('@Duration', {functools.partial(float_or_none, scale=time_scale)})) or None
+            ism_doc, ('@Duration', {float_or_none(scale=time_scale)})) or None
 
         live_status = None
         if traverse_obj(ism_doc, '@IsLive') == 'TRUE':
diff --git a/yt_dlp/extractor/rtvslo.py b/yt_dlp/extractor/rtvslo.py
index 9c2e6fb6b..49bebb178 100644
--- a/yt_dlp/extractor/rtvslo.py
+++ b/yt_dlp/extractor/rtvslo.py
@@ -187,4 +187,4 @@ class RTVSLOShowIE(InfoExtractor):
         return self.playlist_from_matches(
             re.findall(r'<a [^>]*\bhref="(/arhiv/[^"]+)"', webpage),
             playlist_id, self._html_extract_title(webpage),
-            getter=lambda x: urljoin('https://365.rtvslo.si', x), ie=RTVSLOIE)
+            getter=urljoin('https://365.rtvslo.si'), ie=RTVSLOIE)
diff --git a/yt_dlp/extractor/snapchat.py b/yt_dlp/extractor/snapchat.py
index 732677c19..09e5766d4 100644
--- a/yt_dlp/extractor/snapchat.py
+++ b/yt_dlp/extractor/snapchat.py
@@ -56,13 +56,13 @@ class SnapchatSpotlightIE(InfoExtractor):
             **traverse_obj(video_data, ('videoMetadata', {
                 'title': ('name', {str}),
                 'description': ('description', {str}),
-                'timestamp': ('uploadDateMs', {lambda x: float_or_none(x, 1000)}),
+                'timestamp': ('uploadDateMs', {float_or_none(scale=1000)}),
                 'view_count': ('viewCount', {int_or_none}, {lambda x: None if x == -1 else x}),
                 'repost_count': ('shareCount', {int_or_none}),
                 'url': ('contentUrl', {url_or_none}),
                 'width': ('width', {int_or_none}),
                 'height': ('height', {int_or_none}),
-                'duration': ('durationMs', {lambda x: float_or_none(x, 1000)}),
+                'duration': ('durationMs', {float_or_none(scale=1000)}),
                 'thumbnail': ('thumbnailUrl', {url_or_none}),
                 'uploader': ('creator', 'personCreator', 'username', {str}),
                 'uploader_url': ('creator', 'personCreator', 'url', {url_or_none}),
diff --git a/yt_dlp/extractor/tbsjp.py b/yt_dlp/extractor/tbsjp.py
index 32f9cfbde..0d521f106 100644
--- a/yt_dlp/extractor/tbsjp.py
+++ b/yt_dlp/extractor/tbsjp.py
@@ -3,14 +3,12 @@ from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     clean_html,
-    get_element_text_and_html_by_tag,
     int_or_none,
     str_or_none,
-    traverse_obj,
-    try_call,
     unified_timestamp,
     urljoin,
 )
+from ..utils.traversal import find_element, traverse_obj
 
 
 class TBSJPEpisodeIE(InfoExtractor):
@@ -64,7 +62,7 @@ class TBSJPEpisodeIE(InfoExtractor):
             self._merge_subtitles(subs, target=subtitles)
 
         return {
-            'title': try_call(lambda: clean_html(get_element_text_and_html_by_tag('h3', webpage)[0])),
+            'title': traverse_obj(webpage, ({find_element(tag='h3')}, {clean_html})),
             'id': video_id,
             **traverse_obj(episode, {
                 'categories': ('keywords', {list}),
diff --git a/yt_dlp/extractor/teamcoco.py b/yt_dlp/extractor/teamcoco.py
index 3fb899cac..a94ff9b33 100644
--- a/yt_dlp/extractor/teamcoco.py
+++ b/yt_dlp/extractor/teamcoco.py
@@ -136,7 +136,7 @@ class TeamcocoIE(TeamcocoBaseIE):
             'blocks', lambda _, v: v['name'] in ('meta-tags', 'video-player', 'video-info'), 'props', {dict})))
 
         thumbnail = traverse_obj(
-            info, (('image', 'poster'), {lambda x: urljoin('https://teamcoco.com/', x)}), get_all=False)
+            info, (('image', 'poster'), {urljoin('https://teamcoco.com/')}), get_all=False)
         video_id = traverse_obj(parse_qs(thumbnail), ('id', 0)) or display_id
 
         formats, subtitles = self._get_formats_and_subtitles(info, video_id)
diff --git a/yt_dlp/extractor/telewebion.py b/yt_dlp/extractor/telewebion.py
index b65116024..02a6ea85b 100644
--- a/yt_dlp/extractor/telewebion.py
+++ b/yt_dlp/extractor/telewebion.py
@@ -10,10 +10,12 @@ from ..utils.traversal import traverse_obj
 
 
 def _fmt_url(url):
-    return functools.partial(format_field, template=url, default=None)
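+    # format_field with no positional argument applies partially and returns a formatter callable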
+    return format_field(template=url, default=None)
 
 
 class TelewebionIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?telewebion\.com/episode/(?P<id>(?:0x[a-fA-F\d]+|\d+))'
     _TESTS = [{
         'url': 'http://www.telewebion.com/episode/0x1b3139c/',
diff --git a/yt_dlp/extractor/tencent.py b/yt_dlp/extractor/tencent.py
index fc2b07ac2..b281ad1a9 100644
--- a/yt_dlp/extractor/tencent.py
+++ b/yt_dlp/extractor/tencent.py
@@ -1,4 +1,3 @@
-import functools
 import random
 import re
 import string
@@ -278,7 +277,7 @@ class VQQSeriesIE(VQQBaseIE):
             webpage)]
 
         return self.playlist_from_matches(
-            episode_paths, series_id, ie=VQQVideoIE, getter=functools.partial(urljoin, url),
+            episode_paths, series_id, ie=VQQVideoIE, getter=urljoin(url),
             title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
                                         or self._og_search_title(webpage)),
             description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))
@@ -328,7 +327,7 @@ class WeTvBaseIE(TencentBaseIE):
                          or re.findall(r'<a[^>]+class="play-video__link"[^>]+href="(?P<path>[^"]+)', webpage))
 
         return self.playlist_from_matches(
-            episode_paths, series_id, ie=ie, getter=functools.partial(urljoin, url),
+            episode_paths, series_id, ie=ie, getter=urljoin(url),
             title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
                                         or self._og_search_title(webpage)),
             description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))
diff --git a/yt_dlp/extractor/tenplay.py b/yt_dlp/extractor/tenplay.py
index 07db58347..cc7bc3b2f 100644
--- a/yt_dlp/extractor/tenplay.py
+++ b/yt_dlp/extractor/tenplay.py
@@ -1,4 +1,3 @@
-import functools
 import itertools
 
 from .common import InfoExtractor
@@ -161,4 +160,4 @@ class TenPlaySeasonIE(InfoExtractor):
         return self.playlist_from_matches(
             self._entries(urljoin(url, episodes_carousel['loadMoreUrl']), playlist_id),
             playlist_id, traverse_obj(season_info, ('content', 0, 'title', {str})),
-            getter=functools.partial(urljoin, url))
+            getter=urljoin(url))
diff --git a/yt_dlp/extractor/theguardian.py b/yt_dlp/extractor/theguardian.py
index a9e499064..7e8f9fef2 100644
--- a/yt_dlp/extractor/theguardian.py
+++ b/yt_dlp/extractor/theguardian.py
@@ -131,4 +131,4 @@ class TheGuardianPodcastPlaylistIE(InfoExtractor):
 
         return self.playlist_from_matches(
             self._entries(url, podcast_id), podcast_id, title, description=description,
-            ie=TheGuardianPodcastIE, getter=lambda x: urljoin('https://www.theguardian.com', x))
+            ie=TheGuardianPodcastIE, getter=urljoin('https://www.theguardian.com'))
diff --git a/yt_dlp/extractor/tiktok.py b/yt_dlp/extractor/tiktok.py
index f7e103fe9..ba15f08b6 100644
--- a/yt_dlp/extractor/tiktok.py
+++ b/yt_dlp/extractor/tiktok.py
@@ -469,7 +469,7 @@ class TikTokBaseIE(InfoExtractor):
                 aweme_detail, aweme_id, traverse_obj(author_info, 'uploader', 'uploader_id', 'channel_id')),
             'thumbnails': thumbnails,
             'duration': (traverse_obj(video_info, (
-                (None, 'download_addr'), 'duration', {functools.partial(int_or_none, scale=1000)}, any))
+                (None, 'download_addr'), 'duration', {int_or_none(scale=1000)}, any))
                 or traverse_obj(music_info, ('duration', {int_or_none}))),
             'availability': self._availability(
                 is_private='Private' in labels,
@@ -583,7 +583,7 @@ class TikTokBaseIE(InfoExtractor):
                 author_info, ['uploader', 'uploader_id'], self._UPLOADER_URL_FORMAT, default=None),
             **traverse_obj(aweme_detail, ('music', {
                 'track': ('title', {str}),
-                'album': ('album', {str}, {lambda x: x or None}),
+                'album': ('album', {str}, filter),
                 'artists': ('authorName', {str}, {lambda x: re.split(r'(?:, | & )', x) if x else None}),
                 'duration': ('duration', {int_or_none}),
             })),
@@ -591,7 +591,7 @@ class TikTokBaseIE(InfoExtractor):
                 'title': ('desc', {str}),
                 'description': ('desc', {str}),
                 # audio-only slideshows have a video duration of 0 and an actual audio duration
-                'duration': ('video', 'duration', {int_or_none}, {lambda x: x or None}),
+                'duration': ('video', 'duration', {int_or_none}, filter),
                 'timestamp': ('createTime', {int_or_none}),
             }),
             **traverse_obj(aweme_detail, ('stats', {
@@ -1493,7 +1493,7 @@ class TikTokLiveIE(TikTokBaseIE):
 
             sdk_params = traverse_obj(stream, ('main', 'sdk_params', {parse_inner}, {
                 'vcodec': ('VCodec', {str}),
-                'tbr': ('vbitrate', {lambda x: int_or_none(x, 1000)}),
+                'tbr': ('vbitrate', {int_or_none(scale=1000)}),
                 'resolution': ('resolution', {lambda x: re.match(r'(?i)\d+x\d+|\d+p', x).group().lower()}),
             }))
 
diff --git a/yt_dlp/extractor/tva.py b/yt_dlp/extractor/tva.py
index d702640f3..48c4e9cba 100644
--- a/yt_dlp/extractor/tva.py
+++ b/yt_dlp/extractor/tva.py
@@ -1,4 +1,3 @@
-import functools
 import re
 
 from .brightcove import BrightcoveNewIE
@@ -68,7 +67,7 @@ class TVAIE(InfoExtractor):
             'episode': episode,
             **traverse_obj(entity, {
                 'description': ('longDescription', {str}),
-                'duration': ('durationMillis', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('durationMillis', {float_or_none(scale=1000)}),
                 'channel': ('knownEntities', 'channel', 'name', {str}),
                 'series': ('knownEntities', 'videoShow', 'name', {str}),
                 'season_number': ('slug', {lambda x: re.search(r'/s(?:ai|ea)son-(\d+)/', x)}, 1, {int_or_none}),
diff --git a/yt_dlp/extractor/vidyard.py b/yt_dlp/extractor/vidyard.py
index 20a54b161..2f6d1f4c5 100644
--- a/yt_dlp/extractor/vidyard.py
+++ b/yt_dlp/extractor/vidyard.py
@@ -1,4 +1,3 @@
-import functools
 import re
 
 from .common import InfoExtractor
@@ -72,9 +71,9 @@ class VidyardBaseIE(InfoExtractor):
                 'id': ('facadeUuid', {str}),
                 'display_id': ('videoId', {int}, {str_or_none}),
                 'title': ('name', {str}),
-                'description': ('description', {str}, {unescapeHTML}, {lambda x: x or None}),
+                'description': ('description', {str}, {unescapeHTML}, filter),
                 'duration': ((
-                    ('milliseconds', {functools.partial(float_or_none, scale=1000)}),
+                    ('milliseconds', {float_or_none(scale=1000)}),
                     ('seconds', {int_or_none})), any),
                 'thumbnails': ('thumbnailUrls', ('small', 'normal'), {'url': {url_or_none}}),
                 'tags': ('tags', ..., 'name', {str}),
diff --git a/yt_dlp/extractor/vrt.py b/yt_dlp/extractor/vrt.py
index 33ff57475..9345ca962 100644
--- a/yt_dlp/extractor/vrt.py
+++ b/yt_dlp/extractor/vrt.py
@@ -1,4 +1,3 @@
-import functools
 import json
 import time
 import urllib.parse
@@ -171,7 +170,7 @@ class VRTIE(VRTBaseIE):
             **traverse_obj(data, {
                 'title': ('title', {str}),
                 'description': ('shortDescription', {str}),
-                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
                 'thumbnail': ('posterImageUrl', {url_or_none}),
             }),
         }
diff --git a/yt_dlp/extractor/weibo.py b/yt_dlp/extractor/weibo.py
index b5c0e926f..e632858e5 100644
--- a/yt_dlp/extractor/weibo.py
+++ b/yt_dlp/extractor/weibo.py
@@ -67,7 +67,7 @@ class WeiboBaseIE(InfoExtractor):
                 'format': ('quality_desc', {str}),
                 'format_id': ('label', {str}),
                 'ext': ('mime', {mimetype2ext}),
-                'tbr': ('bitrate', {int_or_none}, {lambda x: x or None}),
+                'tbr': ('bitrate', {int_or_none}, filter),
                 'vcodec': ('video_codecs', {str}),
                 'fps': ('fps', {int_or_none}),
                 'width': ('width', {int_or_none}),
@@ -107,14 +107,14 @@ class WeiboBaseIE(InfoExtractor):
             **traverse_obj(video_info, {
                 'id': (('id', 'id_str', 'mid'), {str_or_none}),
                 'display_id': ('mblogid', {str_or_none}),
-                'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, {lambda x: x or None}),
+                'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, filter),
                 'description': ('text_raw', {str}),
                 'duration': ('page_info', 'media_info', 'duration', {int_or_none}),
                 'timestamp': ('page_info', 'media_info', 'video_publish_time', {int_or_none}),
                 'thumbnail': ('page_info', 'page_pic', {url_or_none}),
                 'uploader': ('user', 'screen_name', {str}),
                 'uploader_id': ('user', ('id', 'id_str'), {str_or_none}),
-                'uploader_url': ('user', 'profile_url', {lambda x: urljoin('https://weibo.com/', x)}),
+                'uploader_url': ('user', 'profile_url', {urljoin('https://weibo.com/')}),
                 'view_count': ('page_info', 'media_info', 'online_users_number', {int_or_none}),
                 'like_count': ('attitudes_count', {int_or_none}),
                 'repost_count': ('reposts_count', {int_or_none}),
diff --git a/yt_dlp/extractor/weverse.py b/yt_dlp/extractor/weverse.py
index 6f1a8b95d..53ad1100d 100644
--- a/yt_dlp/extractor/weverse.py
+++ b/yt_dlp/extractor/weverse.py
@@ -159,8 +159,8 @@ class WeverseBaseIE(InfoExtractor):
             'creators': ('community', 'communityName', {str}, all),
             'channel_id': (('community', 'author'), 'communityId', {str_or_none}),
             'duration': ('extension', 'video', 'playTime', {float_or_none}),
-            'timestamp': ('publishedAt', {lambda x: int_or_none(x, 1000)}),
-            'release_timestamp': ('extension', 'video', 'onAirStartAt', {lambda x: int_or_none(x, 1000)}),
+            'timestamp': ('publishedAt', {int_or_none(scale=1000)}),
+            'release_timestamp': ('extension', 'video', 'onAirStartAt', {int_or_none(scale=1000)}),
             'thumbnail': ('extension', (('mediaInfo', 'thumbnail', 'url'), ('video', 'thumb')), {url_or_none}),
             'view_count': ('extension', 'video', 'playCount', {int_or_none}),
             'like_count': ('extension', 'video', 'likeCount', {int_or_none}),
@@ -469,7 +469,7 @@ class WeverseMomentIE(WeverseBaseIE):
                 'creator': (('community', 'author'), 'communityName', {str}),
                 'channel_id': (('community', 'author'), 'communityId', {str_or_none}),
                 'duration': ('extension', 'moment', 'video', 'uploadInfo', 'playTime', {float_or_none}),
-                'timestamp': ('publishedAt', {lambda x: int_or_none(x, 1000)}),
+                'timestamp': ('publishedAt', {int_or_none(scale=1000)}),
                 'thumbnail': ('extension', 'moment', 'video', 'uploadInfo', 'imageUrl', {url_or_none}),
                 'like_count': ('emotionCount', {int_or_none}),
                 'comment_count': ('commentCount', {int_or_none}),
diff --git a/yt_dlp/extractor/wevidi.py b/yt_dlp/extractor/wevidi.py
index 0db52af43..88b394fa2 100644
--- a/yt_dlp/extractor/wevidi.py
+++ b/yt_dlp/extractor/wevidi.py
@@ -78,7 +78,7 @@ class WeVidiIE(InfoExtractor):
         }
 
         src_path = f'{wvplayer_props["srcVID"]}/{wvplayer_props["srcUID"]}/{wvplayer_props["srcNAME"]}'
-        for res in traverse_obj(wvplayer_props, ('resolutions', ..., {int}, {lambda x: x or None})):
+        for res in traverse_obj(wvplayer_props, ('resolutions', ..., {int}, filter)):
             format_id = str(-(res // -2) - 1)
             yield {
                 'acodec': 'mp4a.40.2',
diff --git a/yt_dlp/extractor/xiaohongshu.py b/yt_dlp/extractor/xiaohongshu.py
index 00c6ed7c5..1280ca6a9 100644
--- a/yt_dlp/extractor/xiaohongshu.py
+++ b/yt_dlp/extractor/xiaohongshu.py
@@ -1,4 +1,2 @@
-import functools
-
 from .common import InfoExtractor
 from ..utils import (
@@ -51,7 +50,7 @@ class XiaoHongShuIE(InfoExtractor):
                 'tbr': ('avgBitrate', {int_or_none}),
                 'format': ('qualityType', {str}),
                 'filesize': ('size', {int_or_none}),
-                'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
+                'duration': ('duration', {float_or_none(scale=1000)}),
             })
 
             formats.extend(traverse_obj(info, (('mediaUrl', ('backupUrls', ...)), {
diff --git a/yt_dlp/extractor/youporn.py b/yt_dlp/extractor/youporn.py
index 4a00dfe9c..8eb77aa03 100644
--- a/yt_dlp/extractor/youporn.py
+++ b/yt_dlp/extractor/youporn.py
@@ -247,7 +247,7 @@ class YouPornListBase(InfoExtractor):
             if not html:
                 return
             for element in get_elements_html_by_class('video-title', html):
-                if video_url := traverse_obj(element, ({extract_attributes}, 'href', {lambda x: urljoin(url, x)})):
+                if video_url := traverse_obj(element, ({extract_attributes}, 'href', {urljoin(url)})):
                     yield self.url_result(video_url)
 
             if page_num is not None:
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 88c032cdb..caa99182a 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -3611,7 +3611,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'frameworkUpdates', 'entityBatchUpdate', 'mutations',
             lambda _, v: v['payload']['macroMarkersListEntity']['markersList']['markerType'] == 'MARKER_TYPE_HEATMAP',
             'payload', 'macroMarkersListEntity', 'markersList', 'markers', ..., {
-                'start_time': ('startMillis', {functools.partial(float_or_none, scale=1000)}),
+                'start_time': ('startMillis', {float_or_none(scale=1000)}),
                 'end_time': {lambda x: (int(x['startMillis']) + int(x['durationMillis'])) / 1000},
                 'value': ('intensityScoreNormalized', {float_or_none}),
             })) or None
@@ -3637,7 +3637,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'author_is_verified': ('author', 'isVerified', {bool}),
                 'author_url': ('author', 'channelCommand', 'innertubeCommand', (
                     ('browseEndpoint', 'canonicalBaseUrl'), ('commandMetadata', 'webCommandMetadata', 'url'),
-                ), {lambda x: urljoin('https://www.youtube.com', x)}),
+                ), {urljoin('https://www.youtube.com')}),
             }, get_all=False),
             'is_favorited': (None if toolbar_entity_payload is None else
                              toolbar_entity_payload.get('heartState') == 'TOOLBAR_HEART_STATE_HEARTED'),
@@ -4304,7 +4304,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     continue
 
             tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
-            format_duration = traverse_obj(fmt, ('approxDurationMs', {lambda x: float_or_none(x, 1000)}))
+            format_duration = traverse_obj(fmt, ('approxDurationMs', {float_or_none(scale=1000)}))
             # Some formats may have much smaller duration than others (possibly damaged during encoding)
             # E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
             # Make sure to avoid false positives with small duration differences.
diff --git a/yt_dlp/extractor/zaiko.py b/yt_dlp/extractor/zaiko.py
index 4563b7ba0..13ce5de12 100644
--- a/yt_dlp/extractor/zaiko.py
+++ b/yt_dlp/extractor/zaiko.py
@@ -109,7 +109,7 @@ class ZaikoIE(ZaikoBaseIE):
                 'uploader': ('profile', 'name', {str}),
                 'uploader_id': ('profile', 'id', {str_or_none}),
                 'release_timestamp': ('stream', 'start', 'timestamp', {int_or_none}),
-                'categories': ('event', 'genres', ..., {lambda x: x or None}),
+                'categories': ('event', 'genres', ..., filter),
             }),
             'alt_title': traverse_obj(initial_event_info, ('title', {str})),
             'thumbnails': [{'url': url, 'id': url_basename(url)} for url in thumbnail_urls if url_or_none(url)],
diff --git a/yt_dlp/options.py b/yt_dlp/options.py
index 8eb5f2a56..6c6a0b3f9 100644
--- a/yt_dlp/options.py
+++ b/yt_dlp/options.py
@@ -700,7 +700,8 @@ def create_parser():
     selection.add_option(
         '--break-on-existing',
         action='store_true', dest='break_on_existing', default=False,
-        help='Stop the download process when encountering a file that is in the archive')
+        help='Stop the download process when encountering a file that is in the archive '
+             'supplied with the --download-archive option')
     selection.add_option(
         '--no-break-on-existing',
         action='store_false', dest='break_on_existing',
diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py
index 844818e38..b28bb555e 100644
--- a/yt_dlp/utils/_utils.py
+++ b/yt_dlp/utils/_utils.py
@@ -5142,6 +5142,7 @@ class _UnsafeExtensionError(Exception):
         'rm',
         'swf',
         'ts',
+        'vid',
         'vob',
         'vp9',
 
@@ -5174,6 +5175,7 @@ class _UnsafeExtensionError(Exception):
         'heic',
         'ico',
         'image',
+        'jfif',
         'jng',
         'jpe',
         'jpeg',