Give it up for day 1 of smooth brain shenanigans! https://pbs.twimg.com/media/E1bxikWWEAEGYHU.png
commit 67dce51f45

@@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml

@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="jdk" jdkName="Python 3.8 (aoc-2022)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (aoc-2022)" project-jdk-type="Python SDK" />
</project>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/aoc-2022.iml" filepath="$PROJECT_DIR$/.idea/aoc-2022.iml" />
    </modules>
  </component>
</project>

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

@@ -0,0 +1,25 @@
import os
import aocd
import pandas as pd

os.environ["AOC_SESSION"]='53616c7465645f5fde65f3dbf32f917afa58e3e337ef5b813300ee2768c7c808ba404009fe240d8c2ed2f84b9d0799b0ea54c934f5533a0555f920ca0d59c46b'
puzzle_input = aocd.get_data(day=1, year=2022)

puzzle_input_list = str(puzzle_input).split('\n')
elf_df = pd.DataFrame(columns=['Name','Total Calories'])

reindeer_total = 0
elf_id = 0

for calorie_count in puzzle_input_list:
    try:
        reindeer_total = reindeer_total + int(calorie_count)
    except:
        elf_id = elf_id + 1
        elf_name = f'elf_{elf_id}'
        elf_df.loc[len(elf_df.index)] = elf_name,reindeer_total
        reindeer_total = 0

top_3_chonkers = elf_df['Total Calories'].nlargest(3).sum(numeric_only=True)

print(f'The three chonkiest elves ate {top_3_chonkers:,} calories, what heccin chonkers.')

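For the record, that solution ships a couple of bonus smooth-brain features: the session token is hard-coded and committed, the bare except swallows everything, and the final elf never gets flushed into the dataframe unless the input happens to end with a blank line. A tidied-up sketch of the same approach -- my rework, not what's in the commit:

import aocd

# session token comes from the AOC_SESSION env var (or a token file),
# rather than being hard-coded and committed
puzzle_input = aocd.get_data(day=1, year=2022)

elf_totals = []
running_total = 0
for line in puzzle_input.split('\n'):
    try:
        running_total += int(line)
    except ValueError:  # blank line separates one elf's snacks from the next
        elf_totals.append(running_total)
        running_total = 0
elf_totals.append(running_total)  # flush the last elf -- no trailing blank line

top_3_chonkers = sum(sorted(elf_totals, reverse=True)[:3])
print(f'The three chonkiest elves ate {top_3_chonkers:,} calories, what heccin chonkers.')
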
@@ -0,0 +1 @@
pip

@@ -0,0 +1,165 @@
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library. A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

@@ -0,0 +1,122 @@
Metadata-Version: 2.1
Name: Pebble
Version: 5.0.3
Summary: Threading and multiprocessing eye-candy.
Home-page: https://github.com/noxdafox/pebble
Author: Matteo Cafasso
Author-email: noxdafox@gmail.com
License: LGPL
Keywords: thread process pool decorator
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Requires-Python: >=3.6

Pebble
======

Pebble provides a neat API to manage threads and processes within an application.

:Source: https://github.com/noxdafox/pebble
:Documentation: https://pebble.readthedocs.io
:Download: https://pypi.org/project/Pebble/

|build badge| |docs badge| |downloads badge|

.. |build badge| image:: https://github.com/noxdafox/pebble/actions/workflows/action.yml/badge.svg
   :target: https://github.com/noxdafox/pebble/actions/workflows/action.yml
   :alt: Build Status
.. |docs badge| image:: https://readthedocs.org/projects/pebble/badge/?version=latest
   :target: https://pebble.readthedocs.io
   :alt: Documentation Status
.. |downloads badge| image:: https://img.shields.io/pypi/dm/pebble
   :target: https://pypistats.org/packages/pebble
   :alt: PyPI - Downloads

Examples
--------

Run a job in a separate thread and wait for its results.

.. code:: python

    from pebble import concurrent

    @concurrent.thread
    def function(foo, bar=0):
        return foo + bar

    future = function(1, bar=2)

    result = future.result()  # blocks until results are ready

Same code with AsyncIO support.

.. code:: python

    import asyncio

    from pebble import asynchronous

    @asynchronous.thread
    def function(foo, bar=0):
        return foo + bar

    async def asynchronous_function():
        result = await function(1, bar=2)  # blocks until results are ready
        print(result)

    asyncio.run(asynchronous_function())

Run a function with a timeout of ten seconds and deal with errors.

.. code:: python

    from pebble import concurrent
    from concurrent.futures import TimeoutError

    @concurrent.process(timeout=10)
    def function(foo, bar=0):
        return foo + bar

    future = function(1, bar=2)

    try:
        result = future.result()  # blocks until results are ready
    except TimeoutError as error:
        print("Function took longer than %d seconds" % error.args[1])
    except Exception as error:
        print("Function raised %s" % error)
        print(error.traceback)  # traceback of the function

Pools support workers restart, timeout for long running tasks and more.

.. code:: python

    from pebble import ProcessPool
    from concurrent.futures import TimeoutError

    TIMEOUT_SECONDS = 3

    def function(foo, bar=0):
        return foo + bar

    def task_done(future):
        try:
            result = future.result()  # blocks until results are ready
        except TimeoutError as error:
            print("Function took longer than %d seconds" % error.args[1])
        except Exception as error:
            print("Function raised %s" % error)
            print(error.traceback)  # traceback of the function

    with ProcessPool(max_workers=5, max_tasks=10) as pool:
        for index in range(0, 10):
            future = pool.schedule(function, index, bar=1, timeout=TIMEOUT_SECONDS)
            future.add_done_callback(task_done)

@@ -0,0 +1,36 @@
Pebble-5.0.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Pebble-5.0.3.dist-info/LICENSE,sha256=-XvEu5t66KZTlBBzZ4tcd3Xo3kSgHDvMIefNwUi5DmE,7632
Pebble-5.0.3.dist-info/METADATA,sha256=a9aYFWeYWNOoKtZdApgQI1PFzDuUYjoRpA-80Z-qjR8,3575
Pebble-5.0.3.dist-info/RECORD,,
Pebble-5.0.3.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
Pebble-5.0.3.dist-info/top_level.txt,sha256=EEQYWBulldjRWqxjpmwEuoRdK7f1pNJ6jq6p3MGNRjg,7
pebble/__init__.py,sha256=SAodUEDIn9Q0SWoyq039EwW_KirtKUnkbtToEC4wKLE,596
pebble/__pycache__/__init__.cpython-38.pyc,,
pebble/__pycache__/common.cpython-38.pyc,,
pebble/__pycache__/decorators.cpython-38.pyc,,
pebble/__pycache__/functions.cpython-38.pyc,,
pebble/asynchronous/__init__.py,sha256=ClWUUyk8YL37xcSvENIqtlGK0alj6leJwe5hvLfo8tk,137
pebble/asynchronous/__pycache__/__init__.cpython-38.pyc,,
pebble/asynchronous/__pycache__/process.cpython-38.pyc,,
pebble/asynchronous/__pycache__/thread.cpython-38.pyc,,
pebble/asynchronous/process.py,sha256=O4sKUX1AS_6aTMz3ALf_4Ms73tGNTewFmnn_6nbq_r0,7188
pebble/asynchronous/thread.py,sha256=XfhalURvjexqJzAnUl2bA_G_BDn9567Z5buOmU7MfWk,2972
pebble/common.py,sha256=edDb3TzzGFdM0wvlLUtroEkl5fyUsbMao-ixxCJinB4,5693
pebble/concurrent/__init__.py,sha256=TAvGIxAOzIeNeZ5SmPTbIra1vFNKZQoHuhq-rFFcvmE,134
pebble/concurrent/__pycache__/__init__.cpython-38.pyc,,
pebble/concurrent/__pycache__/process.cpython-38.pyc,,
pebble/concurrent/__pycache__/thread.cpython-38.pyc,,
pebble/concurrent/process.py,sha256=6ZpXnyhfwJZbfZiJ1oORjuQRpSZQ5aw53LchLCygStg,6772
pebble/concurrent/thread.py,sha256=zbkjrxKVSrt6zRL48IjtIogUD4FHaqY72gN7fFb94Dc,2727
pebble/decorators.py,sha256=rsRqmgYoBxWVQ8oPp2buVmnK_b0kiVYvZS8OvIY-Fzs,2442
pebble/functions.py,sha256=6sdUdBEOXVIWuybDnmVnk6Q-3kDtl3Lo309RZbPZDII,4195
pebble/pool/__init__.py,sha256=0LxrI3LR_0YGEZv7K2pqr8EaFAaWDzms3KL2rwSCw40,256
pebble/pool/__pycache__/__init__.cpython-38.pyc,,
pebble/pool/__pycache__/base_pool.cpython-38.pyc,,
pebble/pool/__pycache__/channel.cpython-38.pyc,,
pebble/pool/__pycache__/process.cpython-38.pyc,,
pebble/pool/__pycache__/thread.cpython-38.pyc,,
pebble/pool/base_pool.py,sha256=y6aFbFj-gU7qE5Mm6eh9xVzUEh4OgjNtCV_Dem4EfMc,7902
pebble/pool/channel.py,sha256=x6Pj-f0__bWn4rIWjKhdS-LJa8Ja3HMCBjd6URaeQZU,5999
pebble/pool/process.py,sha256=nQ8BOVzgE5KCeF9yQr_AeNU3ruHJZbsa1R8d1cz2VUc,17448
pebble/pool/thread.py,sha256=3XSP9K6vk76bIc2-5Cwy6IkmOu-F5PGrkjrbtwAndzw,6633

@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1 @@
pebble

@@ -0,0 +1,222 @@
# don't import any costly modules
import sys
import os


is_pypy = '__pypy__' in sys.builtin_module_names


def warn_distutils_present():
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings

    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )


def clear_distutils():
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    mods = [
        name
        for name in sys.modules
        if name == "distutils" or name.startswith("distutils.")
    ]
    for name in mods:
        del sys.modules[name]


def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    return which == 'local'


def ensure_local_distutils():
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()


class _TrivialRe:
    def __init__(self, *patterns):
        self._patterns = patterns

    def match(self, string):
        return all(pat in string for pat in self._patterns)


class DistutilsMetaFinder:
    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )


for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )


DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        remove_shim()


def insert_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass

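That whole file is setuptools' `_distutils_hack`, which rides along in every venv: it plants a finder at the front of `sys.meta_path` so that `import distutils` can be served setuptools' bundled copy instead of the stdlib one. A toy version of the same redirect mechanism (my sketch, not setuptools code; "shiny" and "legacy" are made-up names):

import importlib.abc
import importlib.util
import sys
import types

# make-believe replacement module that imports of "legacy" should receive
shiny = types.ModuleType("shiny")
shiny.greet = lambda: "hello from shiny"


class RedirectLoader(importlib.abc.Loader):
    def create_module(self, spec):
        return shiny  # hand back the already-built replacement module

    def exec_module(self, module):
        pass  # nothing to execute; the module is already initialised


class RedirectFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path=None, target=None):
        if fullname == "legacy":
            return importlib.util.spec_from_loader("legacy", RedirectLoader())


sys.meta_path.insert(0, RedirectFinder())

import legacy  # actually resolves to the shiny module
print(legacy.greet())  # -> "hello from shiny"
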
@@ -0,0 +1 @@
__import__('_distutils_hack').do_override()

@@ -0,0 +1 @@
pip

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 wim glenn

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,237 @@
Metadata-Version: 2.1
Name: advent-of-code-data
Version: 1.2.3
Summary: Get your puzzle data with a single import
Home-page: https://github.com/wimglenn/advent-of-code-data
Author: Wim Glenn
Author-email: hey@wimglenn.com
License: MIT
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Games/Entertainment :: Puzzle Games
Description-Content-Type: text/x-rst
License-File: LICENSE
Requires-Dist: python-dateutil
Requires-Dist: requests
Requires-Dist: termcolor
Requires-Dist: beautifulsoup4
Requires-Dist: pebble
Requires-Dist: tzlocal
Requires-Dist: colorama ; platform_system == "Windows"

Advent of Code data
===================

|pyversions|_ |pypi|_ |womm|_ |actions|_ |codecov|_

.. |pyversions| image:: https://img.shields.io/pypi/pyversions/advent-of-code-data.svg
.. _pyversions:

.. |pypi| image:: https://img.shields.io/pypi/v/advent-of-code-data.svg
.. _pypi: https://pypi.org/project/advent-of-code-data/

.. |womm| image:: https://cdn.rawgit.com/nikku/works-on-my-machine/v0.2.0/badge.svg
.. _womm: https://github.com/nikku/works-on-my-machine

.. |actions| image:: https://github.com/wimglenn/advent-of-code-data/actions/workflows/tests.yml/badge.svg
.. _actions: https://github.com/wimglenn/advent-of-code-data/actions/workflows/tests.yml

.. |codecov| image:: https://codecov.io/gh/wimglenn/advent-of-code-data/branch/main/graph/badge.svg
.. _codecov: https://codecov.io/gh/wimglenn/advent-of-code-data


Get your puzzle data with a single import statement:

.. code-block:: python

    from aocd import data

Might be useful for lazy Pythonistas and speedhackers.

If you'd just like to print or keep your own input files, there's a shell entry point for that:

.. code-block:: bash

    aocd > input.txt  # saves today's data
    aocd 13 2018 > day13.txt  # save some other day's data

There are currently two convenience transforms (maybe more to come later):

.. code-block:: python

    from aocd import lines  # like data.splitlines()
    from aocd import numbers  # uses regex pattern -?\d+ to extract integers from data

If all that sounds too magical, there is a simple getter function to just return your raw data.

.. code-block:: python

    >>> from aocd import get_data
    >>> get_data(day=24, year=2015)
    '1\n2\n3\n7\n11\n13\n17\n19\n23\n31...

Note that ``aocd`` will cache puzzle inputs and answers (including incorrect guesses) clientside, to save unnecessary requests to the server.


Quickstart
----------

Install with pip

.. code-block:: bash

    pip install advent-of-code-data

**Puzzle inputs differ by user.** So export your session ID, for example:

.. code-block:: bash

    export AOC_SESSION=cafef00db01dfaceba5eba11deadbeef

This is a cookie which is set when you login to AoC. You can find it with
your browser inspector. If you're hacking on AoC at all you probably already
know these kind of tricks, but if you need help with that part then you can
`look here <https://github.com/wimglenn/advent-of-code/issues/1>`_.

*Note:* If you don't like the env var, you could also keep your token(s) in files.
By default the location is ``~/.config/aocd/token``. Set the ``AOCD_DIR`` environment
variable to some existing directory if you wish to use another location to store token(s).

*New in version 0.9.0.* There's a utility script ``aocd-token`` which attempts to
find session tokens from your browser's cookie storage. This feature is experimental
and requires you to additionally install the package ``browser-cookie3``. Only Chrome
and Firefox browsers are currently supported. On macOS, you may get an authentication
dialog requesting permission, since Python is attempting to read browser storage files.
This is expected, the script *is* actually scraping those private files to access AoC
session token(s).

If this utility script was able to locate your token, you can save it to file with:

.. code-block:: bash

    $ aocd-token > ~/.config/aocd/token

Automated submission
--------------------

*New in version 0.4.0.* Basic use:

.. code-block:: python

    from aocd import submit
    submit(my_answer, part="a", day=25, year=2017)

Note that the same filename introspection of year/day also works for automated
submission. There's also introspection of the "level", i.e. part a or part b,
aocd can automatically determine if you have already completed part a or not
and submit your answer for the correct part accordingly. In this case, just use:

.. code-block:: python

    from aocd import submit
    submit(my_answer)

The response message from AoC will be printed in the terminal. If you gave
the right answer, then the puzzle will be refreshed in your web browser
(so you can read the instructions for the next part, for example).
**Proceed with caution!** If you submit wrong guesses, your user **WILL**
get rate-limited by Eric, so don't call submit until you're fairly confident
you have a correct answer!


OOP-style interfaces
--------------------

*New in version 0.8.0.*

Input data is via regular attribute access. Example usage:

.. code-block:: python

    >>> from aocd.models import Puzzle
    >>> puzzle = Puzzle(year=2017, day=20)
    >>> puzzle
    <Puzzle(2017, 20) at 0x107322978 - Particle Swarm>
    >>> puzzle.input_data
    'p=<-1027,-979,-188>, v=<7,60,66>, a=<9,1,-7>\np=<-1846,-1539,-1147>, v=<88,145,67>, a=<6,-5,2> ...

Submitting answers is also by regular attribute access. Any incorrect answers you submitted are remembered, and aocd will prevent you from attempting to submit the same incorrect value twice:

.. code-block:: python

    >>> puzzle.answer_a = 299
    That's not the right answer; your answer is too high. If you're stuck, there are some general tips on the about page, or you can ask for hints on the subreddit. Please wait one minute before trying again. (You guessed 299.) [Return to Day 20]
    >>> puzzle.answer_a = 299
    aocd will not submit that answer again. You've previously guessed 299 and the server responded:
    That's not the right answer; your answer is too high. If you're stuck, there are some general tips on the about page, or you can ask for hints on the subreddit. Please wait one minute before trying again. (You guessed 299.) [Return to Day 20]

Your own solutions can be executed by writing and using an `entry-point <https://packaging.python.org/specifications/entry-points/>`_ into your code, registered in the group ``"adventofcode.user"``. Your entry-point should resolve to a callable, and it will be called with three keyword arguments: ``year``, ``day``, and ``data``. For example, `my entry-point is called "wim" <https://github.com/wimglenn/advent-of-code-wim/blob/d033366c16fba50e413f2fa7df32e8a0eac9542f/setup.py#L36>`_ and running against `my code <https://github.com/wimglenn/advent-of-code-wim/blob/main/aoc_wim/__init__.py>`_ (after ``pip install advent-of-code-wim``) would be like this:

.. code-block:: python

    >>> puzzle = Puzzle(year=2018, day=10)
    >>> puzzle.solve_for("wim")
    ('XLZAKBGZ', '10656')


If you've never written a plugin before, see https://entrypoints.readthedocs.io/ for more info about plugin systems based on Python entry-points.


Verify your code against multiple different inputs
--------------------------------------------------

*New in version 0.8.0.*

Ever tried running your code against other people's inputs? AoC is full of tricky edge cases. You may find that sometimes you're only getting the right answer by luck, and your code will fail on some other dataset. Using aocd, you can collect a few different auth tokens for each of your accounts (github/google/reddit/twitter) and verify your answers across multiple datasets.

To see an example of how to setup the entry-point for your code, look at `advent-of-code-sample <https://github.com/wimglenn/advent-of-code-sample>`_ for some inspiration. After dumping a bunch of session tokens into ``~/.config/aocd/tokens.json`` you could do something like this by running the ``aoc`` console script:

.. image:: https://user-images.githubusercontent.com/6615374/52138567-26e09f80-2613-11e9-8eaf-c42757bc9b86.png

As you can see above, I actually had incorrect code for `2017 Day 20: Particle Swarm <https://adventofcode.com/2017/day/20>`_, but that `bug <https://github.com/wimglenn/advent-of-code-wim/commit/31e454270001c6d06b46014fe5dafd03e29507b8>`_ only showed up for the google token's dataset. Whoops. Also, it looks like my algorithm for `2017 Day 13: Packet Scanners <https://adventofcode.com/2017/day/13>`_ was kinda garbage. Too slow. According to `AoC FAQ <https://adventofcode.com/about>`_:

*every problem has a solution that completes in at most 15 seconds on ten-year-old hardware*

By the way, the ``aoc`` runner will kill your code if it takes more than 60 seconds, you can increase/decrease this by passing a command-line option, e.g. ``--timeout=120``.

*New in version 1.1.0:* Added option ``--quiet`` to suppress any output from plugins so it doesn't mess up the ``aoc`` runner's display.


How does this library work?
---------------------------

It will automatically get today's data at import time, if used within the
interactive interpreter. Otherwise, the date is found by introspection of the
path and file name from which ``aocd`` module was imported.

This means your filenames should be something sensible. The examples below
should all parse correctly, because they have digits in the path that are
unambiguously recognisable as AoC years (2015+) or days (1-25).

.. code-block::

    q03.py
    xmas_problem_2016_25b_dawg.py
    ~/src/aoc/2015/p8.py

A filename like ``problem_one.py`` will not work, so don't do that. If
you don't like weird frame hacks, just use the ``aocd.get_data()`` function
instead and have a nice day!


Cache invalidation?
-------------------

``aocd`` saves puzzle inputs, answers, names, and your bad guesses to avoid hitting
the AoC servers any more often than strictly necessary (this also speeds things up).
All data is persisted in plain text files under ``~/.config/aocd``. To remove any
caches, you may simply delete whatever files you want under that directory tree.
If you'd prefer to use a different path, then export an ``AOCD_DIR`` environment
variable with the desired location.

*New in version 1.1.0:* By default, your token files are also stored under ``~/.config/aocd``.
If you want the token(s) and cached inputs/answers to exist in separate locations, you can set
the environment variable ``AOCD_CONFIG_DIR`` to specify a different location for the token(s).

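The plugin interface that README describes maps straight onto the day-1 puzzle. A minimal sketch of an "adventofcode.user" entry point (my illustration -- the module and entry-point names are invented, not from the commit):

# my_aoc_plugin.py -- registered in setup.py as:
#   entry_points={"adventofcode.user": ["mydawg = my_aoc_plugin:solve"]}
def solve(year, day, data):
    # aocd's runner calls the entry point with these keyword arguments
    # and expects both puzzle answers back
    if (year, day) == (2022, 1):
        elves = [sum(map(int, group.split())) for group in data.split("\n\n")]
        part_a = max(elves)
        part_b = sum(sorted(elves, reverse=True)[:3])
        return str(part_a), str(part_b)
    raise NotImplementedError((year, day))
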
@@ -0,0 +1,33 @@
../../Scripts/aoc.exe,sha256=hWXh6Peo-mPvxcD3kF9w16pzkGWYrWcNavuWRdMihbg,108402
../../Scripts/aocd-token.exe,sha256=p8g9XG0Ctx1JDg2f28_r1KDeb7HgFvi7S5byi4suVNE,108437
../../Scripts/aocd.exe,sha256=6ay0pNBYKsm0K3Sww_Rr8j2yr8UO-Qmgfbgs9jr_hh4,108399
advent_of_code_data-1.2.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
advent_of_code_data-1.2.3.dist-info/LICENSE,sha256=sDdX5cBRRpk3rmZ8hbYEfAUIYRdDqrlXmChOUkqf62o,1066
advent_of_code_data-1.2.3.dist-info/METADATA,sha256=g7AqscQXoWFI7yuhcufbzyuCfefwWzuAUddC9iktCdY,10704
advent_of_code_data-1.2.3.dist-info/RECORD,,
advent_of_code_data-1.2.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
advent_of_code_data-1.2.3.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
advent_of_code_data-1.2.3.dist-info/entry_points.txt,sha256=q3IbXzUveiOIZHwgfDydcrKHPI6foJoNkWLuny26xw4,110
advent_of_code_data-1.2.3.dist-info/top_level.txt,sha256=b0LekMCmohqRoroBX-8FUneWDXflOzG2oHWB8EsgOSA,5
aocd/__init__.py,sha256=sBAdaIXUTbQdVOsV4GcRdd_yiQb6NTITiX5Z0m_LX_8,1887
aocd/__pycache__/__init__.cpython-38.pyc,,
aocd/__pycache__/cli.cpython-38.pyc,,
aocd/__pycache__/cookies.cpython-38.pyc,,
aocd/__pycache__/exceptions.cpython-38.pyc,,
aocd/__pycache__/get.cpython-38.pyc,,
aocd/__pycache__/models.cpython-38.pyc,,
aocd/__pycache__/post.cpython-38.pyc,,
aocd/__pycache__/runner.cpython-38.pyc,,
aocd/__pycache__/transforms.cpython-38.pyc,,
aocd/__pycache__/utils.cpython-38.pyc,,
aocd/__pycache__/version.cpython-38.pyc,,
aocd/cli.py,sha256=FdBC7QRqCySoXQa_kvjwS3CSCGQ5Jr81d85XhLA0tjE,1844
aocd/cookies.py,sha256=fppUngnc2JD-vNTU2vqsRZ3G6wnMXMDjiwvP9BboViw,4744
aocd/exceptions.py,sha256=WGlKbZ1-VO4tawstWYhoePy0ZfntFDDRpJHaQt0jeVA,608
aocd/get.py,sha256=ac9wWiVypek-fqDKnFVPCqrinC2hQNzRS5Umwi9EFzQ,4661
aocd/models.py,sha256=Z2-IJhPmA-tzW5PNhgLjrFCoKQVRxjMaw1W5YNBhEGE,21063
aocd/post.py,sha256=BR4WePpOAalobqIPRZlMw3-3xIu_uMl8MASstdUsmNQ,1218
aocd/runner.py,sha256=jgjJgO1VmNuoAU-RrJPj2N9N-Ba1kaa2J5PalzBRpUg,9286
aocd/transforms.py,sha256=QknRs0qZXRp1coASJR9fgYMYaiuPJItLG0CqmhUX9XM,725
aocd/utils.py,sha256=kBe7mqOntohUGVqOOpOhelaRXTh5ovA70UpvvQq9Cjo,5219
aocd/version.py,sha256=C-D_WWrVkBDmQmApLcm0sWNh2CgIrwWfc8_sB5vvU-Q,22

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.38.4)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@@ -0,0 +1,4 @@
[console_scripts]
aoc = aocd.runner:main
aocd = aocd.cli:main
aocd-token = aocd.cookies:scrape_session_tokens

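For reference, those three console scripts line up with the commands shown in the README earlier; usage straight from those docs:

aocd > input.txt                   # save today's puzzle input
aocd 13 2018 > day13.txt           # save some other day's input
aocd-token > ~/.config/aocd/token  # scrape a session token from browser cookies
aoc --timeout=120                  # run registered solver plugins with a longer kill timer
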
@@ -0,0 +1 @@
aocd

@@ -0,0 +1,85 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import sys
from functools import partial

from . import cli
from . import cookies
from . import exceptions
from . import get
from . import models
from . import post
from . import runner
from . import transforms
from . import utils
from . import version
from .exceptions import AocdError
from .exceptions import PuzzleUnsolvedError
from .get import get_data
from .get import get_day_and_year
from .post import submit
from .utils import AOC_TZ
from .version import __version__


__all__ = [
    "cli",
    "cookies",
    "exceptions",
    "get",
    "models",
    "post",
    "runner",
    "utils",
    "version",
    "data",
    "get_data",
    "submit",
    "transforms",
    "__version__",
    "AocdError",
    "PuzzleUnsolvedError",
    "AOC_TZ",
]
__all__ += transforms.__all__

# Add declaration for magic attribute `data` to make it discoverable by static analysis tools.
data = ""


class Aocd(object):
    _module = sys.modules[__name__]

    def __dir__(self):
        return __all__

    def __getattr__(self, name):
        if name == "data":
            day, year = get_day_and_year()
            return get_data(day=day, year=year)
        if name == "submit":
            try:
                day, year = get_day_and_year()
            except AocdError:
                return submit
            else:
                return partial(submit, day=day, year=year)
        if name in transforms.__all__:
            transform = getattr(transforms, name)
            return transform(self.data)
        if name in dir(self):
            return globals()[name]
        raise AttributeError(name)


sys.modules[__name__] = Aocd()


if sys.platform == "win32":
    import colorama

    colorama.init(autoreset=True)

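The punchline of aocd/__init__.py is that `sys.modules[__name__] = Aocd()` line: the module swaps itself out for an Aocd instance, so `from aocd import data` goes through `__getattr__` and can fetch your puzzle input on demand. The same trick in miniature (a sketch of the mechanism, not aocd's code):

# lazymod.py -- a module that computes attributes on first access
import sys


class _LazyModule(object):
    def __getattr__(self, name):
        if name == "data":
            print("fetching data now...")  # side effect happens at attribute access
            return "the expensive payload"
        raise AttributeError(name)


sys.modules[__name__] = _LazyModule()

# elsewhere:
#   from lazymod import data   # triggers the fetch at import time
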
@@ -0,0 +1,61 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import datetime
from functools import partial

from .get import get_data
from .get import most_recent_year
from .models import _load_users
from .utils import AOC_TZ
from .utils import _cli_guess
from .version import __version__


def main():
    aoc_now = datetime.datetime.now(tz=AOC_TZ)
    days = range(1, 26)
    years = range(2015, aoc_now.year + int(aoc_now.month == 12))
    users = _load_users()
    parser = argparse.ArgumentParser(
        description="Advent of Code Data v{}".format(__version__),
        usage="aocd [day 1-25] [year 2015-{}]".format(years[-1]),
    )
    parser.add_argument(
        "day",
        nargs="?",
        type=int,
        default=min(aoc_now.day, 25) if aoc_now.month == 12 else 1,
        help="1-25 (default: %(default)s)",
    )
    parser.add_argument(
        "year",
        nargs="?",
        type=int,
        default=most_recent_year(),
        help="2015-{} (default: %(default)s)".format(years[-1]),
    )
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s v{}".format(__version__),
    )
    if len(users) > 1:
        parser.add_argument("-u", "--user", choices=users, type=partial(_cli_guess, choices=users))
    args = parser.parse_args()
    if args.day in years and args.year in days:
        # be forgiving
        args.day, args.year = args.year, args.day
    if args.day not in days or args.year not in years:
        parser.print_usage()
        parser.exit(1)
    try:
        session = users[args.user]
    except (KeyError, AttributeError):
        session = None
    data = get_data(session=session, day=args.day, year=args.year)
    print(data)

@@ -0,0 +1,130 @@
import argparse
import glob
import json
import logging
import os
import sys

from termcolor import cprint

from aocd.exceptions import DeadTokenError
from aocd.models import AOCD_CONFIG_DIR
from aocd.utils import _ensure_intermediate_dirs
from aocd.utils import get_owner


log = logging.getLogger(__name__)


def get_working_tokens():
    log.debug("checking for installation of browser-cookie3 package")
    try:
        import browser_cookie3 as bc3  # soft dependency
    except ImportError:
        sys.exit("To use this feature you must pip install browser-cookie3")

    log.info("checking browser cookies storage for auth tokens, this might pop up an auth dialog!")
    log.info("checking chrome cookie jar...")
    cookie_files = glob.glob(os.path.expanduser("~/.config/google-chrome/*/Cookies")) + [None]
    chrome_cookies = []
    for cookie_file in cookie_files:
        try:
            chrome = bc3.chrome(cookie_file=cookie_file, domain_name=".adventofcode.com")
        except Exception as err:
            log.debug("Couldn't scrape chrome - %s: %s", type(err), err)
        else:
            chrome_cookies += [c for c in chrome if c.name == "session"]
    log.info("%d candidates from chrome", len(chrome_cookies))
    chrome = chrome_cookies

    log.info("checking firefox cookie jar...")
    try:
        firefox = bc3.firefox(domain_name=".adventofcode.com")
    except Exception as err:
        log.debug("Couldn't scrape firefox - %s: %s", type(err), err)
        firefox = []
    else:
        firefox = [c for c in firefox if c.name == "session"]
        log.info("%d candidates from firefox", len(firefox))

    # order preserving de-dupe
    tokens = list({}.fromkeys([c.value for c in chrome + firefox]))
    removed = len(chrome + firefox) - len(tokens)
    if removed:
        log.info("Removed %d duplicate%s", removed, "s"[:removed-1])

    result = {}  # map of {token: auth source}
    for token in tokens:
        try:
            owner = get_owner(token)
        except DeadTokenError:
            pass
        else:
            result[token] = owner

    return result


def scrape_session_tokens():
    aocd_token_file = os.path.join(AOCD_CONFIG_DIR, "token")
    aocd_tokens_file = os.path.join(AOCD_CONFIG_DIR, "tokens.json")

    parser = argparse.ArgumentParser(description="Scrapes AoC session tokens from your browser's cookie storage")
    parser.add_argument("-v", "--verbose", action="count", help="increased logging (may be specified multiple)")
    parser.add_argument("-c", "--check", nargs="?", help="check existing token(s) and exit", const=True)
    args = parser.parse_args()

    if args.verbose is None:
        log_level = logging.WARNING
    elif args.verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)
    log.debug("called with %r", args)

    if args.check is not None:
        if args.check is True:
            tokens = {}
            if os.environ.get("AOC_SESSION"):
                tokens["AOC_SESSION"] = os.environ["AOC_SESSION"]
            if os.path.isfile(aocd_token_file):
                with open(aocd_token_file) as f:
                    txt = f.read().strip()
                    if txt:
                        tokens[aocd_token_file] = txt.split()[0]
            if os.path.isfile(aocd_tokens_file):
                with open(aocd_tokens_file) as f:
                    tokens.update(json.load(f))
        else:
            tokens = {"CLI": args.check}
        if not tokens:
            sys.exit("no existing tokens found")
        log.debug("%d tokens to check", len(tokens))
        for name, token in tokens.items():
            try:
                owner = get_owner(token)
            except DeadTokenError:
                cprint("{} ({}) is dead".format(token, name), color="red")
            else:
                print("{} ({}) is alive".format(token, name))
                if name != owner:
                    log.info("{} ({}) is owned by {}".format(token, name, owner))
        sys.exit(0)

    working = get_working_tokens()
    if not working:
        sys.exit("could not find any working tokens in browser cookies, sorry :(")

    log.debug("found %d live tokens", len(working))
    for cookie in working.items():
        print("%s <- %s" % cookie)

    if "AOC_SESSION" not in os.environ:
        if not os.path.isfile(aocd_token_file):
            if len(working) == 1:
                [(token, auth_source)] = working.items()
                _ensure_intermediate_dirs(aocd_token_file)
                with open(aocd_token_file, "w") as f:
                    f.write(token)
                log.info("wrote %s session to %s", auth_source, aocd_token_file)

@@ -0,0 +1,25 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


class AocdError(Exception):
    """base exception for this package"""


class PuzzleLockedError(AocdError):
    """trying to access input before the unlock"""


class PuzzleUnsolvedError(AocdError):
    """answer is unknown because user has not solved puzzle yet"""


class DeadTokenError(AocdError):
    """the auth is expired/incorrect"""


class UnknownUserError(AocdError):
    """the token for this userid was not found in the cache"""

@ -0,0 +1,141 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import datetime
import os
import re
import traceback
from logging import getLogger

from .exceptions import AocdError
from .exceptions import PuzzleLockedError
from .models import default_user
from .models import Puzzle
from .models import User
from .utils import AOC_TZ
from .utils import blocker


log = getLogger(__name__)


def get_data(session=None, day=None, year=None, block=False):
    """
    Get data for day (1-25) and year (>= 2015)
    User's session cookie is needed (puzzle inputs differ by user)
    """
    if session is None:
        user = default_user()
    else:
        user = User(token=session)
    if day is None:
        day = current_day()
        log.info("current day=%s", day)
    if year is None:
        year = most_recent_year()
        log.info("most recent year=%s", year)
    puzzle = Puzzle(year=year, day=day, user=user)
    try:
        return puzzle.input_data
    except PuzzleLockedError:
        if not block:
            raise
        q = block == "q"
        blocker(quiet=q, until=(year, day))
        return puzzle.input_data


def most_recent_year():
    """
    This year, if it's December.
    The most recent year, otherwise.
    Note: Advent of Code started in 2015
    """
    aoc_now = datetime.datetime.now(tz=AOC_TZ)
    year = aoc_now.year
    if aoc_now.month < 12:
        year -= 1
    if year < 2015:
        raise AocdError("Time travel not supported yet")
    return year


def current_day():
    """
    Most recent day, if it's during the Advent of Code. Happy Holidays!
    Day 1 is assumed, otherwise.
    """
    aoc_now = datetime.datetime.now(tz=AOC_TZ)
    if aoc_now.month != 12:
        log.warning("current_day is only available in December (EST)")
        return 1
    day = min(aoc_now.day, 25)
    return day


def get_day_and_year():
    """
    Returns tuple (day, year).

    Here be dragons!

    The correct date is determined with introspection of the call stack, first
    finding the filename of the module from which ``aocd`` was imported.

    This means your filenames should be something sensible, which identify the
    day and year unambiguously. The examples below should all parse correctly,
    because they have unique digits in the file path that are recognisable as
    AoC years (2015+) or days (1-25).

    A filename like ``problem_one.py`` will not work, so don't do that. If you
    don't like weird frame hacks, just use the ``aocd.get_data()`` function
    directly instead and have a nice day!
    """
    pattern_year = r"201[5-9]|202[0-9]"
    pattern_day = r"2[0-5]|1[0-9]|[1-9]"
    stack = [f[0] for f in traceback.extract_stack()]
    for name in stack:
        basename = os.path.basename(name)
        reasons_to_skip_frame = [
            not re.search(pattern_day, basename),  # no digits in filename
            name == __file__,  # here
            "importlib" in name,  # Python 3 import machinery
            "/IPython/" in name,  # IPython adds a tonne of stack frames
            name.startswith("<"),  # crap like <decorator-gen-57>
            name.endswith("ython3"),  # ipython3 alias
            basename.startswith("pydev_ipython_console"),  # PyCharm Python Console
        ]
        if not any(reasons_to_skip_frame):
            log.debug("stack crawl found %s", name)
            abspath = os.path.abspath(name)
            break
        log.debug("skipping frame %s", name)
    else:
        import __main__
        if getattr(__main__, "__file__", "<input>") == "<input>":
            log.debug("running within REPL")
            day = current_day()
            year = most_recent_year()
            return day, year
        log.debug("non-interactive")
        raise AocdError("Failed introspection of filename")
    years = {int(year) for year in re.findall(pattern_year, abspath)}
    if len(years) > 1:
        raise AocdError("Failed introspection of year")
    year = years.pop() if years else None
    basename_no_years = re.sub(pattern_year, "", basename)
    try:
        [day] = set(re.findall(pattern_day, basename_no_years))
    except ValueError:
        pass
    else:
        assert not day.startswith("0"), "regex pattern_day must prevent any leading 0"
        day = int(day)
        assert 1 <= day <= 25, "regex pattern_day must only match numbers in range 1-25"
        log.debug("year=%d day=%d", year, day)
        return day, year
    log.debug("giving up introspection for %s", abspath)
    raise AocdError("Failed introspection of day")
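A hedged sketch of calling get_data directly, sidestepping the stack introspection entirely; the import path is assumed from the package layout, and the day/year values are just examples:

# Sketch: explicit day/year means no frame hacks and no filename rules.
from aocd import get_data                  # assumed top-level re-export

data = get_data(day=1, year=2022)          # fetch (or reuse cached) input
data = get_data(day=25, year=2021, block=True)  # if locked, sleep via blocker()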
@ -0,0 +1,570 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import errno
import io
import json
import logging
import os
import re
import sys
import time
import webbrowser
from datetime import datetime
from datetime import timedelta
from textwrap import dedent

import bs4
import pkg_resources
import requests
from termcolor import colored
from termcolor import cprint

from .exceptions import AocdError
from .exceptions import DeadTokenError
from .exceptions import UnknownUserError
from .exceptions import PuzzleUnsolvedError
from .exceptions import PuzzleLockedError
from .utils import AOC_TZ
from .utils import _ensure_intermediate_dirs
from .utils import atomic_write_file
from .utils import get_owner
from .version import __version__


log = logging.getLogger(__name__)

AOCD_DATA_DIR = os.path.expanduser(os.environ.get("AOCD_DIR", os.path.join("~", ".config", "aocd")))
AOCD_CONFIG_DIR = os.path.expanduser(os.environ.get("AOCD_CONFIG_DIR", AOCD_DATA_DIR))
URL = "https://adventofcode.com/{year}/day/{day}"
USER_AGENT = {"User-Agent": "github.com/wimglenn/advent-of-code-data v{} by hey@wimglenn.com".format(__version__)}


class User(object):

    _token2id = None

    def __init__(self, token):
        self.token = token
        self._owner = "unknown.unknown.0"

    @classmethod
    def from_id(cls, id):
        users = _load_users()
        if id not in users:
            raise UnknownUserError("User with id '{}' is not known".format(id))
        user = cls(users[id])
        user._owner = id
        return user

    @property
    def auth(self):
        return {"session": self.token}

    @property
    def id(self):
        fname = os.path.join(AOCD_CONFIG_DIR, "token2id.json")
        if User._token2id is None:
            try:
                with io.open(fname, encoding="utf-8") as f:
                    log.debug("loading user id memo from %s", fname)
                    User._token2id = json.load(f)
            except (IOError, OSError) as err:
                if err.errno != errno.ENOENT:
                    raise
                User._token2id = {}
        if self.token not in User._token2id:
            log.debug("token not found in memo, attempting to determine user id")
            owner = get_owner(self.token)
            log.debug("got owner=%s, adding to memo", owner)
            User._token2id[self.token] = owner
            _ensure_intermediate_dirs(fname)
            with open(fname, "w") as f:
                json.dump(User._token2id, f, sort_keys=True, indent=2)
        else:
            owner = User._token2id[self.token]
        if self._owner == "unknown.unknown.0":
            self._owner = owner
        return owner

    def __str__(self):
        return "<{} {} (token=...{})>".format(type(self).__name__, self._owner, self.token[-4:])

    @property
    def memo_dir(self):
        return os.path.join(AOCD_DATA_DIR, self.id)

    def get_stats(self, years=None):
        aoc_now = datetime.now(tz=AOC_TZ)
        all_years = range(2015, aoc_now.year + int(aoc_now.month == 12))
        if isinstance(years, int) and years in all_years:
            years = (years,)
        if years is None:
            years = all_years
        days = {str(i) for i in range(1, 26)}
        results = {}
        for year in years:
            url = "https://adventofcode.com/{}/leaderboard/self".format(year)
            response = requests.get(url, cookies=self.auth, headers=USER_AGENT)
            response.raise_for_status()
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            if soup.article is None and "You haven't collected any stars" in soup.main.text:
                continue
            if soup.article.pre is None and "overall leaderboard" in soup.article.text:
                msg = "the auth token ...{} is expired or not functioning"
                raise DeadTokenError(msg.format(self.token[-4:]))
            stats_txt = soup.article.pre.text
            lines = stats_txt.splitlines()
            lines = [x for x in lines if x.split()[0] in days]
            for line in reversed(lines):
                vals = line.split()
                day = int(vals[0])
                results[year, day] = {}
                results[year, day]["a"] = {
                    "time": _parse_duration(vals[1]),
                    "rank": int(vals[2]),
                    "score": int(vals[3]),
                }
                if vals[4] != "-":
                    results[year, day]["b"] = {
                        "time": _parse_duration(vals[4]),
                        "rank": int(vals[5]),
                        "score": int(vals[6]),
                    }
        return results


def default_user():
    # export your session id as AOC_SESSION env var
    cookie = os.getenv("AOC_SESSION")
    if cookie:
        return User(token=cookie)

    # or chuck it in a plaintext file at ~/.config/aocd/token
    try:
        with io.open(os.path.join(AOCD_CONFIG_DIR, "token"), encoding="utf-8") as f:
            cookie = f.read().split()[0]
    except (IOError, OSError) as err:
        if err.errno != errno.ENOENT:
            raise
    if cookie:
        return User(token=cookie)

    msg = dedent(
        """\
        ERROR: AoC session ID is needed to get your puzzle data!
        You can find it in your browser cookies after login.
            1) Save the cookie into a text file {}, or
            2) Export the cookie in environment variable AOC_SESSION

        See https://github.com/wimglenn/advent-of-code-wim/issues/1 for more info.
        """
    )
    cprint(msg.format(os.path.join(AOCD_CONFIG_DIR, "token")), color="red", file=sys.stderr)
    raise AocdError("Missing session ID")


class Puzzle(object):
    def __init__(self, year, day, user=None):
        self.year = year
        self.day = day
        if user is None:
            user = default_user()
        self._user = user
        self.input_data_url = self.url + "/input"
        self.submit_url = self.url + "/answer"
        fname = "{}_{:02d}".format(self.year, self.day)
        prefix = os.path.join(self.user.memo_dir, fname)
        self.input_data_fname = prefix + "_input.txt"
        self.example_input_data_fname = prefix + "_example_input.txt"
        self.answer_a_fname = prefix + "a_answer.txt"
        self.answer_b_fname = prefix + "b_answer.txt"
        self.incorrect_answers_a_fname = prefix + "a_bad_answers.txt"
        self.incorrect_answers_b_fname = prefix + "b_bad_answers.txt"
        self.title_fname = os.path.join(
            AOCD_DATA_DIR,
            "titles",
            "{}_{:02d}.txt".format(self.year, self.day)
        )
        self._title = ""

    @property
    def user(self):
        return self._user

    @property
    def input_data(self):
        try:
            # use previously received data, if any existing
            with io.open(self.input_data_fname, encoding="utf-8") as f:
                data = f.read()
        except (IOError, OSError) as err:
            if err.errno != errno.ENOENT:
                raise
        else:
            log.debug("reusing existing data %s", self.input_data_fname)
            return data.rstrip("\r\n")
        sanitized = "..." + self.user.token[-4:]
        log.info("getting data year=%s day=%s token=%s", self.year, self.day, sanitized)
        response = requests.get(
            url=self.input_data_url, cookies=self.user.auth, headers=USER_AGENT
        )
        if not response.ok:
            if response.status_code == 404:
                raise PuzzleLockedError("{}/{:02d} not available yet".format(self.year, self.day))
            log.error("got %s status code token=%s", response.status_code, sanitized)
            log.error(response.text)
            raise AocdError("Unexpected response")
        data = response.text
        log.info("saving the puzzle input token=%s", sanitized)
        atomic_write_file(self.input_data_fname, data)
        return data.rstrip("\r\n")

    @property
    def example_data(self):
        try:
            with io.open(self.example_input_data_fname, encoding="utf-8") as f:
                data = f.read()
        except (IOError, OSError) as err:
            if err.errno != errno.ENOENT:
                raise
        else:
            log.debug("reusing existing example data %s", self.example_input_data_fname)
            return data.rstrip("\r\n")
        soup = self._soup()
        try:
            data = soup.pre.text
        except Exception:
            log.info("unable to find example data year=%s day=%s", self.year, self.day)
            data = ""
        log.info("saving the example data")
        atomic_write_file(self.example_input_data_fname, data)
        return data.rstrip("\r\n")

    @property
    def title(self):
        if os.path.isfile(self.title_fname):
            with io.open(self.title_fname, encoding="utf-8") as f:
                self._title = f.read().strip()
        else:
            self._save_title()
        return self._title

    def _repr_pretty_(self, p, cycle):
        # this is a hook for IPython's pretty-printer
        if cycle:
            p.text(repr(self))
        else:
            template = "<{0}({1.year}, {1.day}) at {2} - {1.title}>"
            p.text(template.format(type(self).__name__, self, hex(id(self))))

    @property
    def answer_a(self):
        try:
            return self._get_answer(part="a")
        except PuzzleUnsolvedError:
            raise AttributeError("answer_a")

    @answer_a.setter
    def answer_a(self, val):
        if isinstance(val, int):
            val = str(val)
        if getattr(self, "answer_a", None) == val:
            return
        self._submit(value=val, part="a")

    @property
    def answered_a(self):
        return bool(getattr(self, "answer_a", None))

    @property
    def answer_b(self):
        try:
            return self._get_answer(part="b")
        except PuzzleUnsolvedError:
            raise AttributeError("answer_b")

    @answer_b.setter
    def answer_b(self, val):
        if isinstance(val, int):
            val = str(val)
        if getattr(self, "answer_b", None) == val:
            return
        self._submit(value=val, part="b")

    @property
    def answered_b(self):
        return bool(getattr(self, "answer_b", None))

    def answered(self, part):
        if part == "a":
            return bool(getattr(self, "answer_a", None))
        if part == "b":
            return bool(getattr(self, "answer_b", None))
        raise AocdError('part must be "a" or "b"')

    @property
    def answers(self):
        return self.answer_a, self.answer_b

    @answers.setter
    def answers(self, val):
        self.answer_a, self.answer_b = val

    @property
    def incorrect_answers_a(self):
        return self._get_bad_guesses(part="a")

    @property
    def incorrect_answers_b(self):
        return self._get_bad_guesses(part="b")

    def _submit(self, value, part, reopen=True, quiet=False):
        if value in {u"", b"", None, b"None", u"None"}:
            raise AocdError("cowardly refusing to submit non-answer: {!r}".format(value))
        value = str(value)
        part = str(part).replace("1", "a").replace("2", "b").lower()
        if part not in {"a", "b"}:
            raise AocdError('part must be "a" or "b"')
        bad_guesses = getattr(self, "incorrect_answers_" + part)
        if value in bad_guesses:
            if not quiet:
                msg = "aocd will not submit that answer again. You've previously guessed {} and the server responded:"
                print(msg.format(value))
                cprint(bad_guesses[value], "red")
            return
        if part == "b" and value == getattr(self, "answer_a", None):
            raise AocdError("cowardly refusing to re-submit answer_a ({}) for part b".format(value))
        url = self.submit_url
        check_guess = self._check_guess_against_existing(value, part)
        if check_guess is not None:
            if quiet:
                log.info(check_guess)
            else:
                print(check_guess)
            return
        sanitized = "..." + self.user.token[-4:]
        log.info("posting %r to %s (part %s) token=%s", value, url, part, sanitized)
        level = {"a": 1, "b": 2}[part]
        response = requests.post(
            url=url,
            cookies=self.user.auth,
            headers=USER_AGENT,
            data={"level": level, "answer": value},
        )
        if not response.ok:
            log.error("got %s status code", response.status_code)
            log.error(response.text)
            raise AocdError("Non-200 response for POST: {}".format(response))
        soup = bs4.BeautifulSoup(response.text, "html.parser")
        message = soup.article.text
        color = None
        if "That's the right answer" in message:
            color = "green"
            if reopen:
                # So you can read part B on the website...
                part_b_url = self.url + "#part2"
                log.info("reopening to %s", part_b_url)
                webbrowser.open(part_b_url)
            if not (self.day == 25 and part == "b"):
                self._save_correct_answer(value=value, part=part)
            if self.day == 25 and part == "a":
                log.debug("checking if got 49 stars already for year %s...", self.year)
                my_stats = self.user.get_stats(self.year)
                n_stars = sum(len(val) for val in my_stats.values())
                if n_stars == 49:
                    log.info("Got 49 stars already, getting 50th...")
                    self._submit(value="done", part="b", reopen=reopen, quiet=quiet)
                else:
                    log.info("Got %d stars, need %d more for part b", n_stars, 49 - n_stars)
        elif "Did you already complete it" in message:
            color = "yellow"
        elif "That's not the right answer" in message:
            color = "red"
            try:
                context = soup.article.span.code.text
            except AttributeError:
                context = soup.article.text
            log.warning("wrong answer: %s", context)
            self._save_incorrect_answer(value=value, part=part, extra=soup.article.text)
        elif "You gave an answer too recently" in message:
            wait_pattern = r"You have (?:(\d+)m )?(\d+)s left to wait"
            try:
                [(minutes, seconds)] = re.findall(wait_pattern, message)
            except ValueError:
                log.warning(message)
                color = "red"
            else:
                wait_time = int(seconds)
                if minutes:
                    wait_time += 60 * int(minutes)
                log.info("Waiting %d seconds to autoretry", wait_time)
                time.sleep(wait_time)
                return self._submit(value=value, part=part, reopen=reopen, quiet=quiet)
        if not quiet:
            cprint(message, color=color)
        return response

    def _check_guess_against_existing(self, guess, part):
        try:
            answer = self._get_answer(part=part)
            if answer == "":
                return None
        except PuzzleUnsolvedError:
            return None

        if answer == guess:
            template = "Part {part} already solved with same answer: {answer}"
        else:
            template = colored("Part {part} already solved with different answer: {answer}", "red")

        return template.format(part=part, answer=answer)

    def _save_correct_answer(self, value, part):
        fname = getattr(self, "answer_{}_fname".format(part))
        _ensure_intermediate_dirs(fname)
        txt = value.strip()
        msg = "saving"
        if os.path.isfile(fname):
            with open(fname) as f:
                prev = f.read()
            if txt == prev:
                msg = "the correct answer for %d/%02d part %s was already saved"
                log.debug(msg, self.year, self.day, part)
                return
            msg = "overwriting"
        msg += " the correct answer for %d/%02d part %s: %s"
        log.info(msg, self.year, self.day, part, txt)
        with open(fname, "w") as f:
            f.write(txt)

    def _save_incorrect_answer(self, value, part, extra=""):
        fname = getattr(self, "incorrect_answers_{}_fname".format(part))
        _ensure_intermediate_dirs(fname)
        msg = "appending an incorrect answer for %d/%02d part %s"
        log.info(msg, self.year, self.day, part)
        with open(fname, "a") as f:
            f.write(value.strip() + " " + extra.replace("\n", " ") + "\n")

    def _save_title(self, soup=None):
        if soup is None:
            soup = self._soup()
        if soup.h2 is None:
            log.warning("heading not found")
            return
        txt = soup.h2.text.strip("- ")
        prefix = "Day {}: ".format(self.day)
        if not txt.startswith(prefix):
            log.error("weird heading, wtf? %s", txt)
            return
        txt = self._title = txt[len(prefix):]
        _ensure_intermediate_dirs(self.title_fname)
        with io.open(self.title_fname, "w", encoding="utf-8") as f:
            print(txt, file=f)

    def _get_answer(self, part):
        """
        Note: Answers are only revealed after a correct submission. If you've
        not already solved the puzzle, PuzzleUnsolvedError will be raised.
        """
        if part == "b" and self.day == 25:
            return ""
        answer_fname = getattr(self, "answer_{}_fname".format(part))
        if os.path.isfile(answer_fname):
            with open(answer_fname) as f:
                return f.read().strip()
        # scrape puzzle page for any previously solved answers
        soup = self._soup()
        if not self._title:
            # may as well save this while we're here
            self._save_title(soup=soup)
        hit = "Your puzzle answer was"
        paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
        if paras:
            parta_correct_answer = paras[0].code.text
            self._save_correct_answer(value=parta_correct_answer, part="a")
            if len(paras) > 1:
                _p1, p2 = paras
                partb_correct_answer = p2.code.text
                self._save_correct_answer(value=partb_correct_answer, part="b")
        if os.path.isfile(answer_fname):
            with open(answer_fname) as f:
                return f.read().strip()
        msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
        raise PuzzleUnsolvedError(msg)

    def _get_bad_guesses(self, part):
        fname = getattr(self, "incorrect_answers_{}_fname".format(part))
        result = {}
        if os.path.isfile(fname):
            with open(fname) as f:
                for line in f:
                    answer, _sep, extra = line.strip().partition(" ")
                    result[answer] = extra
        return result

    def solve(self):
        try:
            [ep] = pkg_resources.iter_entry_points(group="adventofcode.user")
        except ValueError:
            raise AocdError("Puzzle.solve is only available with unique entry point")
        f = ep.load()
        return f(year=self.year, day=self.day, data=self.input_data)

    def solve_for(self, plugin):
        for ep in pkg_resources.iter_entry_points(group="adventofcode.user"):
            if ep.name == plugin:
                break
        else:
            raise AocdError("No entry point found for '{}'".format(plugin))
        f = ep.load()
        return f(year=self.year, day=self.day, data=self.input_data)

    @property
    def url(self):
        return URL.format(year=self.year, day=self.day)

    def view(self):
        webbrowser.open(self.url)

    @property
    def my_stats(self):
        stats = self.user.get_stats(years=[self.year])
        if (self.year, self.day) not in stats:
            raise PuzzleUnsolvedError
        result = stats[self.year, self.day]
        return result

    def _soup(self):
        response = requests.get(self.url, cookies=self.user.auth, headers=USER_AGENT)
        response.raise_for_status()
        soup = bs4.BeautifulSoup(response.text, "html.parser")
        return soup

    @property
    def easter_eggs(self):
        soup = self._soup()
        # Most puzzles have exactly one easter-egg, but 2018/12/17 had two..
        eggs = soup.find_all(["span", "em", "code"], class_=None, attrs={"title": bool})
        return eggs


def _parse_duration(s):
    """Parse a string like 01:11:16 (hours, minutes, seconds) into a timedelta"""
    if s == ">24h":
        return timedelta(hours=24)
    h, m, s = [int(x) for x in s.split(":")]
    return timedelta(hours=h, minutes=m, seconds=s)


def _load_users():
    path = os.path.join(AOCD_CONFIG_DIR, "tokens.json")
    try:
        with open(path) as f:
            users = json.load(f)
    except IOError:
        users = {"default": default_user().token}
    return users
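A hedged sketch of driving the Puzzle model above; the year/day/answer values are invented, and assigning to answer_a posts via _submit as defined earlier:

# Sketch: typical Puzzle workflow (all values are examples).
from aocd.models import Puzzle             # assumed import path

puzzle = Puzzle(year=2022, day=1)
print(puzzle.title)        # scraped once, then memoized under AOCD_DATA_DIR/titles
data = puzzle.input_data   # cached on disk after the first fetch
puzzle.answer_a = 12345    # ints are coerced to str, then submitted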
@ -0,0 +1,42 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging

from .get import current_day
from .get import most_recent_year
from .models import default_user
from .models import Puzzle
from .models import User


log = logging.getLogger(__name__)


def submit(
    answer, part=None, day=None, year=None, session=None, reopen=True, quiet=False
):
    if session is None:
        user = default_user()
    else:
        user = User(token=session)
    if day is None:
        day = current_day()
    if year is None:
        year = most_recent_year()
    puzzle = Puzzle(year=year, day=day, user=user)
    if part is None:
        # guess if user is submitting for part a or part b
        answer_a = getattr(puzzle, "answer_a", None)
        log.warning("answer a: %s", answer_a)
        if answer_a is None:
            log.warning("submitting for part a")
            part = "a"
        else:
            log.warning("submitting for part b (part a is already completed)")
            part = "b"
    response = puzzle._submit(value=answer, part=part, reopen=reopen, quiet=quiet)
    return response
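A hedged sketch of the submit helper above; when part is omitted it is inferred from whether answer_a is already on record (import path assumed, answer values invented):

# Sketch: explicit part vs. inferred part.
from aocd import submit                    # assumed top-level re-export

submit(54321, part="a", day=1, year=2022)  # explicit
submit(98765, day=1, year=2022)            # part=None: "a" if unanswered, else "b"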
@ -0,0 +1,252 @@
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import itertools
import logging
import os
import sys
import tempfile
import time
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime

import pebble.concurrent
import pkg_resources
from functools import partial
from termcolor import colored

from .exceptions import AocdError
from .models import AOCD_CONFIG_DIR
from .models import _load_users
from .models import Puzzle
from .utils import AOC_TZ
from .utils import _cli_guess


# from https://adventofcode.com/about
# every problem has a solution that completes in at most 15 seconds on ten-year-old hardware


DEFAULT_TIMEOUT = 60
log = logging.getLogger(__name__)


def main():
    entry_points = pkg_resources.iter_entry_points(group="adventofcode.user")
    plugins = OrderedDict([(ep.name, ep) for ep in entry_points])
    aoc_now = datetime.now(tz=AOC_TZ)
    years = range(2015, aoc_now.year + int(aoc_now.month == 12))
    days = range(1, 26)
    users = _load_users()
    log_levels = "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
    parser = ArgumentParser(description="AoC runner")
    parser.add_argument("-p", "--plugins", nargs="+", choices=plugins)
    parser.add_argument("-y", "--years", type=int, nargs="+", choices=years)
    parser.add_argument("-d", "--days", type=int, nargs="+", choices=days)
    parser.add_argument("-u", "--users", nargs="+", choices=users, type=partial(_cli_guess, choices=users))
    parser.add_argument("-t", "--timeout", type=int, default=DEFAULT_TIMEOUT)
    parser.add_argument("-s", "--no-submit", action="store_true", help="disable autosubmit")
    parser.add_argument("-r", "--reopen", action="store_true", help="open browser on NEW solves")
    parser.add_argument("-q", "--quiet", action="store_true", help="capture output from runner")
    parser.add_argument("--log-level", default="WARNING", choices=log_levels)
    args = parser.parse_args()

    if not users:
        path = os.path.join(AOCD_CONFIG_DIR, "tokens.json")
        print(
            "There are no datasets available to use.\n"
            "Either export your AOC_SESSION or put some auth "
            "tokens into {}".format(path),
            file=sys.stderr,
        )
        sys.exit(1)
    if not plugins:
        print(
            "There are no plugins available. Install some package(s) with a registered 'adventofcode.user' entry-point.\n"
            "See https://github.com/wimglenn/advent-of-code-sample for an example plugin package structure.",
            file=sys.stderr,
        )
        sys.exit(1)
    logging.basicConfig(level=getattr(logging, args.log_level))
    rc = run_for(
        plugins=args.plugins or list(plugins),
        years=args.years or years,
        days=args.days or days,
        datasets={k: users[k] for k in (args.users or users)},
        timeout=args.timeout,
        autosubmit=not args.no_submit,
        reopen=args.reopen,
        capture=args.quiet,
    )
    sys.exit(rc)


def _timeout_wrapper(f, capture=False, timeout=DEFAULT_TIMEOUT, *args, **kwargs):
    func = pebble.concurrent.process(daemon=False, timeout=timeout)(_process_wrapper)
    return func(f, capture, *args, **kwargs)


def _process_wrapper(f, capture=False, *args, **kwargs):
    # allows to run f in a process which can be killed if it misbehaves
    prev_stdout = sys.stdout
    prev_stderr = sys.stderr
    if capture:
        hush = open(os.devnull, "w")
        sys.stdout = sys.stderr = hush
    try:
        result = f(*args, **kwargs)
    finally:
        if capture:
            sys.stdout = prev_stdout
            sys.stderr = prev_stderr
            hush.close()
    return result


def run_with_timeout(entry_point, timeout, progress, dt=0.1, capture=False, **kwargs):
    spinner = itertools.cycle(r"\|/-")
    line = elapsed = format_time(0)
    t0 = time.time()
    func = entry_point.load()
    future = _timeout_wrapper(func, capture=capture, timeout=timeout, **kwargs)
    while not future.done():
        if progress is not None:
            line = "\r" + elapsed + " " + progress + " " + next(spinner)
            sys.stderr.write(line)
            sys.stderr.flush()
        time.sleep(dt)
        elapsed = format_time(time.time() - t0, timeout)
    walltime = time.time() - t0
    try:
        a, b = future.result()
    except Exception as err:
        a = b = ""
        error = repr(err)[:50]
    else:
        error = ""
        # longest correct answer seen so far has been 32 chars
        a = str(a)[:50]
        b = str(b)[:50]
    if progress is not None:
        sys.stderr.write("\r" + " " * len(line) + "\r")
        sys.stderr.flush()
    return a, b, walltime, error


def format_time(t, timeout=DEFAULT_TIMEOUT):
    if t < timeout / 4:
        color = "green"
    elif t < timeout / 2:
        color = "yellow"
    else:
        color = "red"
    runtime = colored("{: 7.2f}s".format(t), color)
    return runtime


def run_one(year, day, input_data, entry_point, timeout=DEFAULT_TIMEOUT, progress=None, capture=False):
    prev = os.getcwd()
    scratch = tempfile.mkdtemp(prefix="{}-{:02d}-".format(year, day))
    os.chdir(scratch)
    assert not os.path.exists("input.txt")
    try:
        with open("input.txt", "w") as f:
            f.write(input_data)
        a, b, walltime, error = run_with_timeout(
            entry_point=entry_point,
            timeout=timeout,
            year=year,
            day=day,
            data=input_data,
            progress=progress,
            capture=capture,
        )
    finally:
        os.unlink("input.txt")
        os.chdir(prev)
        try:
            os.rmdir(scratch)
        except Exception as err:
            log.warning("failed to remove scratch %s (%s: %s)", scratch, type(err), err)
    return a, b, walltime, error


def run_for(plugins, years, days, datasets, timeout=DEFAULT_TIMEOUT, autosubmit=True, reopen=False, capture=False):
    aoc_now = datetime.now(tz=AOC_TZ)
    all_entry_points = pkg_resources.iter_entry_points(group="adventofcode.user")
    entry_points = {ep.name: ep for ep in all_entry_points if ep.name in plugins}
    it = itertools.product(years, days, plugins, datasets)
    userpad = 3
    datasetpad = 8
    n_incorrect = 0
    if entry_points:
        userpad = len(max(entry_points, key=len))
    if datasets:
        datasetpad = len(max(datasets, key=len))
    for year, day, plugin, dataset in it:
        if year == aoc_now.year and day > aoc_now.day:
            continue
        token = datasets[dataset]
        entry_point = entry_points[plugin]
        os.environ["AOC_SESSION"] = token
        puzzle = Puzzle(year=year, day=day)
        title = puzzle.title
        progress = "{}/{:<2d} - {:<40} {:>%d}/{:<%d}"
        progress %= (userpad, datasetpad)
        progress = progress.format(year, day, title, plugin, dataset)
        a, b, walltime, error = run_one(
            year=year,
            day=day,
            input_data=puzzle.input_data,
            entry_point=entry_point,
            timeout=timeout,
            progress=progress,
            capture=capture,
        )
        runtime = format_time(walltime, timeout)
        line = " ".join([runtime, progress])
        if error:
            assert a == b == ""
            icon = colored("✖", "red")
            n_incorrect += 1
            line += " {icon} {error}".format(icon=icon, error=error)
        else:
            result_template = " {icon} part {part}: {answer}"
            for answer, part in zip((a, b), "ab"):
                if day == 25 and part == "b":
                    # there's no part b on christmas day, skip
                    continue
                expected = None
                try:
                    expected = getattr(puzzle, "answer_" + part)
                except AttributeError:
                    post = part == "a" or (part == "b" and puzzle.answered_a)
                    if autosubmit and post:
                        try:
                            puzzle._submit(answer, part, reopen=reopen, quiet=True)
                        except AocdError as err:
                            log.warning("error submitting - %s", err)
                        try:
                            expected = getattr(puzzle, "answer_" + part)
                        except AttributeError:
                            pass
                correct = expected is not None and str(expected) == answer
                icon = colored("✔", "green") if correct else colored("✖", "red")
                correction = ""
                if not correct:
                    n_incorrect += 1
                    if expected is None:
                        icon = colored("?", "magenta")
                        correction = "(correct answer unknown)"
                    else:
                        correction = "(expected: {})".format(expected)
                    answer = "{} {}".format(answer, correction)
                if part == "a":
                    answer = answer.ljust(30)
                line += result_template.format(icon=icon, part=part, answer=answer)
        print(line)
    return n_incorrect
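The runner drives plugins registered under the "adventofcode.user" entry-point group and calls each one as f(year=..., day=..., data=...), unpacking a two-tuple from the result (see `a, b = future.result()` above). A hedged sketch of a conforming plugin, with the package name and solving logic invented:

# Hypothetical setup.cfg for a plugin package:
#   [options.entry_points]
#   adventofcode.user =
#       my_aoc = my_aoc:solve

def solve(year, day, data):
    # must return a (part_a, part_b) tuple; placeholder logic below
    part_a = len(data.splitlines())
    part_b = data.count("x")
    return part_a, part_b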
@ -0,0 +1,28 @@
"""
Transforms of aocd raw input text to something more useful for speed-solving.
Every function here needs to accept one positional argument and return the
'massaged' data.
"""

__all__ = ["lines", "numbers"]

import re


def lines(data):
    return data.splitlines()


def numbers(data):
    result = []
    for line in data.splitlines():
        matches = [int(n) for n in re.findall(r"-?\d+", line)]
        if matches:
            result.append(matches)
    if all(len(n) == 1 for n in result):
        # flatten the list if there is always 1 number per line
        result = [n for [n] in result]
    if len(result) == 1:
        # un-nest the list if there is only one line
        [result] = result
    return result
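A few worked examples of what numbers() returns for different input shapes, assuming the module is importable as aocd.transforms:

from aocd.transforms import numbers        # assumed import path

numbers("1\n2\n3")    # -> [1, 2, 3]        (one number per line: flattened)
numbers("1 2\n3 4")   # -> [[1, 2], [3, 4]] (kept nested, per-line lists)
numbers("10 -5 7")    # -> [10, -5, 7]      (single line: un-nested)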
@ -0,0 +1,138 @@
import argparse
import bs4
import errno
import logging
import os
import requests
import shutil
import sys
import tempfile
import time
import tzlocal
from datetime import datetime
from itertools import cycle
from dateutil.tz import gettz

from .exceptions import DeadTokenError


log = logging.getLogger(__name__)
AOC_TZ = gettz("America/New_York")


def _ensure_intermediate_dirs(fname):
    parent = os.path.dirname(os.path.expanduser(fname))
    try:
        os.makedirs(parent, exist_ok=True)
    except TypeError:
        # exist_ok not avail on Python 2
        try:
            os.makedirs(parent)
        except (IOError, OSError) as err:
            if err.errno != errno.EEXIST:
                raise


def blocker(quiet=False, dt=0.1, datefmt=None, until=None):
    """
    This function just blocks until the next puzzle unlocks.
    Pass `quiet=True` to disable the spinner etc.
    Pass `dt` (seconds) to update the status txt more/less frequently.
    Pass until=(year, day) to block until some other unlock date.
    """
    aoc_now = datetime.now(tz=AOC_TZ)
    month = 12
    if until is not None:
        year, day = until
    else:
        year = aoc_now.year
        day = aoc_now.day + 1
        if aoc_now.month < 12:
            day = 1
        elif aoc_now.day >= 25:
            day = 1
            year += 1
    unlock = datetime(year, month, day, tzinfo=AOC_TZ)
    if datetime.now(tz=AOC_TZ) > unlock:
        # it should already be unlocked - nothing to do
        return
    spinner = cycle(r"\|/-")
    localzone = tzlocal.get_localzone()
    local_unlock = unlock.astimezone(tz=localzone)
    if datefmt is None:
        # %-I does not work on Windows, strip leading zeros manually
        local_unlock = local_unlock.strftime("%I:%M %p").lstrip("0")
    else:
        local_unlock = local_unlock.strftime(datefmt)
    msg = "{} Unlock day %s at %s ({} remaining)" % (unlock.day, local_unlock)
    while datetime.now(tz=AOC_TZ) < unlock:
        remaining = unlock - datetime.now(tz=AOC_TZ)
        remaining = str(remaining).split(".")[0]  # trim microseconds
        if not quiet:
            sys.stdout.write(msg.format(next(spinner), remaining))
            sys.stdout.flush()
        time.sleep(dt)
        if not quiet:
            sys.stdout.write("\r")
    if not quiet:
        # clears the "Unlock day" countdown line from the terminal
        sys.stdout.write("\r".ljust(80) + "\n")
        sys.stdout.flush()


def get_owner(token):
    """parse owner of the token. raises DeadTokenError if the token is expired/invalid.
    returns a string like authtype.username.userid"""
    url = "https://adventofcode.com/settings"
    response = requests.get(url, cookies={"session": token}, allow_redirects=False)
    if response.status_code != 200:
        # bad tokens will 302 redirect to main page
        log.info("session %s is dead - status_code=%s", token, response.status_code)
        raise DeadTokenError("the auth token ...{} is expired or not functioning".format(token[-4:]))
    soup = bs4.BeautifulSoup(response.text, "html.parser")
    auth_source = "unknown"
    username = "unknown"
    userid = soup.code.text.split("-")[1]
    for span in soup.find_all("span"):
        if span.text.startswith("Link to "):
            auth_source = span.text[8:]
            auth_source = auth_source.replace("https://twitter.com/", "twitter/")
            auth_source = auth_source.replace("https://github.com/", "github/")
            auth_source = auth_source.replace("https://www.reddit.com/u/", "reddit/")
            auth_source, sep, username = auth_source.partition("/")
            if not sep:
                log.warning("problem in parsing %s", span.text)
                auth_source = username = "unknown"
            log.debug("found %r", span.text)
        elif span.img is not None:
            if "googleusercontent.com" in span.img.attrs.get("src", ""):
                log.debug("found google user content img, getting google username")
                auth_source = "google"
                username = span.text
                break
    result = ".".join([auth_source, username, userid])
    return result


def atomic_write_file(fname, contents_str):
    """Atomically write a string to a file by writing it to a temporary file, and then
    renaming it to the final destination name. This solves a race condition where existence
    of a file doesn't necessarily mean the contents are all correct yet."""
    _ensure_intermediate_dirs(fname)
    with tempfile.NamedTemporaryFile(mode="w", dir=os.path.dirname(fname), delete=False) as f:
        log.debug("writing to tempfile @ %s", f.name)
        f.write(contents_str)
    log.debug("moving %s -> %s", f.name, fname)
    shutil.move(f.name, fname)


def _cli_guess(choice, choices):
    if choice in choices:
        return choice
    candidates = [c for c in choices if choice in c]
    if len(candidates) > 1:
        raise argparse.ArgumentTypeError("{} ambiguous (could be {})".format(choice, ", ".join(candidates)))
    elif not candidates:
        raise argparse.ArgumentTypeError("invalid choice {!r} (choose from {})".format(choice, ", ".join(choices)))
    [result] = candidates
    return result
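A hedged sketch exercising two helpers above; the path and the unlock date are invented. atomic_write_file never leaves a half-written file at the target: readers see either the old contents or the complete new contents, because the rename happens only after the tempfile is fully written:

from aocd.utils import atomic_write_file, blocker  # assumed import path

atomic_write_file("/tmp/aocd-demo/answer.txt", "12345")  # parent dirs auto-created
blocker(quiet=True, until=(2022, 2))  # sleep until 2022 day 2 unlocks, no spinner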
@ -0,0 +1 @@
__version__ = "1.2.3"
@ -0,0 +1 @@
pip
@ -0,0 +1,15 @@
Apache Software License 2.0

Copyright (c) 2020, Paul Ganssle (Google)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,114 @@
Metadata-Version: 2.1
Name: backports.zoneinfo
Version: 0.2.1
Summary: Backport of the standard library zoneinfo module
Home-page: https://github.com/pganssle/zoneinfo
Author: Python Software Foundation
Author-email: datetime-sig@python.org
License: Apache-2.0
Project-URL: Source, https://github.com/pganssle/zoneinfo
Project-URL: Documentation, https://zoneinfo.readthedocs.io/en/latest/
Project-URL: Bug Reports, https://github.com/pganssle/zoneinfo/issues
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Requires-Python: >=3.6
Description-Content-Type: text/markdown
Requires-Dist: importlib-resources ; python_version < "3.7"
Provides-Extra: tzdata
Requires-Dist: tzdata ; extra == 'tzdata'

# `backports.zoneinfo`: Backport of the standard library module `zoneinfo`

This package was originally the reference implementation for [PEP 615](https://www.python.org/dev/peps/pep-0615/), which proposes support for the IANA time zone database in the standard library, and now serves as a backport to Python 3.6+ (including PyPy).

This exposes the `backports.zoneinfo` module, which is a backport of the [`zoneinfo`](https://docs.python.org/3.9/library/zoneinfo.html#module-zoneinfo) module. The backport's documentation can be found [on readthedocs](https://zoneinfo.readthedocs.io/en/latest/).

The module uses the system time zone data if available, and falls back to the [`tzdata`](https://tzdata.readthedocs.io/en/latest/) package (available [on PyPI](https://pypi.org/project/tzdata/)) if installed.
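
A quick way to check which source you'll get (my own sketch, not from the README; it assumes only that `tzdata` may or may not be installed):

```python
# Sketch: probe for the tzdata fallback described above.
try:
    import tzdata  # noqa: F401  # data-only package, optional
    print("tzdata fallback available")
except ImportError:
    print("no tzdata; relying on system zoneinfo files on TZPATH")
```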

## Installation and depending on this library

This module is called [`backports.zoneinfo`](https://pypi.org/project/backports.zoneinfo) on PyPI. To install it in your local environment, use:

```
pip install backports.zoneinfo
```

Or (particularly on Windows), you can also use the `tzdata` extra (which basically just declares a dependency on `tzdata`, so this doesn't actually save you any typing 😅):

```
pip install backports.zoneinfo[tzdata]
```

If you want to use this in your application, it is best to use [PEP 508 environment markers](https://www.python.org/dev/peps/pep-0508/#environment-markers) to declare a dependency *conditional on the Python version*:

```
backports.zoneinfo;python_version<"3.9"
```

Support for `backports.zoneinfo` in Python 3.9+ is currently minimal, since it is expected that you would use the standard library `zoneinfo` module instead.

## Use

The `backports.zoneinfo` module should be a drop-in replacement for the Python 3.9 standard library module `zoneinfo`. If you do not support anything earlier than Python 3.9, **you do not need this library**; if you are supporting Python 3.6+, you may want to use this idiom to "fall back" to ``backports.zoneinfo``:

```python
try:
    import zoneinfo
except ImportError:
    from backports import zoneinfo
```

To get access to time zones with this module, construct a `ZoneInfo` object and attach it to your datetime:

```python
>>> from backports.zoneinfo import ZoneInfo
>>> from datetime import datetime, timedelta, timezone
>>> dt = datetime(1992, 3, 1, tzinfo=ZoneInfo("Europe/Minsk"))
>>> print(dt)
1992-03-01 00:00:00+02:00
>>> print(dt.utcoffset())
2:00:00
>>> print(dt.tzname())
EET
```

Arithmetic works as expected without the need for a "normalization" step:

```python
>>> dt += timedelta(days=90)
>>> print(dt)
1992-05-30 00:00:00+03:00
>>> dt.utcoffset()
datetime.timedelta(seconds=10800)
>>> dt.tzname()
'EEST'
```

Ambiguous and imaginary times are handled using the `fold` attribute added in [PEP 495](https://www.python.org/dev/peps/pep-0495/):

```python
>>> dt = datetime(2020, 11, 1, 1, tzinfo=ZoneInfo("America/Chicago"))
>>> print(dt)
2020-11-01 01:00:00-05:00
>>> print(dt.replace(fold=1))
2020-11-01 01:00:00-06:00

>>> UTC = timezone.utc
>>> print(dt.astimezone(UTC))
2020-11-01 06:00:00+00:00
>>> print(dt.replace(fold=1).astimezone(UTC))
2020-11-01 07:00:00+00:00
```
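
The README stops at the ambiguous (fall-back) case; for completeness, here is my own sketch of the imaginary (spring-forward) gap under the same PEP 495 rules, where `fold` picks which side's offset applies:

```python
>>> dt = datetime(2020, 3, 8, 2, 30, tzinfo=ZoneInfo("America/Chicago"))
>>> print(dt.utcoffset())  # fold=0: the pre-gap (CST) offset
-1 day, 18:00:00
>>> print(dt.replace(fold=1).utcoffset())  # fold=1: the post-gap (CDT) offset
-1 day, 19:00:00
```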

# Contributing

Currently we are not accepting contributions to this repository because we have not put the CLA in place and we would like to avoid complicating the process of adoption into the standard library. Contributions to [CPython](https://github.com/python/cpython) will eventually be backported to this repository — see [the Python developer's guide](https://devguide.python.org/) for more information on how to contribute to CPython.

@ -0,0 +1,22 @@
backports.zoneinfo-0.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
backports.zoneinfo-0.2.1.dist-info/LICENSE,sha256=S6GyCFg_LmwtX0yuFLLXnNpfuBB-kVxWT16r3dv7iG0,607
backports.zoneinfo-0.2.1.dist-info/LICENSE_APACHE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
backports.zoneinfo-0.2.1.dist-info/METADATA,sha256=KGF_lTJSMsl-lWaaSl4jC5m_ovX4PIwsEwNMeEaaEuU,4721
backports.zoneinfo-0.2.1.dist-info/RECORD,,
backports.zoneinfo-0.2.1.dist-info/WHEEL,sha256=-ODc2a2AO_YJ5T46NOquHfWjRM7bQvlt-f3zRaLBjL4,105
backports.zoneinfo-0.2.1.dist-info/top_level.txt,sha256=cGjaLMOoBR1FK0ApojtzWVmViTtJ7JGIK_HwXiEsvtU,10
backports/__init__.py,sha256=LaBURxHYOi2tcGccVQF3Za-Wyl8ZwP7SBOZnPs_A4CI,232
backports/__pycache__/__init__.cpython-38.pyc,,
backports/zoneinfo/__init__.py,sha256=87Ri6IM7oEs4BcaFf6cavbk_jY5VdOurbEoNiDUsIsU,1284
backports/zoneinfo/__init__.pyi,sha256=-LKhC2vJGDbTRa2we0jQAqKlOxwAcxbiq963O66WGDI,1279
backports/zoneinfo/__pycache__/__init__.cpython-38.pyc,,
backports/zoneinfo/__pycache__/_common.cpython-38.pyc,,
backports/zoneinfo/__pycache__/_tzpath.cpython-38.pyc,,
backports/zoneinfo/__pycache__/_version.cpython-38.pyc,,
backports/zoneinfo/__pycache__/_zoneinfo.cpython-38.pyc,,
backports/zoneinfo/_common.py,sha256=pvDrEPbvAunprHsYlFl54fbbrpwDc4JzoFXJ0W7qFhw,5640
backports/zoneinfo/_czoneinfo.cp38-win_amd64.pyd,sha256=3Si24065M1yrzZ4zlquyd5H0nXzZIx8lntGl41Yv5Ic,34816
backports/zoneinfo/_tzpath.py,sha256=DNAa1JnTub2gXYUlIqunzhQOkeFRkUG_m0s_EffcuUw,6067
backports/zoneinfo/_version.py,sha256=7VyHp48OUTQQUylqTtD2KYmDnZ1N5qU2-WFiy4BFYEo,23
backports/zoneinfo/_zoneinfo.py,sha256=OJlLi8y3uZDJNdmd1dM14F2bIlKDTVUYPvmiB6qW1fM,25107
backports/zoneinfo/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: false
Tag: cp38-cp38-win_amd64

@ -0,0 +1 @@
backports

@ -0,0 +1,5 @@
# A Python "namespace package" http://www.python.org/dev/peps/pep-0382/
# This always goes inside of a namespace package's __init__.py
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)  # type: ignore

@ -0,0 +1,49 @@
__all__ = [
    "ZoneInfo",
    "reset_tzpath",
    "available_timezones",
    "TZPATH",
    "ZoneInfoNotFoundError",
    "InvalidTZPathWarning",
]
import sys

from . import _tzpath
from ._common import ZoneInfoNotFoundError
from ._version import __version__

try:
    from ._czoneinfo import ZoneInfo
except ImportError:  # pragma: nocover
    from ._zoneinfo import ZoneInfo

reset_tzpath = _tzpath.reset_tzpath
available_timezones = _tzpath.available_timezones
InvalidTZPathWarning = _tzpath.InvalidTZPathWarning

if sys.version_info < (3, 7):
    # Module-level __getattr__ was added in Python 3.7, so instead of lazily
    # populating TZPATH on every access, we will register a callback with
    # reset_tzpath to update the top-level tuple.
    TZPATH = _tzpath.TZPATH

    def _tzpath_callback(new_tzpath):
        global TZPATH
        TZPATH = new_tzpath

    _tzpath.TZPATH_CALLBACKS.append(_tzpath_callback)
    del _tzpath_callback

else:

    def __getattr__(name):
        if name == "TZPATH":
            return _tzpath.TZPATH
        else:
            raise AttributeError(
                f"module {__name__!r} has no attribute {name!r}"
            )

    def __dir__():
        return sorted(list(globals()) + ["TZPATH"])
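
Side note: the 3.7+ branch above leans on PEP 562's module-level `__getattr__` so that `TZPATH` is always read through from `_tzpath`. A standalone toy of the same pattern (my own illustration, not part of the package):

```python
# lazy_mod.py: hypothetical module showing PEP 562 lazy attributes.
# Accessing lazy_mod.EXPENSIVE triggers __getattr__; ordinary names don't.
_cache = None

def __getattr__(name):
    global _cache
    if name == "EXPENSIVE":
        if _cache is None:
            _cache = sum(range(10**6))  # stand-in for costly setup
        return _cache
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

The package's version skips the caching and returns `_tzpath.TZPATH` directly, so reassignments by `reset_tzpath` stay visible.
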
@ -0,0 +1,45 @@
import os
import typing
from datetime import datetime, tzinfo
from typing import (
    Any,
    Iterable,
    Optional,
    Protocol,
    Sequence,
    Set,
    Type,
    Union,
)

_T = typing.TypeVar("_T", bound="ZoneInfo")

class _IOBytes(Protocol):
    def read(self, __size: int) -> bytes: ...
    def seek(self, __size: int, __whence: int = ...) -> Any: ...

class ZoneInfo(tzinfo):
    @property
    def key(self) -> str: ...
    def __init__(self, key: str) -> None: ...
    @classmethod
    def no_cache(cls: Type[_T], key: str) -> _T: ...
    @classmethod
    def from_file(
        cls: Type[_T], __fobj: _IOBytes, key: Optional[str] = ...
    ) -> _T: ...
    @classmethod
    def clear_cache(cls, *, only_keys: Iterable[str] = ...) -> None: ...

# Note: Both here and in clear_cache, the types allow the use of `str` where
# a sequence of strings is required. This should be remedied if a solution
# to this typing bug is found: https://github.com/python/typing/issues/256
def reset_tzpath(
    to: Optional[Sequence[Union[os.PathLike, str]]] = ...
) -> None: ...
def available_timezones() -> Set[str]: ...

TZPATH: Sequence[str]

class ZoneInfoNotFoundError(KeyError): ...
class InvalidTZPathWarning(RuntimeWarning): ...

@ -0,0 +1,171 @@
import struct


def load_tzdata(key):
    try:
        import importlib.resources as importlib_resources
    except ImportError:
        import importlib_resources

    components = key.split("/")
    package_name = ".".join(["tzdata.zoneinfo"] + components[:-1])
    resource_name = components[-1]

    try:
        return importlib_resources.open_binary(package_name, resource_name)
    except (ImportError, FileNotFoundError, UnicodeEncodeError):
        # There are three types of exception that can be raised that all amount
        # to "we cannot find this key":
        #
        # ImportError: If package_name doesn't exist (e.g. if tzdata is not
        #   installed, or if there's an error in the folder name like
        #   Amrica/New_York)
        # FileNotFoundError: If resource_name doesn't exist in the package
        #   (e.g. Europe/Krasnoy)
        # UnicodeEncodeError: If package_name or resource_name are not UTF-8,
        #   such as keys containing a surrogate character.
        raise ZoneInfoNotFoundError(f"No time zone found with key {key}")


def load_data(fobj):
    header = _TZifHeader.from_file(fobj)

    if header.version == 1:
        time_size = 4
        time_type = "l"
    else:
        # Version 2+ has 64-bit integer transition times
        time_size = 8
        time_type = "q"

        # Version 2+ also starts with a Version 1 header and data, which
        # we need to skip now
        skip_bytes = (
            header.timecnt * 5  # Transition times and types
            + header.typecnt * 6  # Local time type records
            + header.charcnt  # Time zone designations
            + header.leapcnt * 8  # Leap second records
            + header.isstdcnt  # Standard/wall indicators
            + header.isutcnt  # UT/local indicators
        )

        fobj.seek(skip_bytes, 1)

        # Now we need to read the second header, which is not the same
        # as the first
        header = _TZifHeader.from_file(fobj)

    typecnt = header.typecnt
    timecnt = header.timecnt
    charcnt = header.charcnt

    # The data portion starts with timecnt transitions and indices
    if timecnt:
        trans_list_utc = struct.unpack(
            f">{timecnt}{time_type}", fobj.read(timecnt * time_size)
        )
        trans_idx = struct.unpack(f">{timecnt}B", fobj.read(timecnt))
    else:
        trans_list_utc = ()
        trans_idx = ()

    # Read the ttinfo struct, (utoff, isdst, abbrind)
    if typecnt:
        utcoff, isdst, abbrind = zip(
            *(struct.unpack(">lbb", fobj.read(6)) for i in range(typecnt))
        )
    else:
        utcoff = ()
        isdst = ()
        abbrind = ()

    # Now read the abbreviations. They are null-terminated strings, indexed
    # not by position in the array but by position in the unsplit
    # abbreviation string. I suppose this makes more sense in C, which uses
    # null to terminate the strings, but it's inconvenient here...
    abbr_vals = {}
    abbr_chars = fobj.read(charcnt)

    def get_abbr(idx):
        # Gets a string starting at idx and running until the next \x00
        #
        # We cannot pre-populate abbr_vals by splitting on \x00 because there
        # are some zones that use subsets of longer abbreviations, like so:
        #
        #  LMT\x00AHST\x00HDT\x00
        #
        # Where the idx to abbr mapping should be:
        #
        # {0: "LMT", 4: "AHST", 5: "HST", 9: "HDT"}
        if idx not in abbr_vals:
            span_end = abbr_chars.find(b"\x00", idx)
            abbr_vals[idx] = abbr_chars[idx:span_end].decode()

        return abbr_vals[idx]

    abbr = tuple(get_abbr(idx) for idx in abbrind)

    # The remainder of the file consists of leap seconds (currently unused) and
    # the standard/wall and ut/local indicators, which are metadata we don't need.
    # In version 2 files, we need to skip the unnecessary data to get at the TZ string:
    if header.version >= 2:
        # Each leap second record has size (time_size + 4)
        skip_bytes = header.isutcnt + header.isstdcnt + header.leapcnt * 12
        fobj.seek(skip_bytes, 1)

        c = fobj.read(1)  # Should be \n
        assert c == b"\n", c

        tz_bytes = b""
        while True:
            c = fobj.read(1)
            if c == b"\n":
                break
            tz_bytes += c

        tz_str = tz_bytes
    else:
        tz_str = None

    return trans_idx, trans_list_utc, utcoff, isdst, abbr, tz_str


class _TZifHeader:
    __slots__ = [
        "version",
        "isutcnt",
        "isstdcnt",
        "leapcnt",
        "timecnt",
        "typecnt",
        "charcnt",
    ]

    def __init__(self, *args):
        assert len(self.__slots__) == len(args)
        for attr, val in zip(self.__slots__, args):
            setattr(self, attr, val)

    @classmethod
    def from_file(cls, stream):
        # The header starts with a 4-byte "magic" value
        if stream.read(4) != b"TZif":
            raise ValueError("Invalid TZif file: magic not found")

        _version = stream.read(1)
        if _version == b"\x00":
            version = 1
        else:
            version = int(_version)
        stream.read(15)

        args = (version,)

        # Slots are defined in the order that the bytes are arranged
        args = args + struct.unpack(">6l", stream.read(24))

        return cls(*args)


class ZoneInfoNotFoundError(KeyError):
    """Exception raised when a ZoneInfo key is not found."""
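
To make the parser concrete, a hypothetical usage sketch (the path is an assumption; any TZif file works):

```python
# Hypothetical sketch: run load_data over a system TZif file.
from backports.zoneinfo._common import load_data

with open("/usr/share/zoneinfo/America/Chicago", "rb") as f:  # assumed path
    trans_idx, trans_utc, utcoff, isdst, abbr, tz_str = load_data(f)

print(abbr[:4])  # zone-dependent abbreviations, e.g. ('LMT', 'CDT', 'CST', ...)
print(tz_str)    # trailing POSIX rule, e.g. b'CST6CDT,M3.2.0,M11.1.0'
```
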
Binary file not shown.
@ -0,0 +1,207 @@
import os
import sys

PY36 = sys.version_info < (3, 7)


def reset_tzpath(to=None):
    global TZPATH

    tzpaths = to
    if tzpaths is not None:
        if isinstance(tzpaths, (str, bytes)):
            raise TypeError(
                f"tzpaths must be a list or tuple, "
                + f"not {type(tzpaths)}: {tzpaths!r}"
            )

        if not all(map(os.path.isabs, tzpaths)):
            raise ValueError(_get_invalid_paths_message(tzpaths))
        base_tzpath = tzpaths
    else:
        env_var = os.environ.get("PYTHONTZPATH", None)
        if env_var is not None:
            base_tzpath = _parse_python_tzpath(env_var)
        elif sys.platform != "win32":
            base_tzpath = [
                "/usr/share/zoneinfo",
                "/usr/lib/zoneinfo",
                "/usr/share/lib/zoneinfo",
                "/etc/zoneinfo",
            ]

            base_tzpath.sort(key=lambda x: not os.path.exists(x))
        else:
            base_tzpath = ()

    TZPATH = tuple(base_tzpath)

    if TZPATH_CALLBACKS:
        for callback in TZPATH_CALLBACKS:
            callback(TZPATH)


def _parse_python_tzpath(env_var):
    if not env_var:
        return ()

    raw_tzpath = env_var.split(os.pathsep)
    new_tzpath = tuple(filter(os.path.isabs, raw_tzpath))

    # If anything has been filtered out, we will warn about it
    if len(new_tzpath) != len(raw_tzpath):
        import warnings

        msg = _get_invalid_paths_message(raw_tzpath)

        warnings.warn(
            "Invalid paths specified in PYTHONTZPATH environment variable."
            + msg,
            InvalidTZPathWarning,
        )

    return new_tzpath


def _get_invalid_paths_message(tzpaths):
    invalid_paths = (path for path in tzpaths if not os.path.isabs(path))

    prefix = "\n    "
    indented_str = prefix + prefix.join(invalid_paths)

    return (
        "Paths should be absolute but found the following relative paths:"
        + indented_str
    )


if sys.version_info < (3, 8):

    def _isfile(path):
        # bpo-33721: In Python 3.8 non-UTF8 paths return False rather than
        # raising an error. See https://bugs.python.org/issue33721
        try:
            return os.path.isfile(path)
        except ValueError:
            return False


else:
    _isfile = os.path.isfile


def find_tzfile(key):
    """Retrieve the path to a TZif file from a key."""
    _validate_tzfile_path(key)
    for search_path in TZPATH:
        filepath = os.path.join(search_path, key)
        if _isfile(filepath):
            return filepath

    return None


_TEST_PATH = os.path.normpath(os.path.join("_", "_"))[:-1]


def _validate_tzfile_path(path, _base=_TEST_PATH):
    if os.path.isabs(path):
        raise ValueError(
            f"ZoneInfo keys may not be absolute paths, got: {path}"
        )

    # We only care about the kinds of path normalizations that would change the
    # length of the key - e.g. a/../b -> a/b, or a/b/ -> a/b. On Windows,
    # normpath will also change from a/b to a\b, but that would still preserve
    # the length.
    new_path = os.path.normpath(path)
    if len(new_path) != len(path):
        raise ValueError(
            f"ZoneInfo keys must be normalized relative paths, got: {path}"
        )

    resolved = os.path.normpath(os.path.join(_base, new_path))
    if not resolved.startswith(_base):
        raise ValueError(
            f"ZoneInfo keys must refer to subdirectories of TZPATH, got: {path}"
        )


del _TEST_PATH


def available_timezones():
    """Returns a set containing all available time zones.

    .. caution::

        This may attempt to open a large number of files, since the best way to
        determine if a given file on the time zone search path is a time zone
        is to open it and check for the "magic string" at the beginning.
    """
    try:
        from importlib import resources
    except ImportError:
        import importlib_resources as resources

    valid_zones = set()

    # Start with loading from the tzdata package if it exists: this has a
    # pre-assembled list of zones that only requires opening one file.
    try:
        with resources.open_text("tzdata", "zones") as f:
            for zone in f:
                zone = zone.strip()
                if zone:
                    valid_zones.add(zone)
    except (ImportError, FileNotFoundError):
        pass

    def valid_key(fpath):
        try:
            with open(fpath, "rb") as f:
                return f.read(4) == b"TZif"
        except Exception:  # pragma: nocover
            return False

    for tz_root in TZPATH:
        if not os.path.exists(tz_root):
            continue

        for root, dirnames, files in os.walk(tz_root):
            if root == tz_root:
                # right/ and posix/ are special directories and shouldn't be
                # included in the output of available zones
                if "right" in dirnames:
                    dirnames.remove("right")
                if "posix" in dirnames:
                    dirnames.remove("posix")

            for file in files:
                fpath = os.path.join(root, file)

                key = os.path.relpath(fpath, start=tz_root)
                if os.sep != "/":  # pragma: nocover
                    key = key.replace(os.sep, "/")

                if not key or key in valid_zones:
                    continue

                if valid_key(fpath):
                    valid_zones.add(key)

    if "posixrules" in valid_zones:
        # posixrules is a special symlink-only time zone where it exists, it
        # should not be included in the output
        valid_zones.remove("posixrules")

    return valid_zones


class InvalidTZPathWarning(RuntimeWarning):
    """Warning raised if an invalid path is specified in PYTHONTZPATH."""


TZPATH = ()
TZPATH_CALLBACKS = []
reset_tzpath()
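
A short sketch of the public surface this file backs (the path and results are assumptions; they vary by system):

```python
# Hypothetical sketch: repoint the search path, then enumerate zones.
from backports import zoneinfo

zoneinfo.reset_tzpath(to=["/usr/share/zoneinfo"])  # entries must be absolute
print(zoneinfo.TZPATH)                             # ('/usr/share/zoneinfo',)
print("UTC" in zoneinfo.available_timezones())     # True on most systems
```
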
@ -0,0 +1 @@
__version__ = "0.2.1"

@ -0,0 +1,754 @@
import bisect
import calendar
import collections
import functools
import re
import weakref
from datetime import datetime, timedelta, tzinfo

from . import _common, _tzpath

EPOCH = datetime(1970, 1, 1)
EPOCHORDINAL = datetime(1970, 1, 1).toordinal()

# It is relatively expensive to construct new timedelta objects, and in most
# cases we're looking at the same deltas, like integer numbers of hours, etc.
# To improve speed and memory use, we'll keep a dictionary with references
# to the ones we've already used so far.
#
# Loading every time zone in the 2020a version of the time zone database
# requires 447 timedeltas, which requires approximately the amount of space
# that ZoneInfo("America/New_York") with 236 transitions takes up, so we will
# set the cache size to 512 so that in the common case we always get cache
# hits, but specifically crafted ZoneInfo objects don't leak arbitrary amounts
# of memory.
@functools.lru_cache(maxsize=512)
def _load_timedelta(seconds):
    return timedelta(seconds=seconds)


class ZoneInfo(tzinfo):
    _strong_cache_size = 8
    _strong_cache = collections.OrderedDict()
    _weak_cache = weakref.WeakValueDictionary()
    __module__ = "backports.zoneinfo"

    def __init_subclass__(cls):
        cls._strong_cache = collections.OrderedDict()
        cls._weak_cache = weakref.WeakValueDictionary()

    def __new__(cls, key):
        instance = cls._weak_cache.get(key, None)
        if instance is None:
            instance = cls._weak_cache.setdefault(key, cls._new_instance(key))
            instance._from_cache = True

        # Update the "strong" cache
        cls._strong_cache[key] = cls._strong_cache.pop(key, instance)

        if len(cls._strong_cache) > cls._strong_cache_size:
            cls._strong_cache.popitem(last=False)

        return instance

    @classmethod
    def no_cache(cls, key):
        obj = cls._new_instance(key)
        obj._from_cache = False

        return obj

    @classmethod
    def _new_instance(cls, key):
        obj = super().__new__(cls)
        obj._key = key
        obj._file_path = obj._find_tzfile(key)

        if obj._file_path is not None:
            file_obj = open(obj._file_path, "rb")
        else:
            file_obj = _common.load_tzdata(key)

        with file_obj as f:
            obj._load_file(f)

        return obj

    @classmethod
    def from_file(cls, fobj, key=None):
        obj = super().__new__(cls)
        obj._key = key
        obj._file_path = None
        obj._load_file(fobj)
        obj._file_repr = repr(fobj)

        # Disable pickling for objects created from files
        obj.__reduce__ = obj._file_reduce

        return obj

    @classmethod
    def clear_cache(cls, *, only_keys=None):
        if only_keys is not None:
            for key in only_keys:
                cls._weak_cache.pop(key, None)
                cls._strong_cache.pop(key, None)

        else:
            cls._weak_cache.clear()
            cls._strong_cache.clear()

    @property
    def key(self):
        return self._key

    def utcoffset(self, dt):
        return self._find_trans(dt).utcoff

    def dst(self, dt):
        return self._find_trans(dt).dstoff

    def tzname(self, dt):
        return self._find_trans(dt).tzname

    def fromutc(self, dt):
        """Convert from datetime in UTC to datetime in local time"""

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        timestamp = self._get_local_timestamp(dt)
        num_trans = len(self._trans_utc)

        if num_trans >= 1 and timestamp < self._trans_utc[0]:
            tti = self._tti_before
            fold = 0
        elif (
            num_trans == 0 or timestamp > self._trans_utc[-1]
        ) and not isinstance(self._tz_after, _ttinfo):
            tti, fold = self._tz_after.get_trans_info_fromutc(
                timestamp, dt.year
            )
        elif num_trans == 0:
            tti = self._tz_after
            fold = 0
        else:
            idx = bisect.bisect_right(self._trans_utc, timestamp)

            if num_trans > 1 and timestamp >= self._trans_utc[1]:
                tti_prev, tti = self._ttinfos[idx - 2 : idx]
            elif timestamp > self._trans_utc[-1]:
                tti_prev = self._ttinfos[-1]
                tti = self._tz_after
            else:
                tti_prev = self._tti_before
                tti = self._ttinfos[0]

            # Detect fold
            shift = tti_prev.utcoff - tti.utcoff
            fold = shift.total_seconds() > timestamp - self._trans_utc[idx - 1]
        dt += tti.utcoff
        if fold:
            return dt.replace(fold=1)
        else:
            return dt

    def _find_trans(self, dt):
        if dt is None:
            if self._fixed_offset:
                return self._tz_after
            else:
                return _NO_TTINFO

        ts = self._get_local_timestamp(dt)

        lt = self._trans_local[dt.fold]

        num_trans = len(lt)

        if num_trans and ts < lt[0]:
            return self._tti_before
        elif not num_trans or ts > lt[-1]:
            if isinstance(self._tz_after, _TZStr):
                return self._tz_after.get_trans_info(ts, dt.year, dt.fold)
            else:
                return self._tz_after
        else:
            # idx is the transition that occurs after this timestamp, so we
            # subtract off 1 to get the current ttinfo
            idx = bisect.bisect_right(lt, ts) - 1
            assert idx >= 0
            return self._ttinfos[idx]

    def _get_local_timestamp(self, dt):
        return (
            (dt.toordinal() - EPOCHORDINAL) * 86400
            + dt.hour * 3600
            + dt.minute * 60
            + dt.second
        )

    def __str__(self):
        if self._key is not None:
            return f"{self._key}"
        else:
            return repr(self)

    def __repr__(self):
        if self._key is not None:
            return f"{self.__class__.__name__}(key={self._key!r})"
        else:
            return f"{self.__class__.__name__}.from_file({self._file_repr})"

    def __reduce__(self):
        return (self.__class__._unpickle, (self._key, self._from_cache))

    def _file_reduce(self):
        import pickle

        raise pickle.PicklingError(
            "Cannot pickle a ZoneInfo file created from a file stream."
        )

    @classmethod
    def _unpickle(cls, key, from_cache):
        if from_cache:
            return cls(key)
        else:
            return cls.no_cache(key)

    def _find_tzfile(self, key):
        return _tzpath.find_tzfile(key)

    def _load_file(self, fobj):
        # Retrieve all the data as it exists in the zoneinfo file
        trans_idx, trans_utc, utcoff, isdst, abbr, tz_str = _common.load_data(
            fobj
        )

        # Infer the DST offsets (needed for .dst()) from the data
        dstoff = self._utcoff_to_dstoff(trans_idx, utcoff, isdst)

        # Convert all the transition times (UTC) into "seconds since 1970-01-01 local time"
        trans_local = self._ts_to_local(trans_idx, trans_utc, utcoff)

        # Construct `_ttinfo` objects for each transition in the file
        _ttinfo_list = [
            _ttinfo(
                _load_timedelta(utcoffset), _load_timedelta(dstoffset), tzname
            )
            for utcoffset, dstoffset, tzname in zip(utcoff, dstoff, abbr)
        ]

        self._trans_utc = trans_utc
        self._trans_local = trans_local
        self._ttinfos = [_ttinfo_list[idx] for idx in trans_idx]

        # Find the first non-DST transition
        for i in range(len(isdst)):
            if not isdst[i]:
                self._tti_before = _ttinfo_list[i]
                break
        else:
            if self._ttinfos:
                self._tti_before = self._ttinfos[0]
            else:
                self._tti_before = None

        # Set the "fallback" time zone
        if tz_str is not None and tz_str != b"":
            self._tz_after = _parse_tz_str(tz_str.decode())
        else:
            if not self._ttinfos and not _ttinfo_list:
                raise ValueError("No time zone information found.")

            if self._ttinfos:
                self._tz_after = self._ttinfos[-1]
            else:
                self._tz_after = _ttinfo_list[-1]

        # Determine if this is a "fixed offset" zone, meaning that the output
        # of the utcoffset, dst and tzname functions does not depend on the
        # specific datetime passed.
        #
        # We make three simplifying assumptions here:
        #
        # 1. If _tz_after is not a _ttinfo, it has transitions that might
        #    actually occur (it is possible to construct TZ strings that
        #    specify STD and DST but no transitions ever occur, such as
        #    AAA0BBB,0/0,J365/25).
        # 2. If _ttinfo_list contains more than one _ttinfo object, the objects
        #    represent different offsets.
        # 3. _ttinfo_list contains no unused _ttinfos (in which case an
        #    otherwise fixed-offset zone with extra _ttinfos defined may
        #    appear to *not* be a fixed offset zone).
        #
        # Violations to these assumptions would be fairly exotic, and exotic
        # zones should almost certainly not be used with datetime.time (the
        # only thing that would be affected by this).
        if len(_ttinfo_list) > 1 or not isinstance(self._tz_after, _ttinfo):
            self._fixed_offset = False
        elif not _ttinfo_list:
            self._fixed_offset = True
        else:
            self._fixed_offset = _ttinfo_list[0] == self._tz_after

    @staticmethod
    def _utcoff_to_dstoff(trans_idx, utcoffsets, isdsts):
        # Now we must transform our ttis and abbrs into `_ttinfo` objects,
        # but there is an issue: .dst() must return a timedelta with the
        # difference between utcoffset() and the "standard" offset, but
        # the "base offset" and "DST offset" are not encoded in the file;
        # we can infer what they are from the isdst flag, but it is not
        # sufficient to just look at the last standard offset, because
        # occasionally countries will shift both DST offset and base offset.

        typecnt = len(isdsts)
        dstoffs = [0] * typecnt  # Provisionally assign all to 0.
        dst_cnt = sum(isdsts)
        dst_found = 0

        for i in range(1, len(trans_idx)):
            if dst_cnt == dst_found:
                break

            idx = trans_idx[i]

            dst = isdsts[idx]

            # We're only going to look at daylight saving time
            if not dst:
                continue

            # Skip any offsets that have already been assigned
            if dstoffs[idx] != 0:
                continue

            dstoff = 0
            utcoff = utcoffsets[idx]

            comp_idx = trans_idx[i - 1]

            if not isdsts[comp_idx]:
                dstoff = utcoff - utcoffsets[comp_idx]

            if not dstoff and idx < (typecnt - 1):
                comp_idx = trans_idx[i + 1]

                # If the following transition is also DST and we couldn't
                # find the DST offset by this point, we're going to have to
                # skip it and hope this transition gets assigned later
                if isdsts[comp_idx]:
                    continue

                dstoff = utcoff - utcoffsets[comp_idx]

            if dstoff:
                dst_found += 1
                dstoffs[idx] = dstoff
        else:
            # If we didn't find a valid value for a given index, we'll end up
            # with dstoff = 0 for something where `isdst=1`. This is obviously
            # wrong - one hour will be a much better guess than 0
            for idx in range(typecnt):
                if not dstoffs[idx] and isdsts[idx]:
                    dstoffs[idx] = 3600

        return dstoffs

    @staticmethod
    def _ts_to_local(trans_idx, trans_list_utc, utcoffsets):
        """Generate number of seconds since 1970 *in the local time*.

        This is necessary to easily find the transition times in local time"""
        if not trans_list_utc:
            return [[], []]

        # Start with the timestamps and modify in-place
        trans_list_wall = [list(trans_list_utc), list(trans_list_utc)]

        if len(utcoffsets) > 1:
            offset_0 = utcoffsets[0]
            offset_1 = utcoffsets[trans_idx[0]]
            if offset_1 > offset_0:
                offset_1, offset_0 = offset_0, offset_1
        else:
            offset_0 = offset_1 = utcoffsets[0]

        trans_list_wall[0][0] += offset_0
        trans_list_wall[1][0] += offset_1

        for i in range(1, len(trans_idx)):
            offset_0 = utcoffsets[trans_idx[i - 1]]
            offset_1 = utcoffsets[trans_idx[i]]

            if offset_1 > offset_0:
                offset_1, offset_0 = offset_0, offset_1

            trans_list_wall[0][i] += offset_0
            trans_list_wall[1][i] += offset_1

        return trans_list_wall


class _ttinfo:
    __slots__ = ["utcoff", "dstoff", "tzname"]

    def __init__(self, utcoff, dstoff, tzname):
        self.utcoff = utcoff
        self.dstoff = dstoff
        self.tzname = tzname

    def __eq__(self, other):
        return (
            self.utcoff == other.utcoff
            and self.dstoff == other.dstoff
            and self.tzname == other.tzname
        )

    def __repr__(self):  # pragma: nocover
        return (
            f"{self.__class__.__name__}"
            + f"({self.utcoff}, {self.dstoff}, {self.tzname})"
        )


_NO_TTINFO = _ttinfo(None, None, None)


class _TZStr:
    __slots__ = (
        "std",
        "dst",
        "start",
        "end",
        "get_trans_info",
        "get_trans_info_fromutc",
        "dst_diff",
    )

    def __init__(
        self, std_abbr, std_offset, dst_abbr, dst_offset, start=None, end=None
    ):
        self.dst_diff = dst_offset - std_offset
        std_offset = _load_timedelta(std_offset)
        self.std = _ttinfo(
            utcoff=std_offset, dstoff=_load_timedelta(0), tzname=std_abbr
        )

        self.start = start
        self.end = end

        dst_offset = _load_timedelta(dst_offset)
        delta = _load_timedelta(self.dst_diff)
        self.dst = _ttinfo(utcoff=dst_offset, dstoff=delta, tzname=dst_abbr)

        # These are assertions because the constructor should only be called
        # by functions that would fail before passing start or end
        assert start is not None, "No transition start specified"
        assert end is not None, "No transition end specified"

        self.get_trans_info = self._get_trans_info
        self.get_trans_info_fromutc = self._get_trans_info_fromutc

    def transitions(self, year):
        start = self.start.year_to_epoch(year)
        end = self.end.year_to_epoch(year)
        return start, end

    def _get_trans_info(self, ts, year, fold):
        """Get the information about the current transition - tti"""
        start, end = self.transitions(year)

        # With fold = 0, the period (denominated in local time) with the
        # smaller offset starts at the end of the gap and ends at the end of
        # the fold; with fold = 1, it runs from the start of the gap to the
        # beginning of the fold.
        #
        # So in order to determine the DST boundaries we need to know both
        # the fold and whether DST is positive or negative (rare), and it
        # turns out that this boils down to fold XOR is_positive.
        if fold == (self.dst_diff >= 0):
            end -= self.dst_diff
        else:
            start += self.dst_diff

        if start < end:
            isdst = start <= ts < end
        else:
            isdst = not (end <= ts < start)

        return self.dst if isdst else self.std

    def _get_trans_info_fromutc(self, ts, year):
        start, end = self.transitions(year)
        start -= self.std.utcoff.total_seconds()
        end -= self.dst.utcoff.total_seconds()

        if start < end:
            isdst = start <= ts < end
        else:
            isdst = not (end <= ts < start)

        # For positive DST, the ambiguous period is one dst_diff after the end
        # of DST; for negative DST, the ambiguous period is one dst_diff before
        # the start of DST.
        if self.dst_diff > 0:
            ambig_start = end
            ambig_end = end + self.dst_diff
        else:
            ambig_start = start
            ambig_end = start - self.dst_diff

        fold = ambig_start <= ts < ambig_end

        return (self.dst if isdst else self.std, fold)


def _post_epoch_days_before_year(year):
    """Get the number of days between 1970-01-01 and YEAR-01-01"""
    y = year - 1
    return y * 365 + y // 4 - y // 100 + y // 400 - EPOCHORDINAL


class _DayOffset:
    __slots__ = ["d", "julian", "hour", "minute", "second"]

    def __init__(self, d, julian, hour=2, minute=0, second=0):
        if not (0 + julian) <= d <= 365:
            min_day = 0 + julian
            raise ValueError(f"d must be in [{min_day}, 365], not: {d}")

        self.d = d
        self.julian = julian
        self.hour = hour
        self.minute = minute
        self.second = second

    def year_to_epoch(self, year):
        days_before_year = _post_epoch_days_before_year(year)

        d = self.d
        if self.julian and d >= 59 and calendar.isleap(year):
            d += 1

        epoch = (days_before_year + d) * 86400
        epoch += self.hour * 3600 + self.minute * 60 + self.second

        return epoch


class _CalendarOffset:
    __slots__ = ["m", "w", "d", "hour", "minute", "second"]

    _DAYS_BEFORE_MONTH = (
        -1,
        0,
        31,
        59,
        90,
        120,
        151,
        181,
        212,
        243,
        273,
        304,
        334,
    )

    def __init__(self, m, w, d, hour=2, minute=0, second=0):
        if not 0 < m <= 12:
            raise ValueError("m must be in (0, 12]")

        if not 0 < w <= 5:
            raise ValueError("w must be in (0, 5]")

        if not 0 <= d <= 6:
            raise ValueError("d must be in [0, 6]")

        self.m = m
        self.w = w
        self.d = d
        self.hour = hour
        self.minute = minute
        self.second = second

    @classmethod
    def _ymd2ord(cls, year, month, day):
        return (
            _post_epoch_days_before_year(year)
            + cls._DAYS_BEFORE_MONTH[month]
            + (month > 2 and calendar.isleap(year))
            + day
        )

    # TODO: These are not actually epoch dates as they are expressed in local time
    def year_to_epoch(self, year):
        """Calculates the datetime of the occurrence from the year"""
        # We know year and month, we need to convert w, d into day of month
        #
        # Week 1 is the first week in which day `d` (where 0 = Sunday) appears.
        # Week 5 represents the last occurrence of day `d`, so we need to know
        # the range of the month.
        first_day, days_in_month = calendar.monthrange(year, self.m)

        # This equation seems magical, so I'll break it down:
        # 1. calendar says 0 = Monday, POSIX says 0 = Sunday
        #    so we need first_day + 1 to get 1 = Monday -> 7 = Sunday,
        #    which is still equivalent because this math is mod 7
        # 2. Get first day - desired day mod 7: -1 % 7 = 6, so we don't need
        #    to do anything to adjust negative numbers.
        # 3. Add 1 because month days are a 1-based index.
        month_day = (self.d - (first_day + 1)) % 7 + 1

        # Now use a 0-based index version of `w` to calculate the w-th
        # occurrence of `d`
        month_day += (self.w - 1) * 7

        # month_day will only be > days_in_month if w was 5, and `w` means
        # "last occurrence of `d`", so now we just check if we over-shot the
        # end of the month and if so knock off 1 week.
        if month_day > days_in_month:
            month_day -= 7

        ordinal = self._ymd2ord(year, self.m, month_day)
        epoch = ordinal * 86400
        epoch += self.hour * 3600 + self.minute * 60 + self.second
        return epoch


def _parse_tz_str(tz_str):
    # The tz string has the format:
    #
    # std[offset[dst[offset],start[/time],end[/time]]]
    #
    # std and dst must be 3 or more characters long and must not contain
    # a leading colon, embedded digits, commas, nor a plus or minus sign;
    # The spaces between "std" and "offset" are only for display and are
    # not actually present in the string.
    #
    # The format of the offset is ``[+|-]hh[:mm[:ss]]``

    offset_str, *start_end_str = tz_str.split(",", 1)

    # fmt: off
    parser_re = re.compile(
        r"(?P<std>[^<0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
        r"((?P<stdoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?)" +
            r"((?P<dst>[^0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
                r"((?P<dstoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?))?" +
            r")?" +  # dst
        r")?$"  # stdoff
    )
    # fmt: on

    m = parser_re.match(offset_str)

    if m is None:
        raise ValueError(f"{tz_str} is not a valid TZ string")

    std_abbr = m.group("std")
    dst_abbr = m.group("dst")
    dst_offset = None

    std_abbr = std_abbr.strip("<>")

    if dst_abbr:
        dst_abbr = dst_abbr.strip("<>")

    std_offset = m.group("stdoff")
    if std_offset:
        try:
            std_offset = _parse_tz_delta(std_offset)
        except ValueError as e:
            raise ValueError(f"Invalid STD offset in {tz_str}") from e
    else:
        std_offset = 0

    if dst_abbr is not None:
        dst_offset = m.group("dstoff")
        if dst_offset:
            try:
                dst_offset = _parse_tz_delta(dst_offset)
            except ValueError as e:
                raise ValueError(f"Invalid DST offset in {tz_str}") from e
        else:
            dst_offset = std_offset + 3600

        if not start_end_str:
            raise ValueError(f"Missing transition rules: {tz_str}")

        start_end_strs = start_end_str[0].split(",", 1)
        try:
            start, end = (_parse_dst_start_end(x) for x in start_end_strs)
        except ValueError as e:
            raise ValueError(f"Invalid TZ string: {tz_str}") from e

        return _TZStr(std_abbr, std_offset, dst_abbr, dst_offset, start, end)
    elif start_end_str:
        raise ValueError(f"Transition rule present without DST: {tz_str}")
    else:
        # This is a static ttinfo, don't return _TZStr
        return _ttinfo(
            _load_timedelta(std_offset), _load_timedelta(0), std_abbr
        )


def _parse_dst_start_end(dststr):
    date, *time = dststr.split("/")
    if date[0] == "M":
        n_is_julian = False
        m = re.match(r"M(\d{1,2})\.(\d).(\d)$", date)
        if m is None:
            raise ValueError(f"Invalid dst start/end date: {dststr}")
        date_offset = tuple(map(int, m.groups()))
        offset = _CalendarOffset(*date_offset)
    else:
        if date[0] == "J":
            n_is_julian = True
            date = date[1:]
        else:
            n_is_julian = False

        doy = int(date)
        offset = _DayOffset(doy, n_is_julian)

    if time:
        time_components = list(map(int, time[0].split(":")))
        n_components = len(time_components)
        if n_components < 3:
            time_components.extend([0] * (3 - n_components))
        offset.hour, offset.minute, offset.second = time_components

    return offset


def _parse_tz_delta(tz_delta):
    match = re.match(
        r"(?P<sign>[+-])?(?P<h>\d{1,2})(:(?P<m>\d{2})(:(?P<s>\d{2}))?)?",
        tz_delta,
    )
    # Anything passed to this function should already have hit an equivalent
    # regular expression to find the section to parse.
    assert match is not None, tz_delta

    h, m, s = (
        int(v) if v is not None else 0
        for v in map(match.group, ("h", "m", "s"))
    )

    total = h * 3600 + m * 60 + s

    if not -86400 < total < 86400:
        raise ValueError(
            "Offset must be strictly between -24h and +24h:" + tz_delta
        )

    # Yes, +5 maps to an offset of -5h
    if match.group("sign") != "-":
        total *= -1

    return total
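
Tying the TZ-string machinery together, a small sketch (mine, not part of the file) exercising `_parse_tz_str` on a POSIX rule; the attribute names come from `_TZStr` and `_ttinfo` above:

```python
# Hypothetical sketch: parse a US-Central style POSIX TZ rule.
from backports.zoneinfo._zoneinfo import _parse_tz_str

tz = _parse_tz_str("CST6CDT,M3.2.0,M11.1.0")
print(tz.std.tzname, tz.std.utcoff)  # CST -1 day, 18:00:00
print(tz.dst.tzname, tz.dst.dstoff)  # CDT 1:00:00
```
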
@ -0,0 +1 @@
pip

@ -0,0 +1,119 @@
Metadata-Version: 2.1
Name: beautifulsoup4
Version: 4.11.1
Summary: Screen-scraping library
Home-page: https://www.crummy.com/software/BeautifulSoup/bs4/
Author: Leonard Richardson
Author-email: leonardr@segfault.org
License: MIT
Download-URL: https://www.crummy.com/software/BeautifulSoup/bs4/download/
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Text Processing :: Markup :: HTML
Classifier: Topic :: Text Processing :: Markup :: XML
Classifier: Topic :: Text Processing :: Markup :: SGML
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.6.0
Description-Content-Type: text/markdown
Provides-Extra: lxml
Provides-Extra: html5lib
Requires-Dist: soupsieve (>1.2)
Provides-Extra: html5lib
Requires-Dist: html5lib; extra == 'html5lib'
Provides-Extra: lxml
Requires-Dist: lxml; extra == 'lxml'

Beautiful Soup is a library that makes it easy to scrape information
from web pages. It sits atop an HTML or XML parser, providing Pythonic
idioms for iterating, searching, and modifying the parse tree.

# Quick start

```
>>> from bs4 import BeautifulSoup
>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
>>> print(soup.prettify())
<html>
 <body>
  <p>
   Some
   <b>
    bad
    <i>
     HTML
    </i>
   </b>
  </p>
 </body>
</html>
>>> soup.find(text="bad")
'bad'
>>> soup.i
<i>HTML</i>
#
>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
#
>>> print(soup.prettify())
<?xml version="1.0" encoding="utf-8"?>
<tag1>
 Some
 <tag2/>
 bad
 <tag3>
  XML
 </tag3>
</tag1>
```
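
A small sketch of the searching idioms mentioned above (not part of the upstream README; the markup here is made up, and `select_one` relies on the soupsieve dependency listed earlier):

```
>>> doc = BeautifulSoup('<ul><li class="a">one</li><li class="a">two</li></ul>', "html.parser")
>>> [li.get_text() for li in doc.find_all("li", class_="a")]
['one', 'two']
>>> doc.select_one("li.a").get_text()
'one'
```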

To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).

# Links

* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
* [Discussion group](https://groups.google.com/group/beautifulsoup/)
* [Development](https://code.launchpad.net/beautifulsoup/)
* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
* [Complete changelog](https://bazaar.launchpad.net/~leonardr/beautifulsoup/bs4/view/head:/CHANGELOG)

# Note on Python 2 sunsetting

Beautiful Soup's support for Python 2 was discontinued on December 31,
2020: one year after the sunset date for Python 2 itself. From this
point onward, new Beautiful Soup development will exclusively target
Python 3. The final release of Beautiful Soup 4 to support Python 2
was 4.9.3.

# Supporting the project

If you use Beautiful Soup as part of your professional work, please consider a
[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
This will support many of the free software projects your organization
depends on, not just Beautiful Soup.

If you use Beautiful Soup for personal projects, the best way to say
thank you is to read
[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
wrote about what Beautiful Soup has taught me about software
development.

# Building the documentation

The bs4/doc/ directory contains full documentation in Sphinx
format. Run `make html` in that directory to create HTML
documentation.

# Running the unit tests

Beautiful Soup supports unit test discovery using Pytest:

```
$ pytest
```
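
To run a single file or a keyword-matched subset instead of the whole suite (a sketch; the path is one of the test files from the RECORD listing below, and `pytest.main` is the standard in-process entry point):

```
>>> import pytest
>>> pytest.main(["bs4/tests/test_soup.py", "-k", "encoding"])
```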
@ -0,0 +1,53 @@
beautifulsoup4-4.11.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
beautifulsoup4-4.11.1.dist-info/METADATA,sha256=zefuevXradYVap7gKfvpSwLzvs8FSBONKfY8j5HIj-k,3525
beautifulsoup4-4.11.1.dist-info/RECORD,,
beautifulsoup4-4.11.1.dist-info/WHEEL,sha256=NzFAKnL7g-U64xnS1s5e3mJnxKpOTeOtlXdFwS9yNXI,92
beautifulsoup4-4.11.1.dist-info/top_level.txt,sha256=gpUVJcTwW3q7-QGp6tAEomZsskknmgSqVe6xn1C0jJI,26
bs4/__init__.py,sha256=v5VuQqegAzN3bo4kjs6fT0391zvKQGhvBlV841aJ86A,32832
bs4/__pycache__/__init__.cpython-38.pyc,,
bs4/__pycache__/dammit.cpython-38.pyc,,
bs4/__pycache__/diagnose.cpython-38.pyc,,
bs4/__pycache__/element.cpython-38.pyc,,
bs4/__pycache__/formatter.cpython-38.pyc,,
bs4/builder/__init__.py,sha256=Ny7NmZu3XDndp_elCrz2W8K9OUjrAjHQuYianJR6RDM,24378
bs4/builder/__pycache__/__init__.cpython-38.pyc,,
bs4/builder/__pycache__/_html5lib.cpython-38.pyc,,
bs4/builder/__pycache__/_htmlparser.cpython-38.pyc,,
bs4/builder/__pycache__/_lxml.cpython-38.pyc,,
bs4/builder/_html5lib.py,sha256=YFOKZf3E22AEp_V2k49U5LmF26bpSZixZrZGSwM_iCU,18946
bs4/builder/_htmlparser.py,sha256=6RYsAXWb_ppMZGB-7lpEmJQ73tpzSu_CJ5QPaFcJu-Y,19169
bs4/builder/_lxml.py,sha256=ik6BFGnxAzV2-21S_Wc-7ZeA174muSA_ZhmpnAe3g0E,14904
bs4/dammit.py,sha256=G0cQfsEqfwJ-FIQMkXgCJwSHMn7t9vPepCrud6fZEKk,41158
bs4/diagnose.py,sha256=MRbN2bJSpa8VFt8HemqP8BK9hL5ronCxZmrfGRZYwBg,7911
bs4/element.py,sha256=jjP-cIA3oWJrAhky61wayot92SQFKzJMEE8My6uTPDM,86753
bs4/formatter.py,sha256=f5UBtvW9twrrQeLjBeLaKp4ntZpUDmUBY8jF3BiLraM,7206
bs4/tests/__init__.py,sha256=yFvfhDv5vOeho6NEZcI0AIpmuEe9bvC7TsqNp3hDGxQ,49185
bs4/tests/__pycache__/__init__.cpython-38.pyc,,
bs4/tests/__pycache__/test_builder.cpython-38.pyc,,
bs4/tests/__pycache__/test_builder_registry.cpython-38.pyc,,
bs4/tests/__pycache__/test_dammit.cpython-38.pyc,,
bs4/tests/__pycache__/test_docs.cpython-38.pyc,,
bs4/tests/__pycache__/test_element.cpython-38.pyc,,
bs4/tests/__pycache__/test_formatter.cpython-38.pyc,,
bs4/tests/__pycache__/test_html5lib.cpython-38.pyc,,
bs4/tests/__pycache__/test_htmlparser.cpython-38.pyc,,
bs4/tests/__pycache__/test_lxml.cpython-38.pyc,,
bs4/tests/__pycache__/test_navigablestring.cpython-38.pyc,,
bs4/tests/__pycache__/test_pageelement.cpython-38.pyc,,
bs4/tests/__pycache__/test_soup.cpython-38.pyc,,
bs4/tests/__pycache__/test_tag.cpython-38.pyc,,
bs4/tests/__pycache__/test_tree.cpython-38.pyc,,
bs4/tests/test_builder.py,sha256=nc2JE5EMrEf-p24qhf2R8qAV5PpFiOuNpYCmtmCjlTI,1115
bs4/tests/test_builder_registry.py,sha256=_Vh2CyYzv4BKoJdCa7s4lsBLUQptskfgiSn3U3810CQ,5068
bs4/tests/test_dammit.py,sha256=lS3EWCYCtxVE4fC_J2eTcXiChhuaL4Vcbo94B5geIL4,15680
bs4/tests/test_docs.py,sha256=xoAxnUfoQ7aRqGImwW_9BJDU8WNMZHIuvWqVepvWXt8,1127
bs4/tests/test_element.py,sha256=92oRSRoGk8gIXAbAGHErKzocx2MK32TqcQdUJ-dGQMo,2377
bs4/tests/test_formatter.py,sha256=0qV9H7mMDBcnFFH-dwNCrSm2zNi_40WMB2GMcV35PoY,4128
bs4/tests/test_html5lib.py,sha256=X6r13jfJ-OmG6SL_hyfFNXWs7sEEq_1TmCzCJclxvbA,8246
bs4/tests/test_htmlparser.py,sha256=BFCspIdhkr8Bss-kHufeNcwa_lvJpVWKgJskPoZgZ7E,5532
bs4/tests/test_lxml.py,sha256=deaf1YOrR8I0T5yZAV4TDxcAXHzVhdlnsSajGpBoxs0,7376
bs4/tests/test_navigablestring.py,sha256=RGSgziNf7cZnYdEPsoqL1B2I68TUJp1JmEQVxbh_ryA,5081
bs4/tests/test_pageelement.py,sha256=fpOU3W5IAz92b0A2VxWKkI5pApObMB17cNzXNF85FfA,27792
bs4/tests/test_soup.py,sha256=CUnK-rDccIlKMLBP4AweCqRDbPt3Lqzln_BpnBgKm4M,17810
bs4/tests/test_tag.py,sha256=f19uie7QehvgvhIqNWfjDRR4TKa-ftm_RRoo6LXZyqk,9016
bs4/tests/test_tree.py,sha256=y9Qvs8nnYj6RnGRSxtoYQQwSja-DlbtukCVs0neVwyU,47557
@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: true
Tag: py3-none-any

@ -0,0 +1,3 @@
bs4
bs4/builder
bs4/tests
@ -0,0 +1,812 @@
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".

http://www.crummy.com/software/BeautifulSoup/

Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.

Beautiful Soup works with Python 3.5 and up. It works better if lxml
and/or html5lib is installed.

For more than you ever wanted to know about Beautiful Soup, see the
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""

__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.11.1"
__copyright__ = "Copyright (c) 2004-2022 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = ['BeautifulSoup']

from collections import Counter
import os
import re
import sys
import traceback
import warnings

# The very first thing we do is give a useful error if someone is
# running this code under Python 2.
if sys.version_info.major < 3:
    raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')

from .builder import (
    builder_registry,
    ParserRejectedMarkup,
    XMLParsedAsHTMLWarning,
)
from .dammit import UnicodeDammit
from .element import (
    CData,
    Comment,
    DEFAULT_OUTPUT_ENCODING,
    Declaration,
    Doctype,
    NavigableString,
    PageElement,
    ProcessingInstruction,
    PYTHON_SPECIFIC_ENCODINGS,
    ResultSet,
    Script,
    Stylesheet,
    SoupStrainer,
    Tag,
    TemplateString,
)

# Define some custom warnings.
class GuessedAtParserWarning(UserWarning):
    """The warning issued when BeautifulSoup has to guess what parser to
    use -- probably because no parser was specified in the constructor.
    """

class MarkupResemblesLocatorWarning(UserWarning):
    """The warning issued when BeautifulSoup is given 'markup' that
    actually looks like a resource locator -- a URL or a path to a file
    on disk.
    """


class BeautifulSoup(Tag):
    """A data structure representing a parsed HTML or XML document.

    Most of the methods you'll call on a BeautifulSoup object are inherited from
    PageElement or Tag.

    Internally, this class defines the basic interface called by the
    tree builders when converting an HTML/XML document into a data
    structure. The interface abstracts away the differences between
    parsers. To write a new tree builder, you'll need to understand
    these methods as a whole.

    These methods will be called by the BeautifulSoup constructor:
      * reset()
      * feed(markup)

    The tree builder may call these methods from its feed() implementation:
      * handle_starttag(name, attrs) # See note about return value
      * handle_endtag(name)
      * handle_data(data) # Appends to the current data node
      * endData(containerClass) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """

    # Since BeautifulSoup subclasses Tag, it's possible to treat it as
    # a Tag with a .name. This name makes it clear the BeautifulSoup
    # object isn't a real markup tag.
    ROOT_TAG_NAME = '[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']

    # A string containing all ASCII whitespace characters, used in
    # endData() to detect data chunks that seem 'empty'.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"

    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, exclude_encodings=None,
                 element_classes=None, **kwargs):
        """Constructor.

        :param markup: A string or a file-like object representing
         markup to be parsed.

        :param features: Desirable features of the parser to be
         used. This may be the name of a specific parser ("lxml",
         "lxml-xml", "html.parser", or "html5lib") or it may be the
         type of markup to be used ("html", "html5", "xml"). It's
         recommended that you name a specific parser, so that
         Beautiful Soup gives you the same results across platforms
         and virtual environments.

        :param builder: A TreeBuilder subclass to instantiate (or
         instance to use) instead of looking one up based on
         `features`. You only need to use this if you've implemented a
         custom TreeBuilder.

        :param parse_only: A SoupStrainer. Only parts of the document
         matching the SoupStrainer will be considered. This is useful
         when parsing part of a document that would otherwise be too
         large to fit into memory.

        :param from_encoding: A string indicating the encoding of the
         document to be parsed. Pass this in if Beautiful Soup is
         guessing wrongly about the document's encoding.

        :param exclude_encodings: A list of strings indicating
         encodings known to be wrong. Pass this in if you don't know
         the document's encoding but you know Beautiful Soup's guess is
         wrong.

        :param element_classes: A dictionary mapping BeautifulSoup
         classes like Tag and NavigableString, to other classes you'd
         like to be instantiated instead as the parse tree is
         built. This is useful for subclassing Tag or NavigableString
         to modify default behavior.

        :param kwargs: For backwards compatibility purposes, the
         constructor accepts certain keyword arguments used in
         Beautiful Soup 3. None of these arguments do anything in
         Beautiful Soup 4; they will result in a warning and then be
         ignored.

         Apart from this, any keyword arguments passed into the
         BeautifulSoup constructor are propagated to the TreeBuilder
         constructor. This makes it possible to configure a
         TreeBuilder by passing in arguments, not just by saying which
         one to use.
        """
        if 'convertEntities' in kwargs:
            del kwargs['convertEntities']
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. Suggest you use "
                "features='lxml' for HTML and features='lxml-xml' for "
                "XML.")

        def deprecated_argument(old_name, new_name):
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name),
                    DeprecationWarning
                )
                return kwargs.pop(old_name)
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        if from_encoding and isinstance(markup, str):
            warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
            from_encoding = None

        self.element_classes = element_classes or dict()

        # We need this information to track whether or not the builder
        # was specified well enough that we can omit the 'you need to
        # specify a parser' warning.
        original_builder = builder
        original_features = features

        if isinstance(builder, type):
            # A builder class was passed in; it needs to be instantiated.
            builder_class = builder
            builder = None
        elif builder is None:
            if isinstance(features, str):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))

        # At this point either we have a TreeBuilder instance in
        # builder, or we have a builder_class that we can instantiate
        # with the remaining **kwargs.
        if builder is None:
            builder = builder_class(**kwargs)
            if not original_builder and not (
                    original_features == builder.NAME or
                    original_features in builder.ALTERNATE_NAMES
            ) and markup:
                # The user did not tell us which TreeBuilder to use,
                # and we had to guess. Issue a warning.
                if builder.is_xml:
                    markup_type = "XML"
                else:
                    markup_type = "HTML"

                # This code adapted from warnings.py so that we get the same line
                # of code as our warnings.warn() call gets, even if the answer is wrong
                # (as it may be in a multithreading situation).
                caller = None
                try:
                    caller = sys._getframe(1)
                except ValueError:
                    pass
                if caller:
                    globals = caller.f_globals
                    line_number = caller.f_lineno
                else:
                    globals = sys.__dict__
                    line_number = 1
                filename = globals.get('__file__')
                if filename:
                    fnl = filename.lower()
                    if fnl.endswith((".pyc", ".pyo")):
                        filename = filename[:-1]
                if filename:
                    # If there is no filename at all, the user is most likely in a REPL,
                    # and the warning is not necessary.
                    values = dict(
                        filename=filename,
                        line_number=line_number,
                        parser=builder.NAME,
                        markup_type=markup_type
                    )
                    warnings.warn(
                        self.NO_PARSER_SPECIFIED_WARNING % values,
                        GuessedAtParserWarning, stacklevel=2
                    )
        else:
            if kwargs:
                warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")

        self.builder = builder
        self.is_xml = builder.is_xml
        self.known_xml = self.is_xml
        self._namespaces = dict()
        self.parse_only = parse_only

        if hasattr(markup, 'read'):  # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256 and (
                (isinstance(markup, bytes) and not b'<' in markup)
                or (isinstance(markup, str) and not '<' in markup)
        ):
            # Issue warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # since that is sometimes the intended behavior.
            if not self._markup_is_url(markup):
                self._markup_resembles_filename(markup)

        rejections = []
        success = False
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                 self.builder.prepare_markup(
                     markup, from_encoding, exclude_encodings=exclude_encodings)):
            self.reset()
            self.builder.initialize_soup(self)
            try:
                self._feed()
                success = True
                break
            except ParserRejectedMarkup as e:
                rejections.append(e)
                pass

        if not success:
            other_exceptions = [str(e) for e in rejections]
            raise ParserRejectedMarkup(
                "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
            )

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None

    def __copy__(self):
        """Copy a BeautifulSoup object by converting the document to a string and parsing it again."""
        copy = type(self)(
            self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
        )

        # Although we encoded the tree to UTF-8, that may not have
        # been the encoding of the original markup. Set the copy's
        # .original_encoding to reflect the original object's
        # .original_encoding.
        copy.original_encoding = self.original_encoding
        return copy

    def __getstate__(self):
        # Frequently a tree builder can't be pickled.
        d = dict(self.__dict__)
        if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
            d['builder'] = None
        return d

    @classmethod
    def _decode_markup(cls, markup):
        """Ensure `markup` is bytes so it's safe to send into warnings.warn.

        TODO: warnings.warn had this problem back in 2010 but it might not
        anymore.
        """
        if isinstance(markup, bytes):
            decoded = markup.decode('utf-8', 'replace')
        else:
            decoded = markup
        return decoded

    @classmethod
    def _markup_is_url(cls, markup):
        """Error-handling method to raise a warning if incoming markup looks
        like a URL.

        :param markup: A string.
        :return: Whether or not the markup resembles a URL
            closely enough to justify a warning.
        """
        if isinstance(markup, bytes):
            space = b' '
            cant_start_with = (b"http:", b"https:")
        elif isinstance(markup, str):
            space = ' '
            cant_start_with = ("http:", "https:")
        else:
            return False

        if any(markup.startswith(prefix) for prefix in cant_start_with):
            if not space in markup:
                warnings.warn(
                    'The input looks more like a URL than markup. You may want to use'
                    ' an HTTP client like requests to get the document behind'
                    ' the URL, and feed that document to Beautiful Soup.',
                    MarkupResemblesLocatorWarning
                )
                return True
        return False

    @classmethod
    def _markup_resembles_filename(cls, markup):
        """Error-handling method to raise a warning if incoming markup
        resembles a filename.

        :param markup: A bytestring or string.
        :return: Whether or not the markup resembles a filename
            closely enough to justify a warning.
        """
        path_characters = '/\\'
        extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
        if isinstance(markup, bytes):
            path_characters = path_characters.encode("utf8")
            extensions = [x.encode('utf8') for x in extensions]
        filelike = False
        if any(x in markup for x in path_characters):
            filelike = True
        else:
            lower = markup.lower()
            if any(lower.endswith(ext) for ext in extensions):
                filelike = True
        if filelike:
            warnings.warn(
                'The input looks more like a filename than markup. You may'
                ' want to open this file and pass the filehandle into'
                ' Beautiful Soup.',
                MarkupResemblesLocatorWarning
            )
            return True
        return False

    def _feed(self):
        """Internal method that parses previously set markup, creating a large
        number of Tag and NavigableString objects.
        """
        # Convert the document to Unicode.
        self.builder.reset()

        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def reset(self):
        """Reset this object to a state as though it had never parsed any
        markup.
        """
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        self.open_tag_counter = Counter()
        self.preserve_whitespace_tag_stack = []
        self.string_container_stack = []
        self.pushTag(self)

    def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
                sourceline=None, sourcepos=None, **kwattrs):
        """Create a new Tag associated with this BeautifulSoup object.

        :param name: The name of the new Tag.
        :param namespace: The URI of the new Tag's XML namespace, if any.
        :param prefix: The prefix for the new Tag's XML namespace, if any.
        :param attrs: A dictionary of this Tag's attribute values; can
            be used instead of `kwattrs` for attributes like 'class'
            that are reserved words in Python.
        :param sourceline: The line number where this tag was
            (purportedly) found in its source document.
        :param sourcepos: The character position within `sourceline` where this
            tag was (purportedly) found.
        :param kwattrs: Keyword arguments for the new Tag's attribute values.

        """
        kwattrs.update(attrs)
        return self.element_classes.get(Tag, Tag)(
            None, self.builder, name, namespace, nsprefix, kwattrs,
            sourceline=sourceline, sourcepos=sourcepos
        )

    def string_container(self, base_class=None):
        container = base_class or NavigableString

        # There may be a general override of NavigableString.
        container = self.element_classes.get(
            container, container
        )

        # On top of that, we may be inside a tag that needs a special
        # container class.
        if self.string_container_stack and container is NavigableString:
            container = self.builder.string_containers.get(
                self.string_container_stack[-1].name, container
            )
        return container

    def new_string(self, s, subclass=None):
        """Create a new NavigableString associated with this BeautifulSoup
        object.
        """
        container = self.string_container(subclass)
        return container(s)

    def insert_before(self, *args):
        """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
        it because there is nothing before or after it in the parse tree.
        """
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

    def insert_after(self, *args):
        """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
        it because there is nothing before or after it in the parse tree.
        """
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

    def popTag(self):
        """Internal method called by _popToTag when a tag is closed."""
        tag = self.tagStack.pop()
        if tag.name in self.open_tag_counter:
            self.open_tag_counter[tag.name] -= 1
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        if self.string_container_stack and tag == self.string_container_stack[-1]:
            self.string_container_stack.pop()
        #print("Pop", tag.name)
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Internal method called by handle_starttag when a tag is opened."""
        #print("Push", tag.name)
        if self.currentTag is not None:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        if tag.name != self.ROOT_TAG_NAME:
            self.open_tag_counter[tag.name] += 1
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)
        if tag.name in self.builder.string_containers:
            self.string_container_stack.append(tag)

    def endData(self, containerClass=None):
        """Method called by the TreeBuilder when the end of a data segment
        occurs.
        """
        if self.current_data:
            current_data = ''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '

            # Reset the data collector.
            self.current_data = []

            # Should we add this string to the tree at all?
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return

            containerClass = self.string_container(containerClass)
            o = containerClass(current_data)
            self.object_was_parsed(o)

    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Method called by the TreeBuilder to integrate an object into the parse tree."""
        if parent is None:
            parent = self.currentTag
        if most_recent_element is not None:
            previous_element = most_recent_element
        else:
            previous_element = self._most_recent_element

        next_element = previous_sibling = next_sibling = None
        if isinstance(o, Tag):
            next_element = o.next_element
            next_sibling = o.next_sibling
            previous_sibling = o.previous_sibling
            if previous_element is None:
                previous_element = o.previous_element

        fix = parent.next_element is not None

        o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

        self._most_recent_element = o
        parent.contents.append(o)

        # Check if we are inserting into an already parsed node.
        if fix:
            self._linkage_fixer(parent)

    def _linkage_fixer(self, el):
        """Make sure linkage of this fragment is sound."""

        first = el.contents[0]
        child = el.contents[-1]
        descendant = child

        if child is first and el.parent is not None:
            # Parent should be linked to first child
            el.next_element = child
            # We are no longer linked to whatever this element is
            prev_el = child.previous_element
            if prev_el is not None and prev_el is not el:
                prev_el.next_element = None
            # First child should be linked to the parent, and no previous siblings.
            child.previous_element = el
            child.previous_sibling = None

        # We have no sibling as we've been appended as the last.
        child.next_sibling = None

        # This index is a tag, dig deeper for a "last descendant"
        if isinstance(child, Tag) and child.contents:
            descendant = child._last_descendant(False)

        # As the final step, link last descendant. It should be linked
        # to the parent's next sibling (if found), else walk up the chain
        # and find a parent with a sibling. It should have no next sibling.
        descendant.next_element = None
        descendant.next_sibling = None
        target = el
        while True:
            if target is None:
                break
            elif target.next_sibling is not None:
                descendant.next_element = target.next_sibling
                target.next_sibling.previous_element = child
                break
            target = target.parent

    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag.

        If there are no open tags with the given name, nothing will be
        popped.

        :param name: Pop up to the most recent tag with this name.
        :param nsprefix: The namespace prefix that goes with `name`.
        :param inclusivePop: If this is false, pops the tag stack up
          to but *not* including the most recent instance of the
          given tag.

        """
        #print("Popping to %s" % name)
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return

        most_recently_popped = None

        stack_size = len(self.tagStack)
        for i in range(stack_size - 1, 0, -1):
            if not self.open_tag_counter.get(name):
                break
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()

        return most_recently_popped

    def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
                        sourcepos=None, namespaces=None):
        """Called by the tree builder when a new tag is encountered.

        :param name: Name of the tag.
        :param nsprefix: Namespace prefix for the tag.
        :param attrs: A dictionary of attribute values.
        :param sourceline: The line number where this tag was found in its
            source document.
        :param sourcepos: The character position within `sourceline` where this
            tag was found.
        :param namespaces: A dictionary of all namespace prefix mappings
            currently in scope in the document.

        If this method returns None, the tag was rejected by an active
        SoupStrainer. You should proceed as if the tag had not occurred
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """
        # print("Start tag %s: %s" % (name, attrs))
        self.endData()

        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None

        tag = self.element_classes.get(Tag, Tag)(
            self, self.builder, name, namespace, nsprefix, attrs,
            self.currentTag, self._most_recent_element,
            sourceline=sourceline, sourcepos=sourcepos,
            namespaces=namespaces
        )
        if tag is None:
            return tag
        if self._most_recent_element is not None:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag

    def handle_endtag(self, name, nsprefix=None):
        """Called by the tree builder when an ending tag is encountered.

        :param name: Name of the tag.
        :param nsprefix: Namespace prefix for the tag.
        """
        #print("End tag: " + name)
        self.endData()
        self._popToTag(name, nsprefix)

    def handle_data(self, data):
        """Called by the tree builder when a chunk of textual data is encountered."""
        self.current_data.append(data)

    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of the parse tree
            as an HTML or XML document.

        :param pretty_print: If this is True, indentation will be used to
            make the document more readable.
        :param eventual_encoding: The encoding of the final document.
            If this is None, the document will be a Unicode string.
        """
        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
                # This is a special Python encoding; it can't actually
                # go into an XML document because it means nothing
                # outside of Python.
                eventual_encoding = None
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = '<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = ''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)

# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup

class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""

    def __init__(self, *args, **kwargs):
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.',
            DeprecationWarning
        )
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)


class StopParsing(Exception):
    """Exception raised by a TreeBuilder if it's unable to continue parsing."""
    pass

class FeatureNotFound(ValueError):
    """Exception raised by the BeautifulSoup constructor if no parser with the
    requested features is found.
    """
    pass


# If this file is run as a script, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print((soup.prettify()))
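One nicety at the tail of that file: executed as a script, the module acts as an HTML pretty-printer over stdin. The same effect from a REPL, using only calls that appear above (a sketch, not upstream docs):

```
>>> from bs4 import BeautifulSoup
>>> print(BeautifulSoup("<p>Some<b>bad<i>HTML", "html.parser").prettify())
```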
@ -0,0 +1,631 @@
|
||||||
|
# Use of this source code is governed by the MIT license.
|
||||||
|
__license__ = "MIT"
|
||||||
|
|
||||||
|
from collections import defaultdict
|
||||||
|
import itertools
|
||||||
|
import re
|
||||||
|
import warnings
|
||||||
|
import sys
|
||||||
|
from bs4.element import (
|
||||||
|
CharsetMetaAttributeValue,
|
||||||
|
ContentMetaAttributeValue,
|
||||||
|
RubyParenthesisString,
|
||||||
|
RubyTextString,
|
||||||
|
Stylesheet,
|
||||||
|
Script,
|
||||||
|
TemplateString,
|
||||||
|
nonwhitespace_re
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'HTMLTreeBuilder',
|
||||||
|
'SAXTreeBuilder',
|
||||||
|
'TreeBuilder',
|
||||||
|
'TreeBuilderRegistry',
|
||||||
|
]
|
||||||
|
|
||||||
|
# Some useful features for a TreeBuilder to have.
|
||||||
|
FAST = 'fast'
|
||||||
|
PERMISSIVE = 'permissive'
|
||||||
|
STRICT = 'strict'
|
||||||
|
XML = 'xml'
|
||||||
|
HTML = 'html'
|
||||||
|
HTML_5 = 'html5'
|
||||||
|
|
||||||
|
class XMLParsedAsHTMLWarning(UserWarning):
|
||||||
|
"""The warning issued when an HTML parser is used to parse
|
||||||
|
XML that is not XHTML.
|
||||||
|
"""
|
||||||
|
MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""
|
||||||
|
|
||||||
|
|
||||||
|
class TreeBuilderRegistry(object):
|
||||||
|
"""A way of looking up TreeBuilder subclasses by their name or by desired
|
||||||
|
features.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.builders_for_feature = defaultdict(list)
|
||||||
|
self.builders = []
|
||||||
|
|
||||||
|
def register(self, treebuilder_class):
|
||||||
|
"""Register a treebuilder based on its advertised features.
|
||||||
|
|
||||||
|
:param treebuilder_class: A subclass of Treebuilder. its .features
|
||||||
|
attribute should list its features.
|
||||||
|
"""
|
||||||
|
for feature in treebuilder_class.features:
|
||||||
|
self.builders_for_feature[feature].insert(0, treebuilder_class)
|
||||||
|
self.builders.insert(0, treebuilder_class)
|
||||||
|
|
||||||
|
def lookup(self, *features):
|
||||||
|
"""Look up a TreeBuilder subclass with the desired features.
|
||||||
|
|
||||||
|
:param features: A list of features to look for. If none are
|
||||||
|
provided, the most recently registered TreeBuilder subclass
|
||||||
|
will be used.
|
||||||
|
:return: A TreeBuilder subclass, or None if there's no
|
||||||
|
registered subclass with all the requested features.
|
||||||
|
"""
|
||||||
|
if len(self.builders) == 0:
|
||||||
|
# There are no builders at all.
|
||||||
|
return None
|
||||||
|
|
||||||
|
if len(features) == 0:
|
||||||
|
# They didn't ask for any features. Give them the most
|
||||||
|
# recently registered builder.
|
||||||
|
return self.builders[0]
|
||||||
|
|
||||||
|
# Go down the list of features in order, and eliminate any builders
|
||||||
|
# that don't match every feature.
|
||||||
|
features = list(features)
|
||||||
|
features.reverse()
|
||||||
|
candidates = None
|
||||||
|
candidate_set = None
|
||||||
|
while len(features) > 0:
|
||||||
|
feature = features.pop()
|
||||||
|
we_have_the_feature = self.builders_for_feature.get(feature, [])
|
||||||
|
if len(we_have_the_feature) > 0:
|
||||||
|
if candidates is None:
|
||||||
|
candidates = we_have_the_feature
|
||||||
|
candidate_set = set(candidates)
|
||||||
|
else:
|
||||||
|
# Eliminate any candidates that don't have this feature.
|
||||||
|
candidate_set = candidate_set.intersection(
|
||||||
|
set(we_have_the_feature))
|
||||||
|
|
||||||
|
# The only valid candidates are the ones in candidate_set.
|
||||||
|
# Go through the original list of candidates and pick the first one
|
||||||
|
# that's in candidate_set.
|
||||||
|
if candidate_set is None:
|
||||||
|
return None
|
||||||
|
for candidate in candidates:
|
||||||
|
if candidate in candidate_set:
|
||||||
|
return candidate
|
||||||
|
return None
|
||||||
|
|
||||||
|
# The BeautifulSoup class will take feature lists from developers and use them
|
||||||
|
# to look up builders in this registry.
|
||||||
|
builder_registry = TreeBuilderRegistry()
|
||||||
|
|
||||||
|
class TreeBuilder(object):
|
||||||
|
"""Turn a textual document into a Beautiful Soup object tree."""
|
||||||
|
|
||||||
|
NAME = "[Unknown tree builder]"
|
||||||
|
ALTERNATE_NAMES = []
|
||||||
|
features = []
|
||||||
|
|
||||||
|
is_xml = False
|
||||||
|
picklable = False
|
||||||
|
empty_element_tags = None # A tag will be considered an empty-element
|
||||||
|
# tag when and only when it has no contents.
|
||||||
|
|
||||||
|
# A value for these tag/attribute combinations is a space- or
|
||||||
|
# comma-separated list of CDATA, rather than a single CDATA.
|
||||||
|
DEFAULT_CDATA_LIST_ATTRIBUTES = {}
|
||||||
|
|
||||||
|
# Whitespace should be preserved inside these tags.
|
||||||
|
DEFAULT_PRESERVE_WHITESPACE_TAGS = set()
|
||||||
|
|
||||||
|
# The textual contents of tags with these names should be
|
||||||
|
# instantiated with some class other than NavigableString.
|
||||||
|
DEFAULT_STRING_CONTAINERS = {}
|
||||||
|
|
||||||
|
USE_DEFAULT = object()
|
||||||
|
|
||||||
|
# Most parsers don't keep track of line numbers.
|
||||||
|
TRACKS_LINE_NUMBERS = False
|
||||||
|
|
||||||
|
def __init__(self, multi_valued_attributes=USE_DEFAULT,
|
||||||
|
preserve_whitespace_tags=USE_DEFAULT,
|
||||||
|
store_line_numbers=USE_DEFAULT,
|
||||||
|
string_containers=USE_DEFAULT,
|
||||||
|
):
|
||||||
|
"""Constructor.
|
||||||
|
|
||||||
|
:param multi_valued_attributes: If this is set to None, the
|
||||||
|
TreeBuilder will not turn any values for attributes like
|
||||||
|
'class' into lists. Setting this to a dictionary will
|
||||||
|
customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
|
||||||
|
for an example.
|
||||||
|
|
||||||
|
Internally, these are called "CDATA list attributes", but that
|
||||||
|
probably doesn't make sense to an end-user, so the argument name
|
||||||
|
is `multi_valued_attributes`.
|
||||||
|
|
||||||
|
:param preserve_whitespace_tags: A list of tags to treat
|
||||||
|
the way <pre> tags are treated in HTML. Tags in this list
|
||||||
|
are immune from pretty-printing; their contents will always be
|
||||||
|
output as-is.
|
||||||
|
|
||||||
|
:param string_containers: A dictionary mapping tag names to
|
||||||
|
the classes that should be instantiated to contain the textual
|
||||||
|
contents of those tags. The default is to use NavigableString
|
||||||
|
for every tag, no matter what the name. You can override the
|
||||||
|
default by changing DEFAULT_STRING_CONTAINERS.
|
||||||
|
|
||||||
|
:param store_line_numbers: If the parser keeps track of the
|
||||||
|
line numbers and positions of the original markup, that
|
||||||
|
information will, by default, be stored in each corresponding
|
||||||
|
`Tag` object. You can turn this off by passing
|
||||||
|
store_line_numbers=False. If the parser you're using doesn't
|
||||||
|
keep track of this information, then setting store_line_numbers=True
|
||||||
|
will do nothing.
|
||||||
|
"""
|
||||||
|
self.soup = None
|
||||||
|
if multi_valued_attributes is self.USE_DEFAULT:
|
||||||
|
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
|
||||||
|
self.cdata_list_attributes = multi_valued_attributes
|
||||||
|
if preserve_whitespace_tags is self.USE_DEFAULT:
|
||||||
|
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
|
||||||
|
self.preserve_whitespace_tags = preserve_whitespace_tags
|
||||||
|
if store_line_numbers == self.USE_DEFAULT:
|
||||||
|
store_line_numbers = self.TRACKS_LINE_NUMBERS
|
||||||
|
self.store_line_numbers = store_line_numbers
|
||||||
|
if string_containers == self.USE_DEFAULT:
|
||||||
|
string_containers = self.DEFAULT_STRING_CONTAINERS
|
||||||
|
self.string_containers = string_containers
|
||||||
|
|
||||||
|
def initialize_soup(self, soup):
|
||||||
|
"""The BeautifulSoup object has been initialized and is now
|
||||||
|
being associated with the TreeBuilder.
|
||||||
|
|
||||||
|
:param soup: A BeautifulSoup object.
|
||||||
|
"""
|
||||||
|
self.soup = soup
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
"""Do any work necessary to reset the underlying parser
|
||||||
|
for a new document.
|
||||||
|
|
||||||
|
By default, this does nothing.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def can_be_empty_element(self, tag_name):
|
||||||
|
"""Might a tag with this name be an empty-element tag?
|
||||||
|
|
||||||
|
The final markup may or may not actually present this tag as
|
||||||
|
self-closing.
|
||||||
|
|
||||||
|
For instance: an HTMLBuilder does not consider a <p> tag to be
|
||||||
|
an empty-element tag (it's not in
|
||||||
|
HTMLBuilder.empty_element_tags). This means an empty <p> tag
|
||||||
|
will be presented as "<p></p>", not "<p/>" or "<p>".
|
||||||
|
|
||||||
|
The default implementation has no opinion about which tags are
|
||||||
|
empty-element tags, so a tag will be presented as an
|
||||||
|
empty-element tag if and only if it has no children.
|
||||||
|
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
|
||||||
|
be left alone.
|
||||||
|
|
||||||
|
:param tag_name: The name of a markup tag.
|
||||||
|
"""
|
||||||
|
if self.empty_element_tags is None:
|
||||||
|
return True
|
||||||
|
return tag_name in self.empty_element_tags
|
||||||
|
|
||||||
|
def feed(self, markup):
|
||||||
|
"""Run some incoming markup through some parsing process,
|
||||||
|
populating the `BeautifulSoup` object in self.soup.
|
||||||
|
|
||||||
|
This method is not implemented in TreeBuilder; it must be
|
||||||
|
implemented in subclasses.
|
||||||
|
|
||||||
|
:return: None.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def prepare_markup(self, markup, user_specified_encoding=None,
|
||||||
|
document_declared_encoding=None, exclude_encodings=None):
|
||||||
|
"""Run any preliminary steps necessary to make incoming markup
|
||||||
|
acceptable to the parser.
|
||||||
|
|
||||||
|
:param markup: Some markup -- probably a bytestring.
|
||||||
|
:param user_specified_encoding: The user asked to try this encoding.
|
||||||
|
:param document_declared_encoding: The markup itself claims to be
|
||||||
|
in this encoding. NOTE: This argument is not used by the
|
||||||
|
calling code and can probably be removed.
|
||||||
|
:param exclude_encodings: The user asked _not_ to try any of
|
||||||
|
these encodings.
|
||||||
|
|
||||||
|
:yield: A series of 4-tuples:
|
||||||
|
(markup, encoding, declared encoding,
|
||||||
|
has undergone character replacement)
|
||||||
|
|
||||||
|
Each 4-tuple represents a strategy for converting the
|
||||||
|
document to Unicode and parsing it. Each strategy will be tried
|
||||||
|
in turn.
|
||||||
|
|
||||||
|
By default, the only strategy is to parse the markup
|
||||||
|
as-is. See `LXMLTreeBuilderForXML` and
|
||||||
|
`HTMLParserTreeBuilder` for implementations that take into
|
||||||
|
account the quirks of particular parsers.
|
||||||
|
"""
|
||||||
|
yield markup, None, None, False
|
||||||
|
|
||||||
|
def test_fragment_to_document(self, fragment):
|
||||||
|
"""Wrap an HTML fragment to make it look like a document.
|
||||||
|
|
||||||
|
Different parsers do this differently. For instance, lxml
|
||||||
|
introduces an empty <head> tag, and html5lib
|
||||||
|
doesn't. Abstracting this away lets us write simple tests
|
||||||
|
which run HTML fragments through the parser and compare the
|
||||||
|
results against other HTML fragments.
|
||||||
|
|
||||||
|
This method should not be used outside of tests.
|
||||||
|
|
||||||
|
:param fragment: A string -- fragment of HTML.
|
||||||
|
:return: A string -- a full HTML document.
|
||||||
|
"""
|
||||||
|
return fragment
|
||||||
|
|
||||||
|
def set_up_substitutions(self, tag):
|
||||||
|
"""Set up any substitutions that will need to be performed on
|
||||||
|
a `Tag` when it's output as a string.
|
||||||
|
|
||||||
|
By default, this does nothing. See `HTMLTreeBuilder` for a
|
||||||
|
case where this is used.
|
||||||
|
|
||||||
|
:param tag: A `Tag`
|
||||||
|
:return: Whether or not a substitution was performed.
|
||||||
|
"""
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
|
||||||
|
"""When an attribute value is associated with a tag that can
|
||||||
|
have multiple values for that attribute, convert the string
|
||||||
|
value to a list of strings.
|
||||||
|
|
||||||
|
Basically, replaces class="foo bar" with class=["foo", "bar"]
|
||||||
|
|
||||||
|
NOTE: This method modifies its input in place.
|
||||||
|
|
||||||
|
:param tag_name: The name of a tag.
|
||||||
|
:param attrs: A dictionary containing the tag's attributes.
|
||||||
|
Any appropriate attribute values will be modified in place.
|
||||||
|
"""
|
||||||
|
if not attrs:
|
||||||
|
return attrs
|
||||||
|
if self.cdata_list_attributes:
|
||||||
|
universal = self.cdata_list_attributes.get('*', [])
|
||||||
|
tag_specific = self.cdata_list_attributes.get(
|
||||||
|
tag_name.lower(), None)
|
||||||
|
for attr in list(attrs.keys()):
|
||||||
|
if attr in universal or (tag_specific and attr in tag_specific):
|
||||||
|
# We have a "class"-type attribute whose string
|
||||||
|
# value is a whitespace-separated list of
|
||||||
|
# values. Split it into a list.
|
||||||
|
value = attrs[attr]
|
||||||
|
if isinstance(value, str):
|
||||||
|
values = nonwhitespace_re.findall(value)
|
||||||
|
else:
|
||||||
|
# html5lib sometimes calls setAttributes twice
|
||||||
|
# for the same tag when rearranging the parse
|
||||||
|
# tree. On the second call the attribute value
|
||||||
|
# here is already a list. If this happens,
|
||||||
|
# leave the value alone rather than trying to
|
||||||
|
# split it again.
|
||||||
|
values = value
|
||||||
|
attrs[attr] = values
|
||||||
|
return attrs


class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events.

    This is not currently used for anything, but it demonstrates
    how a simple TreeBuilder would work.
    """

    def feed(self, markup):
        raise NotImplementedError()

    def close(self):
        pass

    def startElement(self, name, attrs):
        attrs = dict((key[1], value) for key, value in list(attrs.items()))
        #print("Start %s, %r" % (name, attrs))
        self.soup.handle_starttag(name, attrs)

    def endElement(self, name):
        #print("End %s" % name)
        self.soup.handle_endtag(name)

    def startElementNS(self, nsTuple, nodeName, attrs):
        # Throw away (ns, nodeName) for now.
        self.startElement(nodeName, attrs)

    def endElementNS(self, nsTuple, nodeName):
        # Throw away (ns, nodeName) for now.
        self.endElement(nodeName)
        #handler.endElementNS((ns, node.nodeName), node.nodeName)

    def startPrefixMapping(self, prefix, nodeValue):
        # Ignore the prefix for now.
        pass

    def endPrefixMapping(self, prefix):
        # Ignore the prefix for now.
        # handler.endPrefixMapping(prefix)
        pass

    def characters(self, content):
        self.soup.handle_data(content)

    def startDocument(self):
        pass

    def endDocument(self):
        pass


class HTMLTreeBuilder(TreeBuilder):
    """This TreeBuilder knows facts about HTML, such as which tags
    are empty-element tags.
    """

    empty_element_tags = set([
        # These are from HTML5.
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input',
        'keygen', 'link', 'menuitem', 'meta', 'param', 'source',
        'track', 'wbr',

        # These are from earlier versions of HTML and are removed in HTML5.
        'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex',
        'nextid', 'spacer',
    ])
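
    # Illustration (a typical result; exact serialization of a given
    # document can vary slightly from parser to parser):
    #
    #     >>> from bs4 import BeautifulSoup
    #     >>> BeautifulSoup("<br>content<p>", "html.parser").decode()
    #     '<br/>content<p></p>'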

    # The HTML standard defines these as block-level elements. Beautiful
    # Soup does not treat these elements differently from other elements,
    # but it may do so eventually, and this information is available if
    # you need to use it.
    block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])

    # These HTML tags need special treatment so they can be
    # represented by a string class other than NavigableString.
    #
    # For some of these tags, it's because the HTML standard defines
    # an unusual content model for them. I made this list by going
    # through the HTML spec
    # (https://html.spec.whatwg.org/#metadata-content) and looking for
    # "metadata content" elements that can contain strings.
    #
    # The Ruby tags (<rt> and <rp>) are here despite being normal
    # "phrasing content" tags, because the content they contain is
    # qualitatively different from other text in the document, and it
    # can be useful to be able to distinguish it.
    #
    # TODO: Arguably <noscript> could go here, but it seems
    # qualitatively different from the other tags.
    DEFAULT_STRING_CONTAINERS = {
        'rt': RubyTextString,
        'rp': RubyParenthesisString,
        'style': Stylesheet,
        'script': Script,
        'template': TemplateString,
    }

    # The HTML standard defines these attributes as containing a
    # space-separated list of values, not a single value. That is,
    # class="foo bar" means that the 'class' attribute has two values,
    # 'foo' and 'bar', not the single value 'foo bar'. When we
    # encounter one of these attributes, we will parse its value into
    # a list of values if possible. Upon output, the list will be
    # converted back into a string.
    DEFAULT_CDATA_LIST_ATTRIBUTES = {
        "*": ['class', 'accesskey', 'dropzone'],
        "a": ['rel', 'rev'],
        "link": ['rel', 'rev'],
        "td": ["headers"],
        "th": ["headers"],
        "form": ["accept-charset"],
        "object": ["archive"],

        # These are HTML5 specific, as are *.accesskey and *.dropzone above.
        "area": ["rel"],
        "icon": ["sizes"],
        "iframe": ["sandbox"],
        "output": ["for"],
    }

    DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    def set_up_substitutions(self, tag):
        """Replace the declared encoding in a <meta> tag with a placeholder,
        to be substituted when the tag is output to a string.

        An HTML document may come in to Beautiful Soup as one
        encoding, but exit in a different encoding, and the <meta> tag
        needs to be changed to reflect this.

        :param tag: A `Tag`
        :return: Whether or not a substitution was performed.
        """
        # We are only interested in <meta> tags.
        if tag.name != 'meta':
            return False

        http_equiv = tag.get('http-equiv')
        content = tag.get('content')
        charset = tag.get('charset')

        # We are interested in <meta> tags that say what encoding the
        # document was originally in. This means HTML 5-style <meta>
        # tags that provide the "charset" attribute. It also means
        # HTML 4-style <meta> tags that provide the "content"
        # attribute and have "http-equiv" set to "content-type".
        #
        # In both cases we will replace the value of the appropriate
        # attribute with a standin object that can take on any
        # encoding.
        meta_encoding = None
        if charset is not None:
            # HTML 5 style:
            # <meta charset="utf8">
            meta_encoding = charset
            tag['charset'] = CharsetMetaAttributeValue(charset)

        elif (content is not None and http_equiv is not None
              and http_equiv.lower() == 'content-type'):
            # HTML 4 style:
            # <meta http-equiv="content-type" content="text/html; charset=utf8">
            tag['content'] = ContentMetaAttributeValue(content)

        return (meta_encoding is not None)
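
    # A sketch of the substitution in action (output shown is the typical
    # round trip; exact attribute formatting depends on the document):
    #
    #     >>> from bs4 import BeautifulSoup
    #     >>> soup = BeautifulSoup(
    #     ...     '<html><head><meta charset="utf-8"/></head></html>',
    #     ...     'html.parser')
    #     >>> soup.encode('latin-1')
    #     b'<html><head><meta charset="latin-1"/></head></html>'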


class DetectsXMLParsedAsHTML(object):
    """A mixin class for any class (a TreeBuilder, or some class used by a
    TreeBuilder) that's in a position to detect whether an XML
    document is being incorrectly parsed as HTML, and issue an
    appropriate warning.

    This requires being able to observe an incoming processing
    instruction that might be an XML declaration, and also being able to
    observe tags as they're opened. If you can't do that for a given
    TreeBuilder, there's a less reliable implementation based on
    examining the raw markup.
    """

    # Regular expression for seeing if markup has an <html> tag.
    LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
    LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)

    XML_PREFIX = '<?xml'
    XML_PREFIX_B = b'<?xml'

    @classmethod
    def warn_if_markup_looks_like_xml(cls, markup):
        """Perform a check on some markup to see if it looks like XML
        that's not XHTML. If so, issue a warning.

        This is much less reliable than doing the check while parsing,
        but some of the tree builders can't do that.

        :return: True if the markup looks like non-XHTML XML, False
            otherwise.
        """
        if isinstance(markup, bytes):
            prefix = cls.XML_PREFIX_B
            looks_like_html = cls.LOOKS_LIKE_HTML_B
        else:
            prefix = cls.XML_PREFIX
            looks_like_html = cls.LOOKS_LIKE_HTML

        if (markup is not None
            and markup.startswith(prefix)
            and not looks_like_html.search(markup[:500])
        ):
            cls._warn()
            return True
        return False
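
    # Sketch of the raw-markup check (note that it only examines the
    # first 500 characters of the markup):
    #
    #     >>> from bs4.builder import DetectsXMLParsedAsHTML
    #     >>> DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
    #     ...     '<?xml version="1.0"?><root/>')  # issues XMLParsedAsHTMLWarning
    #     True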

    @classmethod
    def _warn(cls):
        """Issue a warning about XML being parsed as HTML."""
        warnings.warn(
            XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning
        )

    def _initialize_xml_detector(self):
        """Call this method before parsing a document."""
        self._first_processing_instruction = None
        self._root_tag = None

    def _document_might_be_xml(self, processing_instruction):
        """Call this method when encountering an XML declaration, or a
        "processing instruction" that might be an XML declaration.
        """
        if (self._first_processing_instruction is not None
            or self._root_tag is not None):
            # The document has already started. Don't bother checking
            # anymore.
            return

        self._first_processing_instruction = processing_instruction

        # We won't know until we encounter the first tag whether or
        # not this is actually a problem.

    def _root_tag_encountered(self, name):
        """Call this when you encounter the document's root tag.

        This is where we actually check whether an XML document is
        being incorrectly parsed as HTML, and issue the warning.
        """
        if self._root_tag is not None:
            # This method was incorrectly called multiple times. Do
            # nothing.
            return

        self._root_tag = name
        if (name != 'html' and self._first_processing_instruction is not None
            and self._first_processing_instruction.lower().startswith('xml ')):
            # We encountered an XML declaration and then a tag other
            # than 'html'. This is a reliable indicator that a
            # non-XHTML XML document is being parsed as HTML.
            self._warn()


def register_treebuilders_from(module):
    """Copy TreeBuilders from the given module into this module."""
    this_module = sys.modules[__name__]
    for name in module.__all__:
        obj = getattr(module, name)

        if issubclass(obj, TreeBuilder):
            setattr(this_module, name, obj)
            this_module.__all__.append(name)
            # Register the builder while we're at it.
            this_module.builder_registry.register(obj)


class ParserRejectedMarkup(Exception):
    """An Exception to be raised when the underlying parser simply
    refuses to parse the given markup.
    """
    def __init__(self, message_or_exception):
        """Explain why the parser rejected the given markup, either
        with a textual explanation or another exception.
        """
        if isinstance(message_or_exception, Exception):
            e = message_or_exception
            message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
        super(ParserRejectedMarkup, self).__init__(message_or_exception)


# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
    from . import _html5lib
    register_treebuilders_from(_html5lib)
except ImportError:
    # They don't have html5lib installed.
    pass
try:
    from . import _lxml
    register_treebuilders_from(_lxml)
except ImportError:
    # They don't have lxml installed.
    pass
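
# The practical effect of the registration order above (which builder
# actually wins depends on what's installed in the running environment):
#
#     >>> from bs4.builder import builder_registry
#     >>> builder_registry.lookup('html').NAME
#     'lxml'    # with lxml installed; else 'html5lib', else 'html.parser'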
@ -0,0 +1,473 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'HTML5TreeBuilder',
    ]

import warnings
import re
from bs4.builder import (
    DetectsXMLParsedAsHTML,
    PERMISSIVE,
    HTML,
    HTML_5,
    HTMLTreeBuilder,
    )
from bs4.element import (
    NamespacedAttribute,
    nonwhitespace_re,
)
import html5lib
from html5lib.constants import (
    namespaces,
    prefixes,
    )
from bs4.element import (
    Comment,
    Doctype,
    NavigableString,
    Tag,
    )

try:
    # Pre-0.99999999
    from html5lib.treebuilders import _base as treebuilder_base
    new_html5lib = False
except ImportError as e:
    # 0.99999999 and up
    from html5lib.treebuilders import base as treebuilder_base
    new_html5lib = True


class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree.

    Note that this TreeBuilder does not support some features common
    to HTML TreeBuilders. Some of these features could theoretically
    be implemented, but at the very least it's quite difficult,
    because html5lib moves the parse tree around as it's being built.

    * This TreeBuilder doesn't use different subclasses of NavigableString
      based on the name of the tag in which the string was found.

    * You can't use a SoupStrainer to parse only part of a document.
    """

    NAME = "html5lib"

    features = [NAME, PERMISSIVE, HTML_5, HTML]

    # html5lib can tell us which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True

    def prepare_markup(self, markup, user_specified_encoding,
                       document_declared_encoding=None, exclude_encodings=None):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding

        # document_declared_encoding and exclude_encodings aren't used
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        if exclude_encodings:
            warnings.warn(
                "You provided a value for exclude_encodings, but the "
                "html5lib tree builder doesn't support exclude_encodings.")

        # html5lib only parses HTML, so if it's given XML that's worth
        # noting.
        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)

        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn(
                "You provided a value for parse_only, but the html5lib "
                "tree builder doesn't support parse_only. The entire "
                "document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        self.underlying_builder.parser = parser
        extra_kwargs = dict()
        if not isinstance(markup, str):
            if new_html5lib:
                extra_kwargs['override_encoding'] = self.user_specified_encoding
            else:
                extra_kwargs['encoding'] = self.user_specified_encoding
        doc = parser.parse(markup, **extra_kwargs)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            original_encoding = parser.tokenizer.stream.charEncoding[0]
            if not isinstance(original_encoding, str):
                # In 0.99999999 and up, the encoding is an html5lib
                # Encoding object. We want to use a string for compatibility
                # with other tree builders.
                original_encoding = original_encoding.name
            doc.original_encoding = original_encoding
        self.underlying_builder.parser = None

    def create_treebuilder(self, namespaceHTMLElements):
        self.underlying_builder = TreeBuilderForHtml5lib(
            namespaceHTMLElements, self.soup,
            store_line_numbers=self.store_line_numbers
        )
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><head></head><body>%s</body></html>' % fragment
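
    # For comparison with other builders (requires html5lib installed):
    #
    #     >>> from bs4 import BeautifulSoup
    #     >>> BeautifulSoup("<b>text</b>", "html5lib").decode()
    #     '<html><head></head><body><b>text</b></body></html>'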


class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):

    def __init__(self, namespaceHTMLElements, soup=None,
                 store_line_numbers=True, **kwargs):
        if soup:
            self.soup = soup
        else:
            from bs4 import BeautifulSoup
            # TODO: Why is the parser 'html.parser' here? To avoid an
            # infinite loop?
            self.soup = BeautifulSoup(
                "", "html.parser", store_line_numbers=store_line_numbers,
                **kwargs
            )
        # TODO: What are **kwargs exactly? Should they be passed in
        # here in addition to/instead of being passed to the BeautifulSoup
        # constructor?
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

        # This will be set later to an html5lib.html5parser.HTMLParser
        # object, which we can use to track the current line number.
        self.parser = None
        self.store_line_numbers = store_line_numbers

    def documentClass(self):
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        kwargs = {}
        if self.parser and self.store_line_numbers:
            # This represents the point immediately after the end of the
            # tag. We don't know when the tag started, but we do know
            # where it ended -- the character just before this one.
            sourceline, sourcepos = self.parser.tokenizer.stream.position()
            kwargs['sourceline'] = sourceline
            kwargs['sourcepos'] = sourcepos - 1
        tag = self.soup.new_tag(name, namespace, **kwargs)

        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        from bs4 import BeautifulSoup
        # TODO: Why is the parser 'html.parser' here? To avoid an
        # infinite loop?
        self.soup = BeautifulSoup("", "html.parser")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return treebuilder_base.TreeBuilder.getFragment(self).element

    def testSerializer(self, element):
        from bs4 import BeautifulSoup
        rv = []
        doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')

        def serializeElement(element, indent=0):
            if isinstance(element, BeautifulSoup):
                pass
            if isinstance(element, Doctype):
                m = doctype_re.match(element)
                if m:
                    name = m.group(1)
                    if m.lastindex > 1:
                        publicId = m.group(2) or ""
                        systemId = m.group(3) or m.group(4) or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif isinstance(element, Comment):
                rv.append("|%s<!-- %s -->" % (' ' * indent, element))
            elif isinstance(element, NavigableString):
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                if element.namespace:
                    name = "%s %s" % (prefixes[element.namespace],
                                      element.name)
                else:
                    name = element.name
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.attrs:
                    attributes = []
                    for name, value in list(element.attrs.items()):
                        if isinstance(name, NamespacedAttribute):
                            name = "%s %s" % (prefixes[name.namespace], name.name)
                        if isinstance(value, list):
                            value = " ".join(value)
                        attributes.append((name, value))

                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
                indent += 2
                for child in element.children:
                    serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)


class AttrList(object):
    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return list(self.attrs.items()).__iter__()

    def __setitem__(self, name, value):
        # If this attribute is a multi-valued attribute for this element,
        # turn its value into a list.
        list_attr = self.element.cdata_list_attributes or {}
        if (name in list_attr.get('*', [])
            or (self.element.name in list_attr
                and name in list_attr[self.element.name])):
            # A node that is being cloned may have already undergone
            # this procedure.
            if not isinstance(value, list):
                value = nonwhitespace_re.findall(value)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())


class Element(treebuilder_base.Node):
    def __init__(self, element, soup, namespace):
        treebuilder_base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        string_child = child = None
        if isinstance(node, str):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
            node.parent = self
        else:
            child = node.element
            node.parent = self

        if not isinstance(child, str) and child.parent is not None:
            node.element.extract()

        if (string_child is not None and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, str):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            elif self.element.next_element is not None:
                # Something from further ahead in the parse tree is
                # being inserted into this earlier element. This is
                # very annoying because it means an expensive search
                # for the last element in the tree.
                most_recent_element = self.soup._last_descendant()
            else:
                most_recent_element = self.element

            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        if isinstance(self.element, Comment):
            return {}
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in list(attributes.items()):
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)

    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        # print("MOVE", self.element.contents)
        # print("FROM", self.element)
        # print("TO", new_parent.element)

        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent.
            first_child = to_append[0]
            if new_parents_last_descendant is not None:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant is not None:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child is not None:
                new_parents_last_child.next_sibling = first_child

            # Find the very last element being moved. It is now the
            # parent's last descendant. It has no .next_sibling and
            # its .next_element is whatever the previous last
            # descendant had.
            last_childs_last_descendant = to_append[-1]._last_descendant(False, True)

            last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element is not None:
                # TODO: This code has no test coverage and I'm not sure
                # how to get html5lib to go through this path, but it's
                # just the other side of the previous line.
                new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
            last_childs_last_descendant.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

        # print("DONE WITH MOVE")
        # print("FROM", self.element)
        # print("TO", new_parent_element)

    def cloneNode(self):
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)


class TextNode(Element):
    def __init__(self, element, soup):
        treebuilder_base.Node.__init__(self, None)
        self.element = element
        self.soup = soup

    def cloneNode(self):
        raise NotImplementedError
@ -0,0 +1,499 @@
# encoding: utf-8
"""Use the HTMLParser library to parse HTML files that aren't too bad."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'HTMLParserTreeBuilder',
    ]

from html.parser import HTMLParser

try:
    from html.parser import HTMLParseError
except ImportError as e:
    # HTMLParseError was removed in Python 3.5. Since it can never be
    # thrown in 3.5, we can just define our own class as a placeholder.
    class HTMLParseError(Exception):
        pass

import sys
import warnings

# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4


from bs4.element import (
    CData,
    Comment,
    Declaration,
    Doctype,
    ProcessingInstruction,
    )
from bs4.dammit import EntitySubstitution, UnicodeDammit

from bs4.builder import (
    DetectsXMLParsedAsHTML,
    HTML,
    HTMLTreeBuilder,
    STRICT,
    )


HTMLPARSER = 'html.parser'

class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
    """A subclass of the Python standard library's HTMLParser class, which
    listens for HTMLParser events and translates them into calls
    to Beautiful Soup's tree construction API.
    """

    # Strategies for handling duplicate attributes
    IGNORE = 'ignore'
    REPLACE = 'replace'

    def __init__(self, *args, **kwargs):
        """Constructor.

        :param on_duplicate_attribute: A strategy for what to do if a
            tag includes the same attribute more than once. Accepted
            values are: REPLACE (replace earlier values with later
            ones, the default), IGNORE (keep the earliest value
            encountered), or a callable. A callable must take three
            arguments: the dictionary of attributes already processed,
            the name of the duplicate attribute, and the most recent value
            encountered.
        """
        self.on_duplicate_attribute = kwargs.pop(
            'on_duplicate_attribute', self.REPLACE
        )
        HTMLParser.__init__(self, *args, **kwargs)

        # Keep a list of empty-element tags that were encountered
        # without an explicit closing tag. If we encounter a closing tag
        # of this type, we'll associate it with one of those entries.
        #
        # This isn't a stack because we don't care about the
        # order. It's a list of closing tags we've already handled and
        # will ignore, assuming they ever show up.
        self.already_closed_empty_element = []

        self._initialize_xml_detector()

    def error(self, msg):
        """In Python 3, HTMLParser subclasses must implement error(), although
        this requirement doesn't appear to be documented.

        In Python 2, HTMLParser implements error() by raising an exception,
        which we don't want to do.

        In any event, this method is called only on very strange
        markup and our best strategy is to pretend it didn't happen
        and keep going.
        """
        warnings.warn(msg)

    def handle_startendtag(self, name, attrs):
        """Handle an incoming empty-element tag.

        This is only called when the markup looks like <tag/>.

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        """
        # Passing handle_empty_element=False tells handle_starttag not
        # to close the tag just because its name matches a known
        # empty-element tag. We know that this is an empty-element tag,
        # and we want to call handle_endtag ourselves.
        tag = self.handle_starttag(name, attrs, handle_empty_element=False)
        self.handle_endtag(name)

    def handle_starttag(self, name, attrs, handle_empty_element=True):
        """Handle an opening tag, e.g. '<tag>'

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        :param handle_empty_element: True if this tag is known to be
            an empty-element tag (i.e. there is not expected to be any
            closing tag).
        """
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
            # Change None attribute values to the empty string
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            if key in attr_dict:
                # A single attribute shows up multiple times in this
                # tag. How to handle it depends on the
                # on_duplicate_attribute setting.
                on_dupe = self.on_duplicate_attribute
                if on_dupe == self.IGNORE:
                    pass
                elif on_dupe in (None, self.REPLACE):
                    attr_dict[key] = value
                else:
                    on_dupe(attr_dict, key, value)
            else:
                attr_dict[key] = value
        #print("START", name)
        sourceline, sourcepos = self.getpos()
        tag = self.soup.handle_starttag(
            name, None, None, attr_dict, sourceline=sourceline,
            sourcepos=sourcepos
        )
        if tag and tag.is_empty_element and handle_empty_element:
            # Unlike other parsers, html.parser doesn't send separate end tag
            # events for empty-element tags. (It's handled in
            # handle_startendtag, but only if the original markup looked like
            # <tag/>.)
            #
            # So we need to call handle_endtag() ourselves. Since we
            # know the start event is identical to the end event, we
            # don't want handle_endtag() to cross off any previous end
            # events for tags of this name.
            self.handle_endtag(name, check_already_closed=False)

            # But we might encounter an explicit closing tag for this tag
            # later on. If so, we want to ignore it.
            self.already_closed_empty_element.append(name)

        if self._root_tag is None:
            self._root_tag_encountered(name)
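
    # How the duplicate-attribute strategies play out in practice (the
    # markup here is invented for illustration):
    #
    #     >>> from bs4 import BeautifulSoup
    #     >>> markup = '<a class="first" class="second"></a>'
    #     >>> BeautifulSoup(markup, 'html.parser').a['class']
    #     ['second']
    #     >>> BeautifulSoup(markup, 'html.parser',
    #     ...               on_duplicate_attribute='ignore').a['class']
    #     ['first']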

    def handle_endtag(self, name, check_already_closed=True):
        """Handle a closing tag, e.g. '</tag>'

        :param name: A tag name.
        :param check_already_closed: True if this tag is expected to
            be the closing portion of an empty-element tag,
            e.g. '<tag></tag>'.
        """
        #print("END", name)
        if check_already_closed and name in self.already_closed_empty_element:
            # This is a redundant end tag for an empty-element tag.
            # We've already called handle_endtag() for it, so just
            # check it off the list.
            #print("ALREADY CLOSED", name)
            self.already_closed_empty_element.remove(name)
        else:
            self.soup.handle_endtag(name)

    def handle_data(self, data):
        """Handle some textual data that shows up between tags."""
        self.soup.handle_data(data)

    def handle_charref(self, name):
        """Handle a numeric character reference by converting it to the
        corresponding Unicode character and treating it as textual
        data.

        :param name: Character number, possibly in hexadecimal.
        """
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed in all supported versions.
        # http://bugs.python.org/issue13633
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
            real_name = int(name.lstrip('X'), 16)
        else:
            real_name = int(name)

        data = None
        if real_name < 256:
            # HTML numeric entities are supposed to reference Unicode
            # code points, but sometimes they reference code points in
            # some other encoding (ahem, Windows-1252). E.g. &#147;
            # instead of &#x201C; for LEFT DOUBLE QUOTATION MARK. This
            # code tries to detect this situation and compensate.
            for encoding in (self.soup.original_encoding, 'windows-1252'):
                if not encoding:
                    continue
                try:
                    data = bytearray([real_name]).decode(encoding)
                except UnicodeDecodeError as e:
                    pass
        if not data:
            try:
                data = chr(real_name)
            except (ValueError, OverflowError) as e:
                pass
        data = data or "\N{REPLACEMENT CHARACTER}"
        self.handle_data(data)
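
    # Sketch of the compensation at work (with plain-string input there
    # is no detected document encoding, so the windows-1252 fallback
    # applies):
    #
    #     >>> from bs4 import BeautifulSoup
    #     >>> BeautifulSoup('<b>&#147;quoted&#148;</b>', 'html.parser').b.string
    #     '\u201cquoted\u201d'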

    def handle_entityref(self, name):
        """Handle a named entity reference by converting it to the
        corresponding Unicode character(s) and treating it as textual
        data.

        :param name: Name of the entity reference.
        """
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # If this were XML, it would be ambiguous whether "&foo"
            # was a character entity reference with a missing
            # semicolon or the literal string "&foo". Since this is
            # HTML, we have a complete list of all character entity references,
            # and this one wasn't found, so assume it's the literal string "&foo".
            data = "&%s" % name
        self.handle_data(data)

    def handle_comment(self, data):
        """Handle an HTML comment.

        :param data: The text of the comment.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        """Handle a DOCTYPE declaration.

        :param data: The text of the declaration.
        """
        self.soup.endData()
        data = data[len("DOCTYPE "):]
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        """Handle a declaration of unknown type -- probably a CDATA block.

        :param data: The text of the declaration.
        """
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
        else:
            cls = Declaration
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(cls)

    def handle_pi(self, data):
        """Handle a processing instruction.

        :param data: The text of the instruction.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self._document_might_be_xml(data)
        self.soup.endData(ProcessingInstruction)


class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """A Beautiful Soup `TreeBuilder` that uses the `HTMLParser` parser,
    found in the Python standard library.
    """
    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    # html.parser knows which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True

    def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
        """Constructor.

        :param parser_args: Positional arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param parser_kwargs: Keyword arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param kwargs: Keyword arguments for the superclass constructor.
        """
        # Some keyword arguments will be pulled out of kwargs and placed
        # into parser_kwargs.
        extra_parser_kwargs = dict()
        for arg in ('on_duplicate_attribute',):
            if arg in kwargs:
                value = kwargs.pop(arg)
                extra_parser_kwargs[arg] = value
        super(HTMLParserTreeBuilder, self).__init__(**kwargs)
        parser_args = parser_args or []
        parser_kwargs = parser_kwargs or {}
        parser_kwargs.update(extra_parser_kwargs)
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            parser_kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            parser_kwargs['convert_charrefs'] = False
        self.parser_args = (parser_args, parser_kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
            (markup, encoding, declared encoding,
             has undergone character replacement)

            Each 4-tuple represents a strategy for converting the
            document to Unicode and parsing it. Each strategy will be tried
            in turn.
        """
        if isinstance(markup, str):
            # Parse Unicode as-is.
            yield (markup, None, None, False)
            return

        # Ask UnicodeDammit to sniff the most likely encoding.

        # This was provided by the end-user; treat it as a known
        # definite encoding per the algorithm laid out in the HTML5
        # spec. (See the EncodingDetector class for details.)
        known_definite_encodings = [user_specified_encoding]

        # This was found in the document; treat it as a slightly lower-priority
        # user encoding.
        user_encodings = [document_declared_encoding]

        dammit = UnicodeDammit(
            markup,
            known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings,
            is_html=True,
            exclude_encodings=exclude_encodings
        )
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        """Run some incoming markup through some parsing process,
        populating the `BeautifulSoup` object in self.soup.
        """
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
            parser.close()
        except HTMLParseError as e:
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e
        parser.already_closed_empty_element = []


# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    CONSTRUCTOR_TAKES_STRICT = True
@ -0,0 +1,386 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
    ]

try:
    from collections.abc import Callable # Python 3.6
except ImportError as e:
    from collections import Callable

from io import BytesIO
from io import StringIO
from lxml import etree
from bs4.element import (
    Comment,
    Doctype,
    NamespacedAttribute,
    ProcessingInstruction,
    XMLProcessingInstruction,
)
from bs4.builder import (
    DetectsXMLParsedAsHTML,
    FAST,
    HTML,
    HTMLTreeBuilder,
    PERMISSIVE,
    ParserRejectedMarkup,
    TreeBuilder,
    XML)
from bs4.dammit import EncodingDetector

LXML = 'lxml'

def _invert(d):
    "Invert a dictionary."
    return dict((v, k) for k, v in list(d.items()))
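
# e.g. _invert({'xml': 'http://www.w3.org/XML/1998/namespace'})
# returns {'http://www.w3.org/XML/1998/namespace': 'xml'}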


class LXMLTreeBuilderForXML(TreeBuilder):
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True
    processing_instruction_class = XMLProcessingInstruction

    NAME = "lxml-xml"
    ALTERNATE_NAMES = ["xml"]

    # Well, it's permissive by XML parser standards.
    features = [NAME, LXML, XML, FAST, PERMISSIVE]

    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')

    DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS)

    # NOTE: If we parsed Element objects and looked at .sourceline,
    # we'd be able to see the line numbers from the original document.
    # But instead we build an XMLParser or HTMLParser object to serve
    # as the target of parse messages, and those messages don't include
    # line numbers.
    # See: https://bugs.launchpad.net/lxml/+bug/1846906

    def initialize_soup(self, soup):
        """Let the BeautifulSoup object know about the standard namespace
        mapping.

        :param soup: A `BeautifulSoup`.
        """
        super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
        self._register_namespaces(self.DEFAULT_NSMAPS)

    def _register_namespaces(self, mapping):
        """Let the BeautifulSoup object know about namespaces encountered
        while parsing the document.

        This might be useful later on when creating CSS selectors.

        This will track (almost) all namespaces, even ones that were
        only in scope for part of the document. If two namespaces have
        the same prefix, only the first one encountered will be
        tracked. Un-prefixed namespaces are not tracked.

        :param mapping: A dictionary mapping namespace prefixes to URIs.
        """
        for key, value in list(mapping.items()):
            # This is 'if key' and not 'if key is not None' because we
            # don't track un-prefixed namespaces. Soupselect will
            # treat an un-prefixed namespace as the default, which
            # causes confusion in some cases.
            if key and key not in self.soup._namespaces:
                # Let the BeautifulSoup object know about a new namespace.
                # If there are multiple namespaces defined with the same
                # prefix, the first one in the document takes precedence.
                self.soup._namespaces[key] = value

    def default_parser(self, encoding):
        """Find the default parser for the given encoding.

        :param encoding: A string.
        :return: Either a parser object or a class, which
            will be instantiated with default arguments.
        """
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        """Instantiate an appropriate parser for the given encoding.

        :param encoding: A string.
        :return: A parser object such as an `etree.XMLParser`.
        """
        # Use the default parser.
        parser = self.default_parser(encoding)

        if isinstance(parser, Callable):
            # Instantiate the parser with default arguments.
            parser = parser(
                target=self, strip_cdata=False, recover=True, encoding=encoding
            )
        return parser

    def __init__(self, parser=None, empty_element_tags=None, **kwargs):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
        self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
        super(LXMLTreeBuilderForXML, self).__init__(**kwargs)

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)
|
||||||
|
|
||||||
|
def prepare_markup(self, markup, user_specified_encoding=None,
|
||||||
|
exclude_encodings=None,
|
||||||
|
document_declared_encoding=None):
|
||||||
|
"""Run any preliminary steps necessary to make incoming markup
|
||||||
|
acceptable to the parser.
|
||||||
|
|
||||||
|
lxml really wants to get a bytestring and convert it to
|
||||||
|
Unicode itself. So instead of using UnicodeDammit to convert
|
||||||
|
the bytestring to Unicode using different encodings, this
|
||||||
|
implementation uses EncodingDetector to iterate over the
|
||||||
|
encodings, and tell lxml to try to parse the document as each
|
||||||
|
one in turn.
|
||||||
|
|
||||||
|
:param markup: Some markup -- hopefully a bytestring.
|
||||||
|
:param user_specified_encoding: The user asked to try this encoding.
|
||||||
|
:param document_declared_encoding: The markup itself claims to be
|
||||||
|
in this encoding.
|
||||||
|
:param exclude_encodings: The user asked _not_ to try any of
|
||||||
|
these encodings.
|
||||||
|
|
||||||
|
:yield: A series of 4-tuples:
|
||||||
|
(markup, encoding, declared encoding,
|
||||||
|
has undergone character replacement)
|
||||||
|
|
||||||
|
Each 4-tuple represents a strategy for converting the
|
||||||
|
document to Unicode and parsing it. Each strategy will be tried
|
||||||
|
in turn.
|
||||||
|
"""
|
||||||
|
is_html = not self.is_xml
|
||||||
|
if is_html:
|
||||||
|
self.processing_instruction_class = ProcessingInstruction
|
||||||
|
# We're in HTML mode, so if we're given XML, that's worth
|
||||||
|
# noting.
|
||||||
|
DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
|
||||||
|
else:
|
||||||
|
self.processing_instruction_class = XMLProcessingInstruction
|
||||||
|
|
||||||
|
if isinstance(markup, str):
|
||||||
|
# We were given Unicode. Maybe lxml can parse Unicode on
|
||||||
|
# this system?
|
||||||
|
|
||||||
|
# TODO: This is a workaround for
|
||||||
|
# https://bugs.launchpad.net/lxml/+bug/1948551.
|
||||||
|
# We can remove it once the upstream issue is fixed.
|
||||||
|
if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
|
||||||
|
markup = markup[1:]
|
||||||
|
yield markup, None, document_declared_encoding, False
|
||||||
|
|
||||||
|
if isinstance(markup, str):
|
||||||
|
# No, apparently not. Convert the Unicode to UTF-8 and
|
||||||
|
# tell lxml to parse it as UTF-8.
|
||||||
|
yield (markup.encode("utf8"), "utf8",
|
||||||
|
document_declared_encoding, False)
|
||||||
|
|
||||||
|
# This was provided by the end-user; treat it as a known
|
||||||
|
# definite encoding per the algorithm laid out in the HTML5
|
||||||
|
# spec. (See the EncodingDetector class for details.)
|
||||||
|
known_definite_encodings = [user_specified_encoding]
|
||||||
|
|
||||||
|
# This was found in the document; treat it as a slightly lower-priority
|
||||||
|
# user encoding.
|
||||||
|
user_encodings = [document_declared_encoding]
|
||||||
|
detector = EncodingDetector(
|
||||||
|
markup, known_definite_encodings=known_definite_encodings,
|
||||||
|
user_encodings=user_encodings, is_html=is_html,
|
||||||
|
exclude_encodings=exclude_encodings
|
||||||
|
)
|
||||||
|
for encoding in detector.encodings:
|
||||||
|
yield (detector.markup, encoding, document_declared_encoding, False)
|
||||||
|
|
||||||
|
def feed(self, markup):
|
||||||
|
if isinstance(markup, bytes):
|
||||||
|
markup = BytesIO(markup)
|
||||||
|
elif isinstance(markup, str):
|
||||||
|
markup = StringIO(markup)
|
||||||
|
|
||||||
|
# Call feed() at least once, even if the markup is empty,
|
||||||
|
# or the parser won't be initialized.
|
||||||
|
data = markup.read(self.CHUNK_SIZE)
|
||||||
|
try:
|
||||||
|
self.parser = self.parser_for(self.soup.original_encoding)
|
||||||
|
self.parser.feed(data)
|
||||||
|
while len(data) != 0:
|
||||||
|
# Now call feed() on the rest of the data, chunk by chunk.
|
||||||
|
data = markup.read(self.CHUNK_SIZE)
|
||||||
|
if len(data) != 0:
|
||||||
|
self.parser.feed(data)
|
||||||
|
self.parser.close()
|
||||||
|
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
|
||||||
|
raise ParserRejectedMarkup(e)
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
|
||||||
|
|
||||||
|
def start(self, name, attrs, nsmap={}):
|
||||||
|
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
|
||||||
|
attrs = dict(attrs)
|
||||||
|
nsprefix = None
|
||||||
|
# Invert each namespace map as it comes in.
|
||||||
|
if len(nsmap) == 0 and len(self.nsmaps) > 1:
|
||||||
|
# There are no new namespaces for this tag, but
|
||||||
|
# non-default namespaces are in play, so we need a
|
||||||
|
# separate tag stack to know when they end.
|
||||||
|
self.nsmaps.append(None)
|
||||||
|
elif len(nsmap) > 0:
|
||||||
|
# A new namespace mapping has come into play.
|
||||||
|
|
||||||
|
# First, Let the BeautifulSoup object know about it.
|
||||||
|
self._register_namespaces(nsmap)
|
||||||
|
|
||||||
|
# Then, add it to our running list of inverted namespace
|
||||||
|
# mappings.
|
||||||
|
self.nsmaps.append(_invert(nsmap))
|
||||||
|
|
||||||
|
# The currently active namespace prefixes have
|
||||||
|
# changed. Calculate the new mapping so it can be stored
|
||||||
|
# with all Tag objects created while these prefixes are in
|
||||||
|
# scope.
|
||||||
|
current_mapping = dict(self.active_namespace_prefixes[-1])
|
||||||
|
current_mapping.update(nsmap)
|
||||||
|
|
||||||
|
# We should not track un-prefixed namespaces as we can only hold one
|
||||||
|
# and it will be recognized as the default namespace by soupsieve,
|
||||||
|
# which may be confusing in some situations.
|
||||||
|
if '' in current_mapping:
|
||||||
|
del current_mapping['']
|
||||||
|
self.active_namespace_prefixes.append(current_mapping)
|
||||||
|
|
||||||
|
# Also treat the namespace mapping as a set of attributes on the
|
||||||
|
# tag, so we can recreate it later.
|
||||||
|
attrs = attrs.copy()
|
||||||
|
for prefix, namespace in list(nsmap.items()):
|
||||||
|
attribute = NamespacedAttribute(
|
||||||
|
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
|
||||||
|
attrs[attribute] = namespace
|
||||||
|
|
||||||
|
# Namespaces are in play. Find any attributes that came in
|
||||||
|
# from lxml with namespaces attached to their names, and
|
||||||
|
# turn then into NamespacedAttribute objects.
|
||||||
|
new_attrs = {}
|
||||||
|
for attr, value in list(attrs.items()):
|
||||||
|
namespace, attr = self._getNsTag(attr)
|
||||||
|
if namespace is None:
|
||||||
|
new_attrs[attr] = value
|
||||||
|
else:
|
||||||
|
nsprefix = self._prefix_for_namespace(namespace)
|
||||||
|
attr = NamespacedAttribute(nsprefix, attr, namespace)
|
||||||
|
new_attrs[attr] = value
|
||||||
|
attrs = new_attrs
|
||||||
|
|
||||||
|
namespace, name = self._getNsTag(name)
|
||||||
|
nsprefix = self._prefix_for_namespace(namespace)
|
||||||
|
self.soup.handle_starttag(
|
||||||
|
name, namespace, nsprefix, attrs,
|
||||||
|
namespaces=self.active_namespace_prefixes[-1]
|
||||||
|
)
|
||||||
|
|
||||||
|
def _prefix_for_namespace(self, namespace):
|
||||||
|
"""Find the currently active prefix for the given namespace."""
|
||||||
|
if namespace is None:
|
||||||
|
return None
|
||||||
|
for inverted_nsmap in reversed(self.nsmaps):
|
||||||
|
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||||
|
return inverted_nsmap[namespace]
|
||||||
|
return None
|
||||||
|
|
||||||
|
def end(self, name):
|
||||||
|
self.soup.endData()
|
||||||
|
completed_tag = self.soup.tagStack[-1]
|
||||||
|
namespace, name = self._getNsTag(name)
|
||||||
|
nsprefix = None
|
||||||
|
if namespace is not None:
|
||||||
|
for inverted_nsmap in reversed(self.nsmaps):
|
||||||
|
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||||
|
nsprefix = inverted_nsmap[namespace]
|
||||||
|
break
|
||||||
|
self.soup.handle_endtag(name, nsprefix)
|
||||||
|
if len(self.nsmaps) > 1:
|
||||||
|
# This tag, or one of its parents, introduced a namespace
|
||||||
|
# mapping, so pop it off the stack.
|
||||||
|
out_of_scope_nsmap = self.nsmaps.pop()
|
||||||
|
|
||||||
|
if out_of_scope_nsmap is not None:
|
||||||
|
# This tag introduced a namespace mapping which is no
|
||||||
|
# longer in scope. Recalculate the currently active
|
||||||
|
# namespace prefixes.
|
||||||
|
self.active_namespace_prefixes.pop()
|
||||||
|
|
||||||
|
def pi(self, target, data):
|
||||||
|
self.soup.endData()
|
||||||
|
data = target + ' ' + data
|
||||||
|
self.soup.handle_data(data)
|
||||||
|
self.soup.endData(self.processing_instruction_class)
|
||||||
|
|
||||||
|
def data(self, content):
|
||||||
|
self.soup.handle_data(content)
|
||||||
|
|
||||||
|
def doctype(self, name, pubid, system):
|
||||||
|
self.soup.endData()
|
||||||
|
doctype = Doctype.for_name_and_ids(name, pubid, system)
|
||||||
|
self.soup.object_was_parsed(doctype)
|
||||||
|
|
||||||
|
def comment(self, content):
|
||||||
|
"Handle comments as Comment objects."
|
||||||
|
self.soup.endData()
|
||||||
|
self.soup.handle_data(content)
|
||||||
|
self.soup.endData(Comment)
|
||||||
|
|
||||||
|
def test_fragment_to_document(self, fragment):
|
||||||
|
"""See `TreeBuilder`."""
|
||||||
|
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
|
||||||
|
|
||||||
|
|
||||||
|
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
|
||||||
|
|
||||||
|
NAME = LXML
|
||||||
|
ALTERNATE_NAMES = ["lxml-html"]
|
||||||
|
|
||||||
|
features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
|
||||||
|
is_xml = False
|
||||||
|
processing_instruction_class = ProcessingInstruction
|
||||||
|
|
||||||
|
def default_parser(self, encoding):
|
||||||
|
return etree.HTMLParser
|
||||||
|
|
||||||
|
def feed(self, markup):
|
||||||
|
encoding = self.soup.original_encoding
|
||||||
|
try:
|
||||||
|
self.parser = self.parser_for(encoding)
|
||||||
|
self.parser.feed(markup)
|
||||||
|
self.parser.close()
|
||||||
|
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
|
||||||
|
raise ParserRejectedMarkup(e)
|
||||||
|
|
||||||
|
|
||||||
|
def test_fragment_to_document(self, fragment):
|
||||||
|
"""See `TreeBuilder`."""
|
||||||
|
return '<html><body>%s</body></html>' % fragment
|
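A quick usage sketch of the builders above (illustrative, not part of the commit): parsing a namespaced XML fragment with the "xml" feature string resolves to LXMLTreeBuilderForXML, and the prefixes recorded by _register_namespaces() become usable in CSS selectors. The document and tag names here are made up; lxml and soupsieve must be installed.

# Illustrative sketch: exercises LXMLTreeBuilderForXML via the public API.
from bs4 import BeautifulSoup

# A made-up document declaring a prefixed namespace, so
# _register_namespaces() records the 'dc' prefix on the soup object.
doc = """<?xml version="1.0"?>
<root xmlns:dc="http://purl.org/dc/elements/1.1/">
  <dc:title>An example title</dc:title>
</root>"""

soup = BeautifulSoup(doc, "xml")  # "xml" selects LXMLTreeBuilderForXML

# The prefix registered during the parse can be used in a CSS selector.
print(soup.select_one("dc|title").get_text())  # -> An example title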
File diff suppressed because it is too large
@@ -0,0 +1,248 @@
"""Diagnostic functions, mainly for use when doing tech support."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry

import os
import pstats
import random
import tempfile
import time
import traceback
import sys

def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string containing markup that needs to be explained.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))

    basic_parsers = ["html.parser", "html5lib", "lxml"]
    for name in basic_parsers:
        for builder in builder_registry.builders:
            if name in builder.features:
                break
        else:
            basic_parsers.remove(name)
            print((
                "I noticed that %s is not installed. Installing it may help." %
                name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
        except ImportError as e:
            print(
                "lxml is not installed or couldn't be imported.")


    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
        except ImportError as e:
            print(
                "html5lib is not installed or couldn't be imported.")

    if hasattr(data, 'read'):
        data = data.read()
    elif data.startswith("http:") or data.startswith("https:"):
        print(('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data))
        print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
        return
    else:
        try:
            if os.path.exists(data):
                print(('"%s" looks like a filename. Reading data from the file.' % data))
                with open(data) as fp:
                    data = fp.read()
        except ValueError:
            # This can happen on some platforms when the 'filename' is
            # too long. Assume it's data and not a filename.
            pass
        print("")

    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))

        print(("-" * 80))

def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running. You can use this to determine whether
    an lxml-specific problem is in Beautiful Soup's lxml tree builders
    or in lxml itself.

    :param data: Some markup.
    :param html: If True, markup will be parsed with lxml's HTML parser.
       if False, lxml's XML parser will be used.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    reader = BytesIO(data)
    for event, element in etree.iterparse(
        reader, html=html, recover=recover, **kwargs
    ):
        print(("%s, %4s, %s" % (event, element.tag, element.text)))

class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """

    def _p(self, s):
        print(s)

    def handle_starttag(self, name, attrs):
        self._p("%s START" % name)

    def handle_endtag(self, name):
        self._p("%s END" % name)

    def handle_data(self, data):
        self._p("%s DATA" % data)

    def handle_charref(self, name):
        self._p("%s CHARREF" % name)

    def handle_entityref(self, name):
        self._p("%s ENTITYREF" % name)

    def handle_comment(self, data):
        self._p("%s COMMENT" % data)

    def handle_decl(self, data):
        self._p("%s DECL" % data)

    def unknown_decl(self, data):
        self._p("%s UNKNOWN-DECL" % data)

    def handle_pi(self, data):
        self._p("%s PI" % data)

def htmlparser_trace(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    parser = AnnouncingParser()
    parser.feed(data)

_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"

def rword(length=5):
    "Generate a random word-like string."
    s = ''
    for i in range(length):
        if i % 2 == 0:
            t = _consonants
        else:
            t = _vowels
        s += random.choice(t)
    return s

def rsentence(length=4):
    "Generate a random sentence-like string."
    return " ".join(rword(random.randint(4,9)) for i in range(length))

def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    elements = []
    for i in range(num_elements):
        choice = random.randint(0,3)
        if choice == 0:
            # New tag.
            tag_name = random.choice(tag_names)
            elements.append("<%s>" % tag_name)
        elif choice == 1:
            elements.append(rsentence(random.randint(1,4)))
        elif choice == 2:
            # Close a tag.
            tag_name = random.choice(tag_names)
            elements.append("</%s>" % tag_name)
    return "<html>" + "\n".join(elements) + "</html>"

def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark."""
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))

    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
        try:
            a = time.time()
            soup = BeautifulSoup(data, parser)
            b = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))

    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (b-a)))

    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))

def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document."""
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name

    data = rdoc(num_elements)
    vars = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)

    stats = pstats.Stats(filename)
    # stats.strip_dirs()
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)

# If this file is run as a script, standard input is diagnosed.
if __name__ == '__main__':
    diagnose(sys.stdin.read())
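These diagnostics are all plain function calls; a sketch of how you might drive them from a REPL (illustrative, markup made up, output trimmed):

# Illustrative REPL session for the module above (not part of the file).
from bs4.diagnose import diagnose, htmlparser_trace, benchmark_parsers

diagnose("<p>Some <b>bad<b> markup")           # prints per-parser results
htmlparser_trace("<p>Some <b>bad<b> markup")   # raw html.parser events
# benchmark_parsers(10000)  # slower; compares lxml/html5lib/html.parser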
File diff suppressed because it is too large
@@ -0,0 +1,185 @@
from bs4.dammit import EntitySubstitution

class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `PageElement.encode`. Most people won't need to think about
    formatters, and most people who need to think about them can pass
    in one of these predefined strings as `formatter` rather than
    making a new Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """
    # Registries of XML and HTML formatters.
    XML_FORMATTERS = {}
    HTML_FORMATTERS = {}

    HTML = 'html'
    XML = 'xml'

    HTML_DEFAULTS = dict(
        cdata_containing_tags=set(["script", "style"]),
    )

    def _default(self, language, value, kwarg):
        if value is not None:
            return value
        if language == self.XML:
            return set()
        return self.HTML_DEFAULTS[kwarg]

    def __init__(
            self, language=None, entity_substitution=None,
            void_element_close_prefix='/', cdata_containing_tags=None,
            empty_attributes_are_booleans=False, indent=1,
    ):
        """Constructor.

        :param language: This should be Formatter.XML if you are formatting
           XML markup and Formatter.HTML if you are formatting HTML markup.

        :param entity_substitution: A function to call to replace special
           characters with XML/HTML entities. For examples, see
           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
        :param void_element_close_prefix: By default, void elements
           are represented as <tag/> (XML rules) rather than <tag>
           (HTML rules). To get <tag>, pass in the empty string.
        :param cdata_containing_tags: The list of tags that are defined
           as containing CDATA in this dialect. For example, in HTML,
           <script> and <style> tags are defined as containing CDATA,
           and their contents should not be formatted.
        :param empty_attributes_are_booleans: Render attributes whose value
            is the empty string as HTML-style boolean attributes.
            (Attributes whose value is None are always rendered this way.)

        :param indent: If indent is a non-negative integer or string,
            then the contents of elements will be indented
            appropriately when pretty-printing. An indent level of 0,
            negative, or "" will only insert newlines. Using a
            positive integer indent indents that many spaces per
            level. If indent is a string (such as "\t"), that string
            is used to indent each level. The default behavior is to
            indent one space per level.
        """
        self.language = language
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            language, cdata_containing_tags, 'cdata_containing_tags'
        )
        self.empty_attributes_are_booleans=empty_attributes_are_booleans
        if indent is None:
            indent = 0
        if isinstance(indent, int):
            if indent < 0:
                indent = 0
            indent = ' ' * indent
        elif isinstance(indent, str):
            indent = indent
        else:
            indent = ' '
        self.indent = indent

    def substitute(self, ns):
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: A string with certain characters replaced by named
         or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in self.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return self.entity_substitution(ns)

    def attribute_value(self, value):
        """Process the value of an attribute.

        :param value: A string.
        :return: A string with certain characters replaced by named
         or numeric entities.
        """
        return self.substitute(value)

    def attributes(self, tag):
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then attributes whose
        values are set to the empty string will be treated as boolean
        attributes.
        """
        if tag.attrs is None:
            return []
        return sorted(
            (k, (None if self.empty_attributes_are_booleans and v == '' else v))
            for k, v in list(tag.attrs.items())
        )

class HTMLFormatter(Formatter):
    """A generic Formatter for HTML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        return super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)


class XMLFormatter(Formatter):
    """A generic Formatter for XML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        return super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)


# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix=None,
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
    entity_substitution=None
)
XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
XMLFormatter.REGISTRY[None] = Formatter(
    Formatter.XML, entity_substitution=None
)
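A short sketch of how these registry entries surface through the public API (illustrative; the markup is made up): the same tree serializes differently depending on which formatter name you pass to decode().

# Illustrative sketch (not part of the file): formatter names map to
# the REGISTRY entries defined above.
from bs4 import BeautifulSoup

soup = BeautifulSoup('<option selected=""></option>', "html.parser")
print(soup.decode(formatter="html"))   # <option selected=""></option>
print(soup.decode(formatter="html5"))  # <option selected></option>
                                       # (empty_attributes_are_booleans)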
File diff suppressed because it is too large
@@ -0,0 +1,29 @@
import pytest
from unittest.mock import patch
from bs4.builder import DetectsXMLParsedAsHTML

class TestDetectsXMLParsedAsHTML(object):

    @pytest.mark.parametrize(
        "markup,looks_like_xml",
        [("No xml declaration", False),
         ("<html>obviously HTML</html", False),
         ("<?xml ><html>Actually XHTML</html>", False),
         ("<?xml> < html>Tricky XHTML</html>", False),
         ("<?xml ><no-html-tag>", True),
        ]
    )
    def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml):
        # Test of our ability to guess at whether markup looks XML-ish
        # _and_ not HTML-ish.
        with patch('bs4.builder.DetectsXMLParsedAsHTML._warn') as mock:
            for data in markup, markup.encode('utf8'):
                result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
                    data
                )
                assert result == looks_like_xml
                if looks_like_xml:
                    assert mock.called
                else:
                    assert not mock.called
                mock.reset_mock()
@@ -0,0 +1,136 @@
"""Tests of the builder registry."""

import pytest
import warnings

from bs4 import BeautifulSoup
from bs4.builder import (
    builder_registry as registry,
    HTMLParserTreeBuilder,
    TreeBuilderRegistry,
)

try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError:
    HTML5LIB_PRESENT = False

try:
    from bs4.builder import (
        LXMLTreeBuilderForXML,
        LXMLTreeBuilder,
    )
    LXML_PRESENT = True
except ImportError:
    LXML_PRESENT = False


class TestBuiltInRegistry(object):
    """Test the built-in registry with the default builders registered."""

    def test_combination(self):
        assert registry.lookup('strict', 'html') == HTMLParserTreeBuilder
        if LXML_PRESENT:
            assert registry.lookup('fast', 'html') == LXMLTreeBuilder
            assert registry.lookup('permissive', 'xml') == LXMLTreeBuilderForXML
        if HTML5LIB_PRESENT:
            assert registry.lookup('html5lib', 'html') == HTML5TreeBuilder

    def test_lookup_by_markup_type(self):
        if LXML_PRESENT:
            assert registry.lookup('html') == LXMLTreeBuilder
            assert registry.lookup('xml') == LXMLTreeBuilderForXML
        else:
            assert registry.lookup('xml') == None
            if HTML5LIB_PRESENT:
                assert registry.lookup('html') == HTML5TreeBuilder
            else:
                assert registry.lookup('html') == HTMLParserTreeBuilder

    def test_named_library(self):
        if LXML_PRESENT:
            assert registry.lookup('lxml', 'xml') == LXMLTreeBuilderForXML
            assert registry.lookup('lxml', 'html') == LXMLTreeBuilder
        if HTML5LIB_PRESENT:
            assert registry.lookup('html5lib') == HTML5TreeBuilder

        assert registry.lookup('html.parser') == HTMLParserTreeBuilder

    def test_beautifulsoup_constructor_does_lookup(self):

        with warnings.catch_warnings(record=True) as w:
            # This will create a warning about not explicitly
            # specifying a parser, but we'll ignore it.

            # You can pass in a string.
            BeautifulSoup("", features="html")
            # Or a list of strings.
            BeautifulSoup("", features=["html", "fast"])
            pass

        # You'll get an exception if BS can't find an appropriate
        # builder.
        with pytest.raises(ValueError):
            BeautifulSoup("", features="no-such-feature")

class TestRegistry(object):
    """Test the TreeBuilderRegistry class in general."""

    def setup_method(self):
        self.registry = TreeBuilderRegistry()

    def builder_for_features(self, *feature_list):
        cls = type('Builder_' + '_'.join(feature_list),
                   (object,), {'features' : feature_list})

        self.registry.register(cls)
        return cls

    def test_register_with_no_features(self):
        builder = self.builder_for_features()

        # Since the builder advertises no features, you can't find it
        # by looking up features.
        assert self.registry.lookup('foo') is None

        # But you can find it by doing a lookup with no features, if
        # this happens to be the only registered builder.
        assert self.registry.lookup() == builder

    def test_register_with_features_makes_lookup_succeed(self):
        builder = self.builder_for_features('foo', 'bar')
        assert self.registry.lookup('foo') is builder
        assert self.registry.lookup('bar') is builder

    def test_lookup_fails_when_no_builder_implements_feature(self):
        builder = self.builder_for_features('foo', 'bar')
        assert self.registry.lookup('baz') is None

    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        builder1 = self.builder_for_features('foo')
        builder2 = self.builder_for_features('bar')
        assert self.registry.lookup() == builder2

    def test_lookup_fails_when_no_tree_builders_registered(self):
        assert self.registry.lookup() is None

    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        has_one = self.builder_for_features('foo')
        has_the_other = self.builder_for_features('bar')
        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
        lacks_one = self.builder_for_features('bar')
        has_the_other = self.builder_for_features('foo')

        # There are two builders featuring 'foo' and 'bar', but
        # the one that also features 'quux' was registered later.
        assert self.registry.lookup('foo', 'bar') == has_both_late

        # There is only one builder featuring 'foo', 'bar', and 'baz'.
        assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early

    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        builder1 = self.builder_for_features('foo', 'bar')
        builder2 = self.builder_for_features('foo', 'baz')
        assert self.registry.lookup('bar', 'baz') is None
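For reference, the registry these tests exercise is the same one the BeautifulSoup constructor consults; a minimal lookup sketch (assuming lxml is installed, otherwise lookup() falls back to another builder or returns None):

# Minimal sketch of the lookup behavior tested above (not part of the file).
from bs4.builder import builder_registry

builder_class = builder_registry.lookup("fast", "html")
print(builder_class.NAME)  # 'lxml' when lxml is available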
@@ -0,0 +1,371 @@
# encoding: utf-8
import pytest
import logging
import bs4
from bs4 import BeautifulSoup
from bs4.dammit import (
    EntitySubstitution,
    EncodingDetector,
    UnicodeDammit,
)

class TestUnicodeDammit(object):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        markup = "I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == markup

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == "<foo>\u2018\u2019\u201c\u201d</foo>"

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        assert dammit.unicode_markup == "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>"

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        assert dammit.unicode_markup == "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>"

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        assert dammit.unicode_markup == """<foo>''""</foo>"""

    def test_detect_utf8(self):
        utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup == 'Sacr\xe9 bleu! \N{SNOWMAN}'

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'iso-8859-8'
        assert dammit.unicode_markup == '\u05dd\u05d5\u05dc\u05e9'

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup.encode("utf-8") == utf_8

    def test_ignore_inappropriate_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'utf-8'

    def test_ignore_invalid_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            assert dammit.original_encoding.lower() == 'utf-8'

    def test_exclude_encodings(self):
        # This is UTF-8.
        utf8_data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
        assert dammit.original_encoding.lower() == 'windows-1252'

        # And if we exclude that, there is no valid guess at all.
        dammit = UnicodeDammit(
            utf8_data, exclude_encodings=["utf-8", "windows-1252"])
        assert dammit.original_encoding == None

class TestEncodingDetector(object):

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):

        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            assert "euc-jp" == dammit.original_encoding

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            assert True == dammit.contains_replacement_characters
            assert "\ufffd" in dammit.unicode_markup

            soup = BeautifulSoup(doc, "html.parser")
            assert soup.contains_replacement_characters
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        assert "<a>áé</a>" == dammit.unicode_markup
        assert "utf-16le" == dammit.original_encoding

    def test_known_definite_versus_user_encodings(self):
        # The known_definite_encodings are used before sniffing the
        # byte-order mark; the user_encodings are used afterwards.

        # Here's a document in UTF-16LE.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)

        # We can process it as UTF-16 by passing it in as a known
        # definite encoding.
        before = UnicodeDammit(data, known_definite_encodings=["utf-16"])
        assert "utf-16" == before.original_encoding

        # If we pass UTF-8 as a user encoding, it's not even
        # tried--the encoding sniffed from the byte-order mark takes
        # precedence.
        after = UnicodeDammit(data, user_encodings=["utf-8"])
        assert "utf-16le" == after.original_encoding
        assert ["utf-16le"] == [x[0] for x in after.tried_encodings]

        # Here's a document in ISO-8859-8.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, known_definite_encodings=["utf-8"],
                               user_encodings=["iso-8859-8"])

        # The known_definite_encodings don't work, BOM sniffing does
        # nothing (it only works for a few UTF encodings), but one of
        # the user_encodings does work.
        assert "iso-8859-8" == dammit.original_encoding
        assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings]

    def test_deprecated_override_encodings(self):
        # override_encodings is a deprecated alias for
        # known_definite_encodings.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(
            hebrew,
            known_definite_encodings=["shift-jis"],
            override_encodings=["utf-8"],
            user_encodings=["iso-8859-8"],
        )
        assert "iso-8859-8" == dammit.original_encoding

        # known_definite_encodings and override_encodings were tried
        # before user_encodings.
        assert ["shift-jis", "utf-8", "iso-8859-8"] == (
            [x[0] for x in dammit.tried_encodings]
        )

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        with pytest.raises(UnicodeDecodeError):
            doc.decode("utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"

        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        assert "☃☃☃“Hi, I like Windows!”☃☃☃" == fixed.decode("utf8")

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            "\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
        ):
            input = tricky_unicode_char.encode("utf8")
            assert input.endswith(b'\x93')
            output = UnicodeDammit.detwingle(input)
            assert output == input

    def test_find_declared_encoding(self):
        # Test our ability to find a declared encoding inside an
        # XML or HTML document.
        #
        # Even if the document comes in as Unicode, it may be
        # interesting to know what encoding was claimed
        # originally.

        html_unicode = '<html><head><meta charset="utf-8"></head></html>'
        html_bytes = html_unicode.encode("ascii")

        xml_unicode = '<?xml version="1.0" encoding="ISO-8859-1" ?>'
        xml_bytes = xml_unicode.encode("ascii")

        m = EncodingDetector.find_declared_encoding
        assert m(html_unicode, is_html=False) is None
        assert "utf-8" == m(html_unicode, is_html=True)
        assert "utf-8" == m(html_bytes, is_html=True)

        assert "iso-8859-1" == m(xml_unicode)
        assert "iso-8859-1" == m(xml_bytes)

        # Normally, only the first few kilobytes of a document are checked for
        # an encoding.
        spacer = b' ' * 5000
        assert m(spacer + html_bytes) is None
        assert m(spacer + xml_bytes) is None

        # But you can tell find_declared_encoding to search an entire
        # HTML document.
        assert (
            m(spacer + html_bytes, is_html=True, search_entire_document=True)
            == "utf-8"
        )

        # The XML encoding declaration has to be the very first thing
        # in the document. We'll allow whitespace before the document
        # starts, but nothing else.
        assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b' ' + xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b'a' + xml_bytes, search_entire_document=True) is None


class TestEntitySubstitution(object):
    """Standalone tests of the EntitySubstitution class."""
    def setup_method(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = "foo\u2200\N{SNOWMAN}\u00f5bar"
        assert self.sub.substitute_html(s) == "foo&forall;\N{SNOWMAN}&otilde;bar"

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        assert self.sub.substitute_html(dammit.markup) == "&lsquo;&rsquo;foo&ldquo;&rdquo;"

    def test_html5_entity(self):
        # Some HTML5 entities correspond to single- or multi-character
        # Unicode sequences.

        for entity, u in (
            # A few spot checks of our ability to recognize
            # special character sequences and convert them
            # to named entities.
            ('&models;', '\u22a7'),
            ('&Nfr;', '\U0001d511'),
            ('&ngeqq;', '\u2267\u0338'),
            ('&not;', '\xac'),
            ('&Not;', '\u2aec'),

            # We _could_ convert | to &verbarr;, but we don't, because
            # | is an ASCII character.
            ('|', '|'),

            # Similarly for the fj ligature, which we could convert to
            # &fjlig;, but we don't.
            ("fj", "fj"),

            # We do convert _these_ ASCII characters to HTML entities,
            # because that's required to generate valid HTML.
            ('&gt;', '>'),
            ('&lt;', '<'),
            ('&amp;', '&'),
        ):
            template = '3 %s 4'
            raw = template % u
            with_entities = template % entity
            assert self.sub.substitute_html(raw) == with_entities

    def test_html5_entity_with_variation_selector(self):
        # Some HTML5 entities correspond either to a single-character
        # Unicode sequence _or_ to the same character plus U+FE00,
        # VARIATION SELECTOR 1. We can handle this.
        data = "fjords \u2294 penguins"
        markup = "fjords &sqcup; penguins"
        assert self.sub.substitute_html(data) == markup

        data = "fjords \u2294\ufe00 penguins"
        markup = "fjords &sqcups; penguins"
        assert self.sub.substitute_html(data) == markup

    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, False) == s

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        assert self.sub.substitute_xml("Welcome", True) == '"Welcome"'
        assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"'

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'"

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(s, True) == '"Welcome to &quot;Bob\'s Bar&quot;"'

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(quoted) == quoted

    def test_xml_quoting_handles_angle_brackets(self):
        assert self.sub.substitute_xml("foo<bar>") == "foo&lt;bar&gt;"

    def test_xml_quoting_handles_ampersands(self):
        assert self.sub.substitute_xml("AT&T") == "AT&amp;T"

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml_containing_entities("&Aacute;T&T") == "&Aacute;T&amp;T"

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        assert self.sub.substitute_html(text) == text
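The behaviors tested above are easy to poke at directly; a small sketch using the same Windows-1252 smart-quote bytes as the tests (illustrative; the detected encoding may vary depending on whether chardet is installed):

# Illustrative sketch of UnicodeDammit (not part of the file).
from bs4.dammit import UnicodeDammit

dammit = UnicodeDammit(b"<foo>\x91\x92\x93\x94</foo>", smart_quotes_to="ascii")
print(dammit.original_encoding)  # e.g. 'windows-1252'
print(dammit.unicode_markup)     # <foo>''""</foo>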
@@ -0,0 +1,38 @@
"Test harness for doctests."

# TODO: Pretty sure this isn't used and should be deleted.

# pylint: disable-msg=E0611,W0142

__metaclass__ = type
__all__ = [
    'additional_tests',
    ]

import atexit
import doctest
import os
#from pkg_resources import (
#    resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest

DOCTEST_FLAGS = (
    doctest.ELLIPSIS |
    doctest.NORMALIZE_WHITESPACE |
    doctest.REPORT_NDIFF)


# def additional_tests():
#     "Run the doc tests (README.txt and docs/*, if any exist)"
#     doctest_files = [
#         os.path.abspath(resource_filename('bs4', 'README.txt'))]
#     if resource_exists('bs4', 'docs'):
#         for name in resource_listdir('bs4', 'docs'):
#             if name.endswith('.txt'):
#                 doctest_files.append(
#                     os.path.abspath(
#                         resource_filename('bs4', 'docs/%s' % name)))
#     kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
#     atexit.register(cleanup_resources)
#     return unittest.TestSuite((
#         doctest.DocFileSuite(*doctest_files, **kwargs)))
@ -0,0 +1,74 @@
"""Tests of classes in element.py.

The really big classes -- Tag, PageElement, and NavigableString --
are tested in separate files.
"""

from bs4.element import (
    CharsetMetaAttributeValue,
    ContentMetaAttributeValue,
    NamespacedAttribute,
)
from . import SoupTest


class TestNamedspacedAttribute(object):

    def test_name_may_be_none_or_missing(self):
        a = NamespacedAttribute("xmlns", None)
        assert a == "xmlns"

        a = NamespacedAttribute("xmlns", "")
        assert a == "xmlns"

        a = NamespacedAttribute("xmlns")
        assert a == "xmlns"

    def test_namespace_may_be_none_or_missing(self):
        a = NamespacedAttribute(None, "tag")
        assert a == "tag"

        a = NamespacedAttribute("", "tag")
        assert a == "tag"

    def test_attribute_is_equivalent_to_colon_separated_string(self):
        a = NamespacedAttribute("a", "b")
        assert "a:b" == a

    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        a = NamespacedAttribute("a", "b", "c")
        b = NamespacedAttribute("a", "b", "c")
        assert a == b

        # The actual namespace is not considered.
        c = NamespacedAttribute("a", "b", None)
        assert a == c

        # But name and prefix are important.
        d = NamespacedAttribute("a", "z", "c")
        assert a != d

        e = NamespacedAttribute("z", "b", "c")
        assert a != e


class TestAttributeValueWithCharsetSubstitution(object):
    """Certain attributes are designed to have the charset of the
    final document substituted into their value.
    """

    # Renamed from a second test_content_meta_attribute_value, which
    # shadowed this test and prevented it from ever running.
    def test_charset_meta_attribute_value(self):
        # The value of a CharsetMetaAttributeValue is whatever
        # encoding the string is in.
        value = CharsetMetaAttributeValue("euc-jp")
        assert "euc-jp" == value
        assert "euc-jp" == value.original_value
        assert "utf8" == value.encode("utf8")
        assert "ascii" == value.encode("ascii")

    def test_content_meta_attribute_value(self):
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        assert "text/html; charset=euc-jp" == value
        assert "text/html; charset=euc-jp" == value.original_value
        assert "text/html; charset=utf8" == value.encode("utf8")
        assert "text/html; charset=ascii" == value.encode("ascii")
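A minimal usage sketch (not part of the vendored diff) of the behavior the tests above assert; the xlink attribute is an invented example:

    from bs4.element import NamespacedAttribute

    # Arguments are prefix, name, and an optional namespace URI.
    attr = NamespacedAttribute("xlink", "href", "http://www.w3.org/1999/xlink")
    print(str(attr))             # "xlink:href" -- renders as prefix:name
    print(attr == "xlink:href")  # True: equality ignores the namespace URI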
@ -0,0 +1,113 @@
import pytest

from bs4.element import Tag
from bs4.formatter import (
    Formatter,
    HTMLFormatter,
    XMLFormatter,
)
from . import SoupTest

class TestFormatter(SoupTest):

    def test_default_attributes(self):
        # Test the default behavior of Formatter.attributes().
        formatter = Formatter()
        tag = Tag(name="tag")
        tag['b'] = 1
        tag['a'] = 2

        # Attributes come out sorted by name. In Python 3, attributes
        # normally come out of a dictionary in the order they were
        # added.
        assert [('a', 2), ('b', 1)] == formatter.attributes(tag)

        # This works even if Tag.attrs is None, though this shouldn't
        # normally happen.
        tag.attrs = None
        assert [] == formatter.attributes(tag)

        assert ' ' == formatter.indent

    def test_sort_attributes(self):
        # Test the ability to override Formatter.attributes() to,
        # e.g., disable the normal sorting of attributes.
        class UnsortedFormatter(Formatter):
            def attributes(self, tag):
                self.called_with = tag
                for k, v in sorted(tag.attrs.items()):
                    if k == 'ignore':
                        continue
                    yield k, v

        soup = self.soup('<p cval="1" aval="2" ignore="ignored"></p>')
        formatter = UnsortedFormatter()
        decoded = soup.decode(formatter=formatter)

        # attributes() was called on the <p> tag. It filtered out one
        # attribute and sorted the other two.
        assert formatter.called_with == soup.p
        assert '<p aval="2" cval="1"></p>' == decoded

    def test_empty_attributes_are_booleans(self):
        # Test the behavior of empty_attributes_are_booleans as well
        # as which Formatters have it enabled.

        for name in ('html', 'minimal', None):
            formatter = HTMLFormatter.REGISTRY[name]
            assert False == formatter.empty_attributes_are_booleans

        formatter = XMLFormatter.REGISTRY[None]
        assert False == formatter.empty_attributes_are_booleans

        formatter = HTMLFormatter.REGISTRY['html5']
        assert True == formatter.empty_attributes_are_booleans

        # Verify that the constructor sets the value.
        formatter = Formatter(empty_attributes_are_booleans=True)
        assert True == formatter.empty_attributes_are_booleans

        # Now demonstrate what it does to markup.
        for markup in (
                "<option selected></option>",
                '<option selected=""></option>'
        ):
            soup = self.soup(markup)
            for formatter in ('html', 'minimal', 'xml', None):
                assert b'<option selected=""></option>' == soup.option.encode(formatter='html')
                assert b'<option selected></option>' == soup.option.encode(formatter='html5')

    @pytest.mark.parametrize(
        "indent,expect",
        [
            (None, '<a>\n<b>\ntext\n</b>\n</a>'),
            (-1, '<a>\n<b>\ntext\n</b>\n</a>'),
            (0, '<a>\n<b>\ntext\n</b>\n</a>'),
            ("", '<a>\n<b>\ntext\n</b>\n</a>'),

            (1, '<a>\n <b>\n  text\n </b>\n</a>'),
            (2, '<a>\n  <b>\n    text\n  </b>\n</a>'),

            ("\t", '<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>'),
            ('abc', '<a>\nabc<b>\nabcabctext\nabc</b>\n</a>'),

            # Some invalid inputs -- the default behavior is used.
            (object(), '<a>\n <b>\n  text\n </b>\n</a>'),
            (b'bytes', '<a>\n <b>\n  text\n </b>\n</a>'),
        ]
    )
    def test_indent(self, indent, expect):
        # Pretty-print a tree with a Formatter set to
        # indent in a certain way and verify the results.
        soup = self.soup("<a><b>text</b></a>")
        formatter = Formatter(indent=indent)
        assert soup.prettify(formatter=formatter) == expect

        # Pretty-printing only happens with prettify(), not
        # encode().
        assert soup.encode(formatter=formatter) != expect

    def test_default_indent_value(self):
        formatter = Formatter()
        assert formatter.indent == ' '
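For reference, a minimal sketch of the Formatter-subclassing pattern exercised by test_sort_attributes above; ReversedFormatter is a hypothetical name invented for illustration:

    from bs4 import BeautifulSoup
    from bs4.formatter import HTMLFormatter

    class ReversedFormatter(HTMLFormatter):
        # Override attributes() to change how attributes are ordered on output.
        def attributes(self, tag):
            return sorted(tag.attrs.items(), reverse=True)

    soup = BeautifulSoup('<p a="1" b="2"></p>', 'html.parser')
    print(soup.decode(formatter=ReversedFormatter()))  # <p b="2" a="1"></p>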
@ -0,0 +1,223 @@
"""Tests to ensure that the html5lib tree builder generates good trees."""

import warnings

try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError as e:
    HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from . import (
    HTML5TreeBuilderSmokeTest,
    SoupTest,
    skipIf,
)

@skipIf(
    not HTML5LIB_PRESENT,
    "html5lib seems not to be present, not testing its tree builder.")
class TestHTML5LibBuilder(SoupTest, HTML5TreeBuilderSmokeTest):
    """See ``HTML5TreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return HTML5TreeBuilder

    def test_soupstrainer(self):
        # The html5lib tree builder does not support SoupStrainers.
        strainer = SoupStrainer("b")
        markup = "<p>A <b>bold</b> statement.</p>"
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(markup, parse_only=strainer)
        assert soup.decode() == self.document_for(markup)

        assert "the html5lib tree builder doesn't support parse_only" in str(w[0].message)

    def test_correctly_nested_tables(self):
        """html5lib inserts <tbody> tags where other parsers don't."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')

        self.assert_soup(
            markup,
            '<table id="1"><tbody><tr><td>Here\'s another table:'
            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
            '</td></tr></tbody></table>')

        self.assert_soup(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_xml_declaration_followed_by_doctype(self):
        markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
  <head>
  </head>
  <body>
    <p>foo</p>
  </body>
</html>'''
        soup = self.soup(markup)
        # Verify that we can reach the <p> tag; this means the tree is connected.
        assert b"<p>foo</p>" == soup.p.encode()

    def test_reparented_markup(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))


    def test_reparented_markup_ends_with_whitespace(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))

    def test_reparented_markup_containing_identical_whitespace_nodes(self):
        """Verify that we keep the two whitespace nodes in this
        document distinct when reparenting the adjacent <tbody> tags.
        """
        markup = '<table> <tbody><tbody><ims></tbody> </table>'
        soup = self.soup(markup)
        space1, space2 = soup.find_all(string=' ')
        tbody1, tbody2 = soup.find_all('tbody')
        assert space1.next_element is tbody1
        assert tbody2.next_element is space2

    def test_reparented_markup_containing_children(self):
        markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
        soup = self.soup(markup)
        noscript = soup.noscript
        assert "target" == noscript.next_element
        target = soup.find(string='target')

        # The 'aftermath' string was duplicated; we want the second one.
        final_aftermath = soup.find_all(string='aftermath')[-1]

        # The <noscript> tag was moved beneath a copy of the <a> tag,
        # but the 'target' string within is still connected to the
        # (second) 'aftermath' string.
        assert final_aftermath == target.next_element
        assert target == final_aftermath.previous_element

    def test_processing_instruction(self):
        """Processing instructions become comments."""
        markup = b"""<?PITarget PIContent?>"""
        soup = self.soup(markup)
        assert str(soup).startswith("<!--?PITarget PIContent?-->")

    def test_cloned_multivalue_node(self):
        markup = b"""<a class="my_class"><p></a>"""
        soup = self.soup(markup)
        a1, a2 = soup.find_all('a')
        assert a1 == a2
        assert a1 is not a2

    def test_foster_parenting(self):
        markup = b"""<table><td></tbody>A"""
        soup = self.soup(markup)
        assert "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>" == soup.body.decode()

    def test_extraction(self):
        """
        Test that extraction does not destroy the tree.

        https://bugs.launchpad.net/beautifulsoup/+bug/1782928
        """

        markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
        soup = self.soup(markup)
        [s.extract() for s in soup('script')]
        [s.extract() for s in soup('style')]

        assert len(soup.find_all("p")) == 1

    def test_empty_comment(self):
        """
        Test that empty comment does not break structure.

        https://bugs.launchpad.net/beautifulsoup/+bug/1806598
        """

        markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
        soup = self.soup(markup)
        inputs = []
        for form in soup.find_all('form'):
            inputs.extend(form.find_all('input'))
        assert len(inputs) == 1

    def test_tracking_line_numbers(self):
        # The html5lib TreeBuilder keeps track of line number and
        # position of each element.
        markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
        soup = self.soup(markup)
        assert 2 == soup.p.sourceline
        assert 5 == soup.p.sourcepos
        assert "sourceline" == soup.p.find('sourceline').name

        # You can deactivate this behavior.
        soup = self.soup(markup, store_line_numbers=False)
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

    def test_special_string_containers(self):
        # The html5lib tree builder doesn't support this standard feature,
        # because there's no way of knowing, when a string is created,
        # where in the tree it will eventually end up.
        pass

    def test_html5_attributes(self):
        # The html5lib TreeBuilder can convert any entity named in
        # the HTML5 spec to a sequence of Unicode characters, and
        # convert those Unicode characters to a (potentially
        # different) named entity on the way out.
        #
        # This is a copy of the same test from
        # HTMLParserTreeBuilderSmokeTest. It's not in the superclass
        # because the lxml HTML TreeBuilder _doesn't_ work this way.
        for input_element, output_unicode, output_element in (
                ("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
                ('&models;', '\u22a7', b'&models;'),
                ('&Nfr;', '\U0001d511', b'&Nfr;'),
                ('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
                ('&not;', '\xac', b'&not;'),
                ('&Not;', '\u2aec', b'&Not;'),
                ('&quot;', '"', b'"'),
                ('&there4;', '\u2234', b'&there4;'),
                ('&Therefore;', '\u2234', b'&there4;'),
                ('&therefore;', '\u2234', b'&there4;'),
                ("&fjlig;", 'fj', b'fj'),
                ("&sqcup;", '\u2294', b'&sqcup;'),
                ("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
                ("&apos;", "'", b"'"),
                ("&verbar;", "|", b"|"),
        ):
            markup = '<div>%s</div>' % input_element
            div = self.soup(markup).div
            without_element = div.encode()
            expect = b"<div>%s</div>" % output_unicode.encode("utf8")
            assert without_element == expect

            with_element = div.encode(formatter="html")
            expect = b"<div>%s</div>" % output_element
            assert with_element == expect
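A small sketch of the tree repair these tests rely on (assuming html5lib is installed alongside bs4); the markup and the exact output shape are illustrative:

    from bs4 import BeautifulSoup

    # html5lib fixes bad markup the way a browser would, e.g. by
    # inserting the implied <tbody> and closing the stray <td>.
    soup = BeautifulSoup('<table><tr><td>A</table>', 'html5lib')
    print(soup.table)
    # expected output along the lines of:
    # <table><tbody><tr><td>A</td></tr></tbody></table>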
@ -0,0 +1,136 @@
"""Tests to ensure that the html.parser tree builder generates good
trees."""

from pdb import set_trace
import pickle
import warnings
from bs4.builder import (
    HTMLParserTreeBuilder,
    XMLParsedAsHTMLWarning,
)
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
from . import SoupTest, HTMLTreeBuilderSmokeTest

class TestHTMLParserTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):

    default_builder = HTMLParserTreeBuilder

    def test_namespaced_system_doctype(self):
        # html.parser can't handle namespaced doctypes, so skip this one.
        pass

    def test_namespaced_public_doctype(self):
        # html.parser can't handle namespaced doctypes, so skip this one.
        pass

    def test_builder_is_pickled(self):
        """Unlike most tree builders, HTMLParserTreeBuilder can be pickled
        and will be restored after pickling.
        """
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
        assert isinstance(loaded.builder, type(tree.builder))

    def test_redundant_empty_element_closing_tags(self):
        self.assert_soup('<br></br><br></br><br></br>', "<br/><br/><br/>")
        self.assert_soup('</br></br></br>', "")

    def test_empty_element(self):
        # This verifies that any buffered data present when the parser
        # finishes working is handled.
        self.assert_soup("foo &# bar", "foo &amp;# bar")

    def test_tracking_line_numbers(self):
        # The html.parser TreeBuilder keeps track of line number and
        # position of each element.
        markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
        soup = self.soup(markup)
        assert 2 == soup.p.sourceline
        assert 3 == soup.p.sourcepos
        assert "sourceline" == soup.p.find('sourceline').name

        # You can deactivate this behavior.
        soup = self.soup(markup, store_line_numbers=False)
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

    def test_on_duplicate_attribute(self):
        # The html.parser tree builder has a variety of ways of
        # handling a tag that contains the same attribute multiple times.

        markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">'

        # If you don't provide any particular value for
        # on_duplicate_attribute, later values replace earlier values.
        soup = self.soup(markup)
        assert "url3" == soup.a['href']
        assert ["cls"] == soup.a['class']
        assert "id" == soup.a['id']

        # You can also get this behavior explicitly.
        def assert_attribute(on_duplicate_attribute, expected):
            soup = self.soup(
                markup, on_duplicate_attribute=on_duplicate_attribute
            )
            assert expected == soup.a['href']

            # Verify that non-duplicate attributes are treated normally.
            assert ["cls"] == soup.a['class']
            assert "id" == soup.a['id']
        assert_attribute(None, "url3")
        assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3")

        # You can ignore subsequent values in favor of the first.
        assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1")

        # And you can pass in a callable that does whatever you want.
        def accumulate(attrs, key, value):
            if not isinstance(attrs[key], list):
                attrs[key] = [attrs[key]]
            attrs[key].append(value)
        assert_attribute(accumulate, ["url1", "url2", "url3"])

    def test_html5_attributes(self):
        # The html.parser TreeBuilder can convert any entity named in
        # the HTML5 spec to a sequence of Unicode characters, and
        # convert those Unicode characters to a (potentially
        # different) named entity on the way out.
        for input_element, output_unicode, output_element in (
                ("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
                ('&models;', '\u22a7', b'&models;'),
                ('&Nfr;', '\U0001d511', b'&Nfr;'),
                ('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
                ('&not;', '\xac', b'&not;'),
                ('&Not;', '\u2aec', b'&Not;'),
                ('&quot;', '"', b'"'),
                ('&there4;', '\u2234', b'&there4;'),
                ('&Therefore;', '\u2234', b'&there4;'),
                ('&therefore;', '\u2234', b'&there4;'),
                ("&fjlig;", 'fj', b'fj'),
                ("&sqcup;", '\u2294', b'&sqcup;'),
                ("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
                ("&apos;", "'", b"'"),
                ("&verbar;", "|", b"|"),
        ):
            markup = '<div>%s</div>' % input_element
            div = self.soup(markup).div
            without_element = div.encode()
            expect = b"<div>%s</div>" % output_unicode.encode("utf8")
            assert without_element == expect

            with_element = div.encode(formatter="html")
            expect = b"<div>%s</div>" % output_element
            assert with_element == expect

class TestHTMLParserSubclass(SoupTest):
    def test_error(self):
        """Verify that our HTMLParser subclass implements error() in a way
        that doesn't cause a crash.
        """
        parser = BeautifulSoupHTMLParser()
        with warnings.catch_warnings(record=True) as warns:
            parser.error("don't crash")
        [warning] = warns
        assert "don't crash" == str(warning.message)
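A quick sketch of the duplicate-attribute knob tested above; the markup is invented for illustration:

    from bs4 import BeautifulSoup

    markup = '<a href="url1" href="url2">'
    # By default the last value wins.
    print(BeautifulSoup(markup, 'html.parser').a['href'])  # url2
    # 'ignore' keeps the first value instead.
    soup = BeautifulSoup(markup, 'html.parser', on_duplicate_attribute='ignore')
    print(soup.a['href'])  # url1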
@ -0,0 +1,199 @@
"""Tests to ensure that the lxml tree builder generates good trees."""

import pickle
import re
import warnings

try:
    import lxml.etree
    LXML_PRESENT = True
    LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError as e:
    LXML_PRESENT = False
    LXML_VERSION = (0,)

if LXML_PRESENT:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML

from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from . import (
    HTMLTreeBuilderSmokeTest,
    XMLTreeBuilderSmokeTest,
    SoupTest,
    skipIf,
)

@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class TestLXMLTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return LXMLTreeBuilder

    def test_out_of_range_entity(self):
        self.assert_soup(
            "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo&#1000000000;bar</p>", "<p>foobar</p>")

    def test_entities_in_foreign_document_encoding(self):
        # We can't implement this case correctly because by the time we
        # hear about markup like "&#147;", it's been (incorrectly) converted into
        # a string like u'\x93'
        pass

    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.

    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        assert "" == doctype.strip()

    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        assert "<b/>" == str(soup.b)
        assert "BeautifulStoneSoup class is deprecated" in str(w[0].message)

    def test_tracking_line_numbers(self):
        # The lxml TreeBuilder cannot keep track of line numbers from
        # the original markup. Even if you ask for line numbers, we
        # don't have 'em.
        #
        # This means that if you have a tag like <sourceline> or
        # <sourcepos>, attribute access will find it rather than
        # giving you a numeric answer.
        soup = self.soup(
            "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
            store_line_numbers=True
        )
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class TestLXMLXMLTreeBuilder(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return LXMLTreeBuilderForXML

    def test_namespace_indexing(self):
        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
            '<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
            '<subtag xmlns="http://another-unprefixed-namespace.com">'
            '<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
            '</prefix2:tag3>'
            '</root>'
        )

        # The BeautifulSoup object includes every namespace prefix
        # defined in the entire document. This is the default set of
        # namespaces used by soupsieve.
        #
        # Un-prefixed namespaces are not included, and if a given
        # prefix is defined twice, only the first prefix encountered
        # in the document shows up here.
        assert soup._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
            'prefix': 'http://prefixed-namespace.com',
            'prefix2': 'http://another-namespace.com'
        }

        # A Tag object includes only the namespace prefixes
        # that were in scope when it was parsed.

        # We do not track un-prefixed namespaces as we can only hold
        # one (the first one), and it will be recognized as the
        # default namespace by soupsieve, even when operating from a
        # tag with a different un-prefixed namespace.
        assert soup.tag._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.tag2._namespaces == {
            'prefix': 'http://prefixed-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.subtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.subsubtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }


    def test_namespace_interaction_with_select_and_find(self):
        # Demonstrate how namespaces interact with select* and
        # find* methods.

        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</tag>'
            '<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
            '<prefix:tag3>'
            '</subtag>'
            '</root>'
        )

        # soupselect uses namespace URIs.
        assert soup.select_one('tag').name == 'tag'
        assert soup.select_one('prefix|tag2').name == 'tag2'

        # If a prefix is declared more than once, only the first usage
        # is registered with the BeautifulSoup object.
        assert soup.select_one('prefix|tag3') is None

        # But you can always explicitly specify a namespace dictionary.
        assert soup.select_one(
            'prefix|tag3', namespaces=soup.subtag._namespaces
        ).name == 'tag3'

        # And a Tag (as opposed to the BeautifulSoup object) will
        # have a set of default namespaces scoped to that Tag.
        assert soup.subtag.select_one('prefix|tag3').name == 'tag3'

        # the find() methods aren't fully namespace-aware; they just
        # look at prefixes.
        assert soup.find('tag').name == 'tag'
        assert soup.find('prefix:tag2').name == 'tag2'
        assert soup.find('prefix:tag3').name == 'tag3'
        assert soup.subtag.find('prefix:tag3').name == 'tag3'

    def test_pickle_removes_builder(self):
        # The lxml TreeBuilder is not picklable, so it won't be
        # preserved in a pickle/unpickle operation.

        soup = self.soup("<a>some markup</a>")
        assert isinstance(soup.builder, self.default_builder)
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.a.string
        assert unpickled.builder is None
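A compact sketch of the namespace-aware selection shown in these tests (the 'xml' feature requires lxml); the document and prefix are invented:

    from bs4 import BeautifulSoup

    xml = '<root xmlns:p="http://example.com/ns"><p:item>hi</p:item></root>'
    soup = BeautifulSoup(xml, 'xml')
    # soupsieve resolves the declared prefix through the namespace URI...
    print(soup.select_one('p|item').name)  # item
    # ...while find() just matches the literal "prefix:name" string.
    print(soup.find('p:item').name)        # item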
@ -0,0 +1,144 @@
import pytest

from bs4.element import (
    CData,
    Comment,
    Declaration,
    Doctype,
    NavigableString,
    RubyParenthesisString,
    RubyTextString,
    Script,
    Stylesheet,
    TemplateString,
)

from . import SoupTest

class TestNavigableString(SoupTest):

    def test_text_acquisition_methods(self):
        # These methods are intended for use against Tag, but they
        # work on NavigableString as well.

        s = NavigableString("fee ")
        cdata = CData("fie ")
        comment = Comment("foe ")

        assert "fee " == s.get_text()
        assert "fee" == s.get_text(strip=True)
        assert ["fee "] == list(s.strings)
        assert ["fee"] == list(s.stripped_strings)
        assert ["fee "] == list(s._all_strings())

        assert "fie " == cdata.get_text()
        assert "fie" == cdata.get_text(strip=True)
        assert ["fie "] == list(cdata.strings)
        assert ["fie"] == list(cdata.stripped_strings)
        assert ["fie "] == list(cdata._all_strings())

        # Since a Comment isn't normally considered 'text',
        # these methods generally do nothing.
        assert "" == comment.get_text()
        assert [] == list(comment.strings)
        assert [] == list(comment.stripped_strings)
        assert [] == list(comment._all_strings())

        # Unless you specifically say that comments are okay.
        assert "foe" == comment.get_text(strip=True, types=Comment)
        assert "foe " == comment.get_text(types=(Comment, NavigableString))

    def test_string_has_immutable_name_property(self):
        # string.name is defined as None and can't be modified
        string = self.soup("s").string
        assert None == string.name
        with pytest.raises(AttributeError):
            string.name = 'foo'

class TestNavigableStringSubclasses(SoupTest):

    def test_cdata(self):
        # None of the current builders turn CDATA sections into CData
        # objects, but you can create them manually.
        soup = self.soup("")
        cdata = CData("foo")
        soup.insert(1, cdata)
        assert str(soup) == "<![CDATA[foo]]>"
        assert soup.find(string="foo") == "foo"
        assert soup.contents[0] == "foo"

    def test_cdata_is_never_formatted(self):
        """Text inside a CData object is passed into the formatter.

        But the return value is ignored.
        """

        self.count = 0
        def increment(*args):
            self.count += 1
            return "BITTER FAILURE"

        soup = self.soup("")
        cdata = CData("<><><>")
        soup.insert(1, cdata)
        assert b"<![CDATA[<><><>]]>" == soup.encode(formatter=increment)
        assert 1 == self.count

    def test_doctype_ends_in_newline(self):
        # Unlike other NavigableString subclasses, a DOCTYPE always ends
        # in a newline.
        doctype = Doctype("foo")
        soup = self.soup("")
        soup.insert(1, doctype)
        assert soup.encode() == b"<!DOCTYPE foo>\n"

    def test_declaration(self):
        d = Declaration("foo")
        assert "<?foo?>" == d.output_ready()

    def test_default_string_containers(self):
        # In some cases, we use different NavigableString subclasses for
        # the same text in different tags.
        soup = self.soup(
            "<div>text</div><script>text</script><style>text</style>"
        )
        assert [NavigableString, Script, Stylesheet] == [
            x.__class__ for x in soup.find_all(string=True)
        ]

        # The TemplateString is a little unusual because it's generally found
        # _inside_ children of a <template> element, not a direct child of the
        # <template> element.
        soup = self.soup(
            "<template>Some text<p>In a tag</p></template>Some text outside"
        )
        assert all(
            isinstance(x, TemplateString)
            for x in soup.template._all_strings(types=None)
        )

        # Once the <template> tag closed, we went back to using
        # NavigableString.
        outside = soup.template.next_sibling
        assert isinstance(outside, NavigableString)
        assert not isinstance(outside, TemplateString)

        # The TemplateString is also unusual because it can contain
        # NavigableString subclasses of _other_ types, such as
        # Comment.
        markup = b"<template>Some text<p>In a tag</p><!--with a comment--></template>"
        soup = self.soup(markup)
        assert markup == soup.template.encode("utf8")

    def test_ruby_strings(self):
        markup = "<ruby>漢 <rp>(</rp><rt>kan</rt><rp>)</rp> 字 <rp>(</rp><rt>ji</rt><rp>)</rp></ruby>"
        soup = self.soup(markup)
        assert isinstance(soup.rp.string, RubyParenthesisString)
        assert isinstance(soup.rt.string, RubyTextString)

        # Just as a demo, here's what this means for get_text usage.
        assert "漢字" == soup.get_text(strip=True)
        assert "漢(kan)字(ji)" == soup.get_text(
            strip=True,
            types=(NavigableString, RubyTextString, RubyParenthesisString)
        )
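One more usage sketch, mirroring what test_text_acquisition_methods asserts about comments; the markup is invented:

    from bs4 import BeautifulSoup
    from bs4.element import Comment

    soup = BeautifulSoup('<p>text<!--hidden--></p>', 'html.parser')
    print(soup.get_text())                  # "text" -- comments are skipped
    print(soup.get_text(types=(Comment,)))  # "hidden" -- unless asked for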
@ -0,0 +1,751 @@
|
||||||
|
"""Tests of the bs4.element.PageElement class"""
|
||||||
|
import copy
|
||||||
|
import pickle
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from soupsieve import SelectorSyntaxError
|
||||||
|
|
||||||
|
from bs4 import BeautifulSoup
|
||||||
|
from bs4.element import (
|
||||||
|
Comment,
|
||||||
|
SoupStrainer,
|
||||||
|
)
|
||||||
|
from . import SoupTest
|
||||||
|
|
||||||
|
|
||||||
|
class TestEncoding(SoupTest):
|
||||||
|
"""Test the ability to encode objects into strings."""
|
||||||
|
|
||||||
|
def test_unicode_string_can_be_encoded(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert soup.b.string.encode("utf-8") == "\N{SNOWMAN}".encode("utf-8")
|
||||||
|
|
||||||
|
def test_tag_containing_unicode_string_can_be_encoded(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert soup.b.encode("utf-8") == html.encode("utf-8")
|
||||||
|
|
||||||
|
def test_encoding_substitutes_unrecognized_characters_by_default(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert soup.b.encode("ascii") == b"<b>☃</b>"
|
||||||
|
|
||||||
|
def test_encoding_can_be_made_strict(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
with pytest.raises(UnicodeEncodeError):
|
||||||
|
soup.encode("ascii", errors="strict")
|
||||||
|
|
||||||
|
def test_decode_contents(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert "\N{SNOWMAN}" == soup.b.decode_contents()
|
||||||
|
|
||||||
|
def test_encode_contents(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert "\N{SNOWMAN}".encode("utf8") == soup.b.encode_contents(
|
||||||
|
encoding="utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_deprecated_renderContents(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert "\N{SNOWMAN}".encode("utf8") == soup.b.renderContents()
|
||||||
|
|
||||||
|
def test_repr(self):
|
||||||
|
html = "<b>\N{SNOWMAN}</b>"
|
||||||
|
soup = self.soup(html)
|
||||||
|
assert html == repr(soup)
|
||||||
|
|
||||||
|
|
||||||
|
class TestFormatters(SoupTest):
|
||||||
|
"""Test the formatting feature, used by methods like decode() and
|
||||||
|
prettify(), and the formatters themselves.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_default_formatter_is_minimal(self):
|
||||||
|
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter="minimal")
|
||||||
|
# The < is converted back into < but the e-with-acute is left alone.
|
||||||
|
assert decoded == self.document_for(
|
||||||
|
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_formatter_html(self):
|
||||||
|
markup = "<br><b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter="html")
|
||||||
|
assert decoded == self.document_for(
|
||||||
|
"<br/><b><<Sacré bleu!>></b>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_formatter_html5(self):
|
||||||
|
markup = "<br><b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter="html5")
|
||||||
|
assert decoded == self.document_for(
|
||||||
|
"<br><b><<Sacré bleu!>></b>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_formatter_minimal(self):
|
||||||
|
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter="minimal")
|
||||||
|
# The < is converted back into < but the e-with-acute is left alone.
|
||||||
|
assert decoded == self.document_for(
|
||||||
|
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_formatter_null(self):
|
||||||
|
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter=None)
|
||||||
|
# Neither the angle brackets nor the e-with-acute are converted.
|
||||||
|
# This is not valid HTML, but it's what the user wanted.
|
||||||
|
assert decoded == self.document_for(
|
||||||
|
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_formatter_custom(self):
|
||||||
|
markup = "<b><foo></b><b>bar</b><br/>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
decoded = soup.decode(formatter = lambda x: x.upper())
|
||||||
|
# Instead of normal entity conversion code, the custom
|
||||||
|
# callable is called on every string.
|
||||||
|
assert decoded == self.document_for("<b><FOO></b><b>BAR</b><br/>")
|
||||||
|
|
||||||
|
def test_formatter_is_run_on_attribute_values(self):
|
||||||
|
markup = '<a href="http://a.com?a=b&c=é">e</a>'
|
||||||
|
soup = self.soup(markup)
|
||||||
|
a = soup.a
|
||||||
|
|
||||||
|
expect_minimal = '<a href="http://a.com?a=b&c=é">e</a>'
|
||||||
|
|
||||||
|
assert expect_minimal == a.decode()
|
||||||
|
assert expect_minimal == a.decode(formatter="minimal")
|
||||||
|
|
||||||
|
expect_html = '<a href="http://a.com?a=b&c=é">e</a>'
|
||||||
|
assert expect_html == a.decode(formatter="html")
|
||||||
|
|
||||||
|
assert markup == a.decode(formatter=None)
|
||||||
|
expect_upper = '<a href="HTTP://A.COM?A=B&C=É">E</a>'
|
||||||
|
assert expect_upper == a.decode(formatter=lambda x: x.upper())
|
||||||
|
|
||||||
|
def test_formatter_skips_script_tag_for_html_documents(self):
|
||||||
|
doc = """
|
||||||
|
<script type="text/javascript">
|
||||||
|
console.log("< < hey > > ");
|
||||||
|
</script>
|
||||||
|
"""
|
||||||
|
encoded = BeautifulSoup(doc, 'html.parser').encode()
|
||||||
|
assert b"< < hey > >" in encoded
|
||||||
|
|
||||||
|
def test_formatter_skips_style_tag_for_html_documents(self):
|
||||||
|
doc = """
|
||||||
|
<style type="text/css">
|
||||||
|
console.log("< < hey > > ");
|
||||||
|
</style>
|
||||||
|
"""
|
||||||
|
encoded = BeautifulSoup(doc, 'html.parser').encode()
|
||||||
|
assert b"< < hey > >" in encoded
|
||||||
|
|
||||||
|
def test_prettify_leaves_preformatted_text_alone(self):
|
||||||
|
soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz <textarea> eee\nfff\t</textarea></div>")
|
||||||
|
# Everything outside the <pre> tag is reformatted, but everything
|
||||||
|
# inside is left alone.
|
||||||
|
assert '<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n <textarea> eee\nfff\t</textarea>\n</div>' == soup.div.prettify()
|
||||||
|
|
||||||
|
def test_prettify_accepts_formatter_function(self):
|
||||||
|
soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
|
||||||
|
pretty = soup.prettify(formatter = lambda x: x.upper())
|
||||||
|
assert "FOO" in pretty
|
||||||
|
|
||||||
|
def test_prettify_outputs_unicode_by_default(self):
|
||||||
|
soup = self.soup("<a></a>")
|
||||||
|
assert str == type(soup.prettify())
|
||||||
|
|
||||||
|
def test_prettify_can_encode_data(self):
|
||||||
|
soup = self.soup("<a></a>")
|
||||||
|
assert bytes == type(soup.prettify("utf-8"))
|
||||||
|
|
||||||
|
def test_html_entity_substitution_off_by_default(self):
|
||||||
|
markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
|
||||||
|
soup = self.soup(markup)
|
||||||
|
encoded = soup.b.encode("utf-8")
|
||||||
|
assert encoded == markup.encode('utf-8')
|
||||||
|
|
||||||
|
def test_encoding_substitution(self):
|
||||||
|
# Here's the <meta> tag saying that a document is
|
||||||
|
# encoded in Shift-JIS.
|
||||||
|
meta_tag = ('<meta content="text/html; charset=x-sjis" '
|
||||||
|
'http-equiv="Content-type"/>')
|
||||||
|
soup = self.soup(meta_tag)
|
||||||
|
|
||||||
|
# Parse the document, and the charset apprears unchanged.
|
||||||
|
assert soup.meta['content'] == 'text/html; charset=x-sjis'
|
||||||
|
|
||||||
|
# Encode the document into some encoding, and the encoding is
|
||||||
|
# substituted into the meta tag.
|
||||||
|
utf_8 = soup.encode("utf-8")
|
||||||
|
assert b"charset=utf-8" in utf_8
|
||||||
|
|
||||||
|
euc_jp = soup.encode("euc_jp")
|
||||||
|
assert b"charset=euc_jp" in euc_jp
|
||||||
|
|
||||||
|
shift_jis = soup.encode("shift-jis")
|
||||||
|
assert b"charset=shift-jis" in shift_jis
|
||||||
|
|
||||||
|
utf_16_u = soup.encode("utf-16").decode("utf-16")
|
||||||
|
assert "charset=utf-16" in utf_16_u
|
||||||
|
|
||||||
|
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
|
||||||
|
markup = ('<head><meta content="text/html; charset=x-sjis" '
|
||||||
|
'http-equiv="Content-type"/></head><pre>foo</pre>')
|
||||||
|
|
||||||
|
# Beautiful Soup used to try to rewrite the meta tag even if the
|
||||||
|
# meta tag got filtered out by the strainer. This test makes
|
||||||
|
# sure that doesn't happen.
|
||||||
|
strainer = SoupStrainer('pre')
|
||||||
|
soup = self.soup(markup, parse_only=strainer)
|
||||||
|
assert soup.contents[0].name == 'pre'
|
||||||
|
|
||||||
|
|
||||||
|
class TestCSSSelectors(SoupTest):
|
||||||
|
"""Test basic CSS selector functionality.
|
||||||
|
|
||||||
|
This functionality is implemented in soupsieve, which has a much
|
||||||
|
more comprehensive test suite, so this is basically an extra check
|
||||||
|
that soupsieve works as expected.
|
||||||
|
"""
|
||||||
|
|
||||||
|
HTML = """
|
||||||
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
||||||
|
"http://www.w3.org/TR/html4/strict.dtd">
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>The title</title>
|
||||||
|
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
|
||||||
|
<div id="main" class="fancy">
|
||||||
|
<div id="inner">
|
||||||
|
<h1 id="header1">An H1</h1>
|
||||||
|
<p>Some text</p>
|
||||||
|
<p class="onep" id="p1">Some more text</p>
|
||||||
|
<h2 id="header2">An H2</h2>
|
||||||
|
<p class="class1 class2 class3" id="pmulti">Another</p>
|
||||||
|
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
|
||||||
|
<h2 id="header3">Another H2</h2>
|
||||||
|
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
|
||||||
|
<span class="s1">
|
||||||
|
<a href="#" id="s1a1">span1a1</a>
|
||||||
|
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
|
||||||
|
<span class="span2">
|
||||||
|
<a href="#" id="s2a1">span2a1</a>
|
||||||
|
</span>
|
||||||
|
<span class="span3"></span>
|
||||||
|
<custom-dashed-tag class="dashed" id="dash2"/>
|
||||||
|
<div data-tag="dashedvalue" id="data1"/>
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<x id="xid">
|
||||||
|
<z id="zida"/>
|
||||||
|
<z id="zidab"/>
|
||||||
|
<z id="zidac"/>
|
||||||
|
</x>
|
||||||
|
<y id="yid">
|
||||||
|
<z id="zidb"/>
|
||||||
|
</y>
|
||||||
|
<p lang="en" id="lang-en">English</p>
|
||||||
|
<p lang="en-gb" id="lang-en-gb">English UK</p>
|
||||||
|
<p lang="en-us" id="lang-en-us">English US</p>
|
||||||
|
<p lang="fr" id="lang-fr">French</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="footer">
|
||||||
|
</div>
|
||||||
|
"""
|
||||||
|
|
||||||
|
def setup_method(self):
|
||||||
|
self.soup = BeautifulSoup(self.HTML, 'html.parser')
|
||||||
|
|
||||||
|
def assert_selects(self, selector, expected_ids, **kwargs):
|
||||||
|
el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
|
||||||
|
el_ids.sort()
|
||||||
|
expected_ids.sort()
|
||||||
|
assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
|
||||||
|
selector, ', '.join(expected_ids), ', '.join(el_ids)
|
||||||
|
)
|
||||||
|
|
||||||
|
assertSelect = assert_selects
|
||||||
|
|
||||||
|
def assert_select_multiple(self, *tests):
|
||||||
|
for selector, expected_ids in tests:
|
||||||
|
self.assert_selects(selector, expected_ids)
|
||||||
|
|
||||||
|
def test_one_tag_one(self):
|
||||||
|
els = self.soup.select('title')
|
||||||
|
assert len(els) == 1
|
||||||
|
assert els[0].name == 'title'
|
||||||
|
assert els[0].contents == ['The title']
|
||||||
|
|
||||||
|
def test_one_tag_many(self):
|
||||||
|
els = self.soup.select('div')
|
||||||
|
assert len(els) == 4
|
||||||
|
for div in els:
|
||||||
|
assert div.name == 'div'
|
||||||
|
|
||||||
|
el = self.soup.select_one('div')
|
||||||
|
assert 'main' == el['id']
|
||||||
|
|
||||||
|
def test_select_one_returns_none_if_no_match(self):
|
||||||
|
match = self.soup.select_one('nonexistenttag')
|
||||||
|
assert None == match
|
||||||
|
|
||||||
|
|
||||||
|
def test_tag_in_tag_one(self):
|
||||||
|
els = self.soup.select('div div')
|
||||||
|
self.assert_selects('div div', ['inner', 'data1'])
|
||||||
|
|
||||||
|
def test_tag_in_tag_many(self):
|
||||||
|
for selector in ('html div', 'html body div', 'body div'):
|
||||||
|
self.assert_selects(selector, ['data1', 'main', 'inner', 'footer'])
|
||||||
|
|
||||||
|
|
||||||
|
def test_limit(self):
|
||||||
|
self.assert_selects('html div', ['main'], limit=1)
|
||||||
|
self.assert_selects('html body div', ['inner', 'main'], limit=2)
|
||||||
|
self.assert_selects('body div', ['data1', 'main', 'inner', 'footer'],
|
||||||
|
limit=10)
|
||||||
|
|
||||||
|
def test_tag_no_match(self):
|
||||||
|
assert len(self.soup.select('del')) == 0
|
||||||
|
|
||||||
|
def test_invalid_tag(self):
|
||||||
|
with pytest.raises(SelectorSyntaxError):
|
||||||
|
self.soup.select('tag%t')
|
||||||
|
|
||||||
|
def test_select_dashed_tag_ids(self):
|
||||||
|
self.assert_selects('custom-dashed-tag', ['dash1', 'dash2'])
|
||||||
|
|
||||||
|
def test_select_dashed_by_id(self):
|
||||||
|
dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
|
||||||
|
assert dashed[0].name == 'custom-dashed-tag'
|
||||||
|
assert dashed[0]['id'] == 'dash2'
|
||||||
|
|
||||||
|
def test_dashed_tag_text(self):
|
||||||
|
assert self.soup.select('body > custom-dashed-tag')[0].text == 'Hello there.'
|
||||||
|
|
||||||
|
def test_select_dashed_matches_find_all(self):
|
||||||
|
assert self.soup.select('custom-dashed-tag') == self.soup.find_all('custom-dashed-tag')
|
||||||
|
|
||||||
|
def test_header_tags(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('h1', ['header1']),
|
||||||
|
('h2', ['header2', 'header3']),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_class_one(self):
|
||||||
|
for selector in ('.onep', 'p.onep', 'html p.onep'):
|
||||||
|
els = self.soup.select(selector)
|
||||||
|
assert len(els) == 1
|
||||||
|
assert els[0].name == 'p'
|
||||||
|
assert els[0]['class'] == ['onep']
|
||||||
|
|
||||||
|
def test_class_mismatched_tag(self):
|
||||||
|
els = self.soup.select('div.onep')
|
||||||
|
assert len(els) == 0
|
||||||
|
|
||||||
|
def test_one_id(self):
|
||||||
|
for selector in ('div#inner', '#inner', 'div div#inner'):
|
||||||
|
self.assert_selects(selector, ['inner'])
|
||||||
|
|
||||||
|
def test_bad_id(self):
|
||||||
|
els = self.soup.select('#doesnotexist')
|
||||||
|
assert len(els) == 0
|
||||||
|
|
||||||
|
def test_items_in_id(self):
|
||||||
|
els = self.soup.select('div#inner p')
|
||||||
|
assert len(els) == 3
|
||||||
|
for el in els:
|
||||||
|
assert el.name == 'p'
|
||||||
|
assert els[1]['class'] == ['onep']
|
||||||
|
assert not els[0].has_attr('class')
|
||||||
|
|
||||||
|
def test_a_bunch_of_emptys(self):
|
||||||
|
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
|
||||||
|
assert len(self.soup.select(selector)) == 0
|
||||||
|
|
||||||
|
def test_multi_class_support(self):
|
||||||
|
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
|
||||||
|
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
|
||||||
|
self.assert_selects(selector, ['pmulti'])
|
||||||
|
|
||||||
|
def test_multi_class_selection(self):
|
||||||
|
for selector in ('.class1.class3', '.class3.class2',
|
||||||
|
'.class1.class2.class3'):
|
||||||
|
self.assert_selects(selector, ['pmulti'])
|
||||||
|
|
||||||
|
def test_child_selector(self):
|
||||||
|
self.assert_selects('.s1 > a', ['s1a1', 's1a2'])
|
||||||
|
self.assert_selects('.s1 > a span', ['s1a2s1'])
|
||||||
|
|
||||||
|
def test_child_selector_id(self):
|
||||||
|
self.assert_selects('.s1 > a#s1a2 span', ['s1a2s1'])
|
||||||
|
|
||||||
|
def test_attribute_equals(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('p[class="onep"]', ['p1']),
|
||||||
|
('p[id="p1"]', ['p1']),
|
||||||
|
('[class="onep"]', ['p1']),
|
||||||
|
('[id="p1"]', ['p1']),
|
||||||
|
('link[rel="stylesheet"]', ['l1']),
|
||||||
|
('link[type="text/css"]', ['l1']),
|
||||||
|
('link[href="blah.css"]', ['l1']),
|
||||||
|
('link[href="no-blah.css"]', []),
|
||||||
|
('[rel="stylesheet"]', ['l1']),
|
||||||
|
('[type="text/css"]', ['l1']),
|
||||||
|
('[href="blah.css"]', ['l1']),
|
||||||
|
('[href="no-blah.css"]', []),
|
||||||
|
('p[href="no-blah.css"]', []),
|
||||||
|
('[href="no-blah.css"]', []),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_tilde(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('p[class~="class1"]', ['pmulti']),
|
||||||
|
('p[class~="class2"]', ['pmulti']),
|
||||||
|
('p[class~="class3"]', ['pmulti']),
|
||||||
|
('[class~="class1"]', ['pmulti']),
|
||||||
|
('[class~="class2"]', ['pmulti']),
|
||||||
|
('[class~="class3"]', ['pmulti']),
|
||||||
|
('a[rel~="friend"]', ['bob']),
|
||||||
|
('a[rel~="met"]', ['bob']),
|
||||||
|
('[rel~="friend"]', ['bob']),
|
||||||
|
('[rel~="met"]', ['bob']),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_startswith(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('[rel^="style"]', ['l1']),
|
||||||
|
('link[rel^="style"]', ['l1']),
|
||||||
|
('notlink[rel^="notstyle"]', []),
|
||||||
|
('[rel^="notstyle"]', []),
|
||||||
|
('link[rel^="notstyle"]', []),
|
||||||
|
('link[href^="bla"]', ['l1']),
|
||||||
|
('a[href^="http://"]', ['bob', 'me']),
|
||||||
|
('[href^="http://"]', ['bob', 'me']),
|
||||||
|
('[id^="p"]', ['pmulti', 'p1']),
|
||||||
|
('[id^="m"]', ['me', 'main']),
|
||||||
|
('div[id^="m"]', ['main']),
|
||||||
|
('a[id^="m"]', ['me']),
|
||||||
|
('div[data-tag^="dashed"]', ['data1'])
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_endswith(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('[href$=".css"]', ['l1']),
|
||||||
|
('link[href$=".css"]', ['l1']),
|
||||||
|
('link[id$="1"]', ['l1']),
|
||||||
|
('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
|
||||||
|
('div[id$="1"]', ['data1']),
|
||||||
|
('[id$="noending"]', []),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_contains(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
# From test_attribute_startswith
|
||||||
|
('[rel*="style"]', ['l1']),
|
||||||
|
('link[rel*="style"]', ['l1']),
|
||||||
|
('notlink[rel*="notstyle"]', []),
|
||||||
|
('[rel*="notstyle"]', []),
|
||||||
|
('link[rel*="notstyle"]', []),
|
||||||
|
('link[href*="bla"]', ['l1']),
|
||||||
|
('[href*="http://"]', ['bob', 'me']),
|
||||||
|
('[id*="p"]', ['pmulti', 'p1']),
|
||||||
|
('div[id*="m"]', ['main']),
|
||||||
|
('a[id*="m"]', ['me']),
|
||||||
|
# From test_attribute_endswith
|
||||||
|
('[href*=".css"]', ['l1']),
|
||||||
|
('link[href*=".css"]', ['l1']),
|
||||||
|
('link[id*="1"]', ['l1']),
|
||||||
|
('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
|
||||||
|
('div[id*="1"]', ['data1']),
|
||||||
|
('[id*="noending"]', []),
|
||||||
|
# New for this test
|
||||||
|
('[href*="."]', ['bob', 'me', 'l1']),
|
||||||
|
('a[href*="."]', ['bob', 'me']),
|
||||||
|
('link[href*="."]', ['l1']),
|
||||||
|
('div[id*="n"]', ['main', 'inner']),
|
||||||
|
('div[id*="nn"]', ['inner']),
|
||||||
|
('div[data-tag*="edval"]', ['data1'])
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_exact_or_hypen(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
|
||||||
|
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
|
||||||
|
('p[lang|="fr"]', ['lang-fr']),
|
||||||
|
('p[lang|="gb"]', []),
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_attribute_exists(self):
|
||||||
|
self.assert_select_multiple(
|
||||||
|
('[rel]', ['l1', 'bob', 'me']),
|
||||||
|
('link[rel]', ['l1']),
|
||||||
|
('a[rel]', ['bob', 'me']),
|
||||||
|
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
|
||||||
|
('p[class]', ['p1', 'pmulti']),
|
||||||
|
('[blah]', []),
|
||||||
|
('p[blah]', []),
|
||||||
|
('div[data-tag]', ['data1'])
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_quoted_space_in_selector_name(self):
|
||||||
|
html = """<div style="display: wrong">nope</div>
|
||||||
|
<div style="display: right">yes</div>
|
||||||
|
"""
|
||||||
|
soup = BeautifulSoup(html, 'html.parser')
|
||||||
|
[chosen] = soup.select('div[style="display: right"]')
|
||||||
|
assert "yes" == chosen.string
|
||||||
|
|
||||||
|
def test_unsupported_pseudoclass(self):
|
||||||
|
with pytest.raises(NotImplementedError):
|
||||||
|
self.soup.select("a:no-such-pseudoclass")
|
||||||
|
|
||||||
|
with pytest.raises(SelectorSyntaxError):
|
||||||
|
self.soup.select("a:nth-of-type(a)")
|
||||||
|
|
||||||
|
def test_nth_of_type(self):
|
||||||
|
# Try to select first paragraph
|
||||||
|
els = self.soup.select('div#inner p:nth-of-type(1)')
|
||||||
|
assert len(els) == 1
|
||||||
|
assert els[0].string == 'Some text'
|
||||||
|
|
||||||
|
# Try to select third paragraph
|
||||||
|
els = self.soup.select('div#inner p:nth-of-type(3)')
|
||||||
|
assert len(els) == 1
|
||||||
|
assert els[0].string == 'Another'
|
||||||
|
|
||||||
|
# Try to select (non-existent!) fourth paragraph
|
||||||
|
els = self.soup.select('div#inner p:nth-of-type(4)')
|
||||||
|
assert len(els) == 0
|
||||||
|
|
||||||
|
# Zero will select no tags.
|
||||||
|
els = self.soup.select('div p:nth-of-type(0)')
|
||||||
|
assert len(els) == 0
|
||||||
|
|
||||||
|
def test_nth_of_type_direct_descendant(self):
|
||||||
|
els = self.soup.select('div#inner > p:nth-of-type(1)')
|
||||||
|
assert len(els) == 1
|
||||||
|
assert els[0].string == 'Some text'
|
||||||
|
|
||||||
|
def test_id_child_selector_nth_of_type(self):
|
||||||
|
self.assert_selects('#inner > p:nth-of-type(2)', ['p1'])
|
||||||
|
|
||||||
|
    def test_select_on_element(self):
        # Other tests operate on the tree; this operates on an element
        # within the tree.
        inner = self.soup.find("div", id="main")
        selected = inner.select("div")
        # The <div id="inner"> tag was selected. The <div id="footer">
        # tag was not.
        self.assert_selects_ids(selected, ['inner', 'data1'])

    def test_overspecified_child_id(self):
        self.assert_selects(".fancy #inner", ['inner'])
        self.assert_selects(".normal #inner", [])

    def test_adjacent_sibling_selector(self):
        self.assert_selects('#p1 + h2', ['header2'])
        self.assert_selects('#p1 + h2 + p', ['pmulti'])
        self.assert_selects('#p1 + #header2 + .class1', ['pmulti'])
        assert [] == self.soup.select('#p1 + p')

    def test_general_sibling_selector(self):
        self.assert_selects('#p1 ~ h2', ['header2', 'header3'])
        self.assert_selects('#p1 ~ #header2', ['header2'])
        self.assert_selects('#p1 ~ h2 + a', ['me'])
        self.assert_selects('#p1 ~ h2 + [rel="me"]', ['me'])
        assert [] == self.soup.select('#inner ~ h2')

    def test_dangling_combinator(self):
        with pytest.raises(SelectorSyntaxError):
            self.soup.select('h1 >')

    def test_sibling_combinator_wont_select_same_tag_twice(self):
        self.assert_selects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])

    # Test the selector grouping operator (the comma)
    def test_multiple_select(self):
        self.assert_selects('x, y', ['xid', 'yid'])

    def test_multiple_select_with_no_space(self):
        self.assert_selects('x,y', ['xid', 'yid'])

    def test_multiple_select_with_more_space(self):
        self.assert_selects('x,    y', ['xid', 'yid'])

    def test_multiple_select_duplicated(self):
        self.assert_selects('x, x', ['xid'])

    def test_multiple_select_sibling(self):
        self.assert_selects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])

    def test_multiple_select_tag_and_direct_descendant(self):
        self.assert_selects('x, y > z', ['xid', 'zidb'])

    def test_multiple_select_direct_descendant_and_tags(self):
        self.assert_selects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_multiple_select_indirect_descendant(self):
        self.assert_selects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_invalid_multiple_select(self):
        with pytest.raises(SelectorSyntaxError):
            self.soup.select(',x, y')
        with pytest.raises(SelectorSyntaxError):
            self.soup.select('x,,y')

    def test_multiple_select_attrs(self):
        self.assert_selects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])

    def test_multiple_select_ids(self):
        self.assert_selects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])

    def test_multiple_select_nested(self):
        self.assert_selects('body > div > x, y > z', ['xid', 'zidb'])

    def test_select_duplicate_elements(self):
        # When markup contains duplicate elements, a multiple select
        # will find all of them.
        markup = '<div class="c1"/><div class="c2"/><div class="c1"/>'
        soup = BeautifulSoup(markup, 'html.parser')
        selected = soup.select(".c1, .c2")
        assert 3 == len(selected)

        # Verify that find_all finds the same elements, though because
        # of an implementation detail it finds them in a different
        # order.
        for element in soup.find_all(class_=['c1', 'c2']):
            assert element in selected


class TestPersistence(SoupTest):
    "Testing features like pickle and deepcopy."

    def setup_method(self):
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        assert loaded.__class__ == BeautifulSoup
        assert loaded.decode() == self.tree.decode()

    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        assert copied.decode() == self.tree.decode()

    def test_copy_preserves_encoding(self):
        soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser')
        encoding = soup.original_encoding
        copy = soup.__copy__()
        assert "<p> </p>" == str(copy)
        assert encoding == copy.original_encoding

    def test_copy_preserves_builder_information(self):
        tag = self.soup('<p></p>').p

        # Simulate a tag obtained from a source file.
        tag.sourceline = 10
        tag.sourcepos = 33

        copied = tag.__copy__()

        # The TreeBuilder object is no longer available, but information
        # obtained from it gets copied over to the new Tag object.
        assert tag.sourceline == copied.sourceline
        assert tag.sourcepos == copied.sourcepos
        assert tag.can_be_empty_element == copied.can_be_empty_element
        assert tag.cdata_list_attributes == copied.cdata_list_attributes
        assert tag.preserve_whitespace_tags == copied.preserve_whitespace_tags

    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        assert loaded.decode() == soup.decode()

    def test_copy_navigablestring_is_not_attached_to_tree(self):
        html = "<b>Foo<a></a></b><b>Bar</b>"
        soup = self.soup(html)
        s1 = soup.find(string="Foo")
        s2 = copy.copy(s1)
        assert s1 == s2
        assert None == s2.parent
        assert None == s2.next_element
        assert None != s1.next_sibling
        assert None == s2.next_sibling
        assert None == s2.previous_element

    def test_copy_navigablestring_subclass_has_same_type(self):
        html = "<b><!--Foo--></b>"
        soup = self.soup(html)
        s1 = soup.string
        s2 = copy.copy(s1)
        assert s1 == s2
        assert isinstance(s2, Comment)

    def test_copy_entire_soup(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        soup_copy = copy.copy(soup)
        assert soup == soup_copy

    def test_copy_tag_copies_contents(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        div = soup.div
        div_copy = copy.copy(div)

        # The two tags look the same, and evaluate to equal.
        assert str(div) == str(div_copy)
        assert div == div_copy

        # But they're not the same object.
        assert div is not div_copy

        # And they don't have the same relation to the parse tree. The
        # copy is not associated with a parse tree at all.
        assert None == div_copy.parent
        assert None == div_copy.previous_element
        assert None == div_copy.find(string='Bar').next_element
        assert None != div.find(string='Bar').next_element

@@ -0,0 +1,462 @@
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""

from pdb import set_trace
import logging
import os
import pickle
import pytest
import sys
import tempfile

from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
    GuessedAtParserWarning,
    MarkupResemblesLocatorWarning,
    dammit,
)
from bs4.builder import (
    builder_registry,
    TreeBuilder,
    ParserRejectedMarkup,
)
from bs4.element import (
    Comment,
    SoupStrainer,
    Tag,
    NavigableString,
)

from . import (
    default_builder,
    SoupTest,
    skipIf,
)
import warnings

try:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
    LXML_PRESENT = True
except ImportError as e:
    LXML_PRESENT = False

PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))

class TestConstructor(SoupTest):

    def test_short_unicode_input(self):
        data = "<h1>éé</h1>"
        soup = self.soup(data)
        assert "éé" == soup.h1.string

    def test_embedded_null(self):
        data = "<h1>foo\0bar</h1>"
        soup = self.soup(data)
        assert "foo\0bar" == soup.h1.string

    def test_exclude_encodings(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
        assert "windows-1252" == soup.original_encoding

    def test_custom_builder_class(self):
        # Verify that you can pass in a custom Builder class and
        # it'll be instantiated with the appropriate keyword arguments.
        class Mock(object):
            def __init__(self, **kwargs):
                self.called_with = kwargs
                self.is_xml = True
                self.store_line_numbers = False
                self.cdata_list_attributes = []
                self.preserve_whitespace_tags = []
                self.string_containers = {}
            def initialize_soup(self, soup):
                pass
            def feed(self, markup):
                self.fed = markup
            def reset(self):
                pass
            def ignore(self, ignore):
                pass
            set_up_substitutions = can_be_empty_element = ignore
            def prepare_markup(self, *args, **kwargs):
                yield "prepared markup", "original encoding", "declared encoding", "contains replacement characters"

        kwargs = dict(
            var="value",
            # This is a deprecated BS3-era keyword argument, which
            # will be stripped out.
            convertEntities=True,
        )
        with warnings.catch_warnings(record=True):
            soup = BeautifulSoup('', builder=Mock, **kwargs)
        assert isinstance(soup.builder, Mock)
        assert dict(var="value") == soup.builder.called_with
        assert "prepared markup" == soup.builder.fed

        # You can also instantiate the TreeBuilder yourself. In this
        # case, that specific object is used and any keyword arguments
        # to the BeautifulSoup constructor are ignored.
        builder = Mock(**kwargs)
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup(
                '', builder=builder, ignored_value=True,
            )
        msg = str(w[0].message)
        assert msg.startswith("Keyword arguments to the BeautifulSoup constructor will be ignored.")
        assert builder == soup.builder
        assert kwargs == builder.called_with

    def test_parser_markup_rejection(self):
        # If markup is completely rejected by the parser, an
        # explanatory ParserRejectedMarkup exception is raised.
        class Mock(TreeBuilder):
            def feed(self, *args, **kwargs):
                raise ParserRejectedMarkup("Nope.")

            def prepare_markup(self, markup, *args, **kwargs):
                # We're going to try two different ways of preparing this markup,
                # but feed() will reject both of them.
                yield markup, None, None, False
                yield markup, None, None, False

        import re
        with pytest.raises(ParserRejectedMarkup) as exc_info:
            BeautifulSoup('', builder=Mock)
        assert "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help." in str(exc_info.value)

    def test_cdata_list_attributes(self):
        # Most attribute values are represented as scalars, but the
        # HTML standard says that some attributes, like 'class', have
        # space-separated lists as values.
        markup = '<a id=" an id " class=" a class "></a>'
        soup = self.soup(markup)

        # Note that the spaces are stripped for 'class' but not for 'id'.
        a = soup.a
        assert " an id " == a['id']
        assert ["a", "class"] == a['class']

        # TreeBuilder takes an argument called 'multi_valued_attributes' which lets
        # you customize or disable this. As always, you can customize the TreeBuilder
        # by passing in a keyword argument to the BeautifulSoup constructor.
        soup = self.soup(markup, builder=default_builder, multi_valued_attributes=None)
        assert " a class " == soup.a['class']

        # Here are two ways of saying that `id` is a multi-valued
        # attribute in this context, but 'class' is not.
        for switcheroo in ({'*': 'id'}, {'a': 'id'}):
            with warnings.catch_warnings(record=True) as w:
                # This will create a warning about not explicitly
                # specifying a parser, but we'll ignore it.
                soup = self.soup(markup, builder=None, multi_valued_attributes=switcheroo)
            a = soup.a
            assert ["an", "id"] == a['id']
            assert " a class " == a['class']

    def test_replacement_classes(self):
        # Test the ability to pass in replacements for element classes
        # which will be used when building the tree.
        class TagPlus(Tag):
            pass

        class StringPlus(NavigableString):
            pass

        class CommentPlus(Comment):
            pass

        soup = self.soup(
            "<a><b>foo</b>bar</a><!--whee-->",
            element_classes = {
                Tag: TagPlus,
                NavigableString: StringPlus,
                Comment: CommentPlus,
            }
        )

        # The tree was built with TagPlus, StringPlus, and CommentPlus objects,
        # rather than Tag, String, and Comment objects.
        assert all(
            isinstance(x, (TagPlus, StringPlus, CommentPlus))
            for x in soup.recursiveChildGenerator()
        )

    def test_alternate_string_containers(self):
        # Test the ability to customize the string containers for
        # different types of tags.
        class PString(NavigableString):
            pass

        class BString(NavigableString):
            pass

        soup = self.soup(
            "<div>Hello.<p>Here is <b>some <i>bolded</i></b> text",
            string_containers = {
                'b': BString,
                'p': PString,
            }
        )

        # The string before the <p> tag is a regular NavigableString.
        assert isinstance(soup.div.contents[0], NavigableString)

        # The string inside the <p> tag, but not inside the <i> tag,
        # is a PString.
        assert isinstance(soup.p.contents[0], PString)

        # Every string inside the <b> tag is a BString, even the one that
        # was also inside an <i> tag.
        for s in soup.b.strings:
            assert isinstance(s, BString)

        # Now that parsing is complete, the string_container_stack
        # (where this information was kept) has been cleared out.
        assert [] == soup.string_container_stack


class TestWarnings(SoupTest):

    def _assert_warning(self, warnings, cls):
        for w in warnings:
            if isinstance(w.message, cls):
                return w
        raise Exception("%s warning not found in %r" % (cls, warnings))

    def _assert_no_parser_specified(self, w):
        warning = self._assert_warning(w, GuessedAtParserWarning)
        message = str(warning.message)
        assert message.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:60])

    def test_warning_if_no_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>")
        self._assert_no_parser_specified(w)

    def test_warning_if_parser_specified_too_vague(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>", "html")
        self._assert_no_parser_specified(w)

    def test_no_warning_if_explicit_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>", "html.parser")
        assert [] == w

    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
        msg = str(w[0].message)
        assert "parseOnlyThese" in msg
        assert "parse_only" in msg
        assert b"<b></b>" == soup.encode()

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
        msg = str(w[0].message)
        assert "fromEncoding" in msg
        assert "from_encoding" in msg
        assert "utf8" == soup.original_encoding

    def test_unrecognized_keyword_argument(self):
        with pytest.raises(TypeError):
            self.soup("<a>", no_such_argument=True)

    @pytest.mark.parametrize(
        "extension",
        ['markup.html', 'markup.htm', 'markup.HTML', 'markup.txt',
         'markup.xhtml', 'markup.xml', "/home/user/file", "c:\\user\file"]
    )
    def test_resembles_filename_warning(self, extension):
        # A warning is issued if the "markup" looks like the name of
        # an HTML or text file, or a full path to a file on disk.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("markup" + extension)
            warning = self._assert_warning(w, MarkupResemblesLocatorWarning)
            assert "looks more like a filename" in str(warning.message)

    @pytest.mark.parametrize(
        "extension",
        ['markuphtml', 'markup.com', '', 'markup.js']
    )
    def test_resembles_filename_no_warning(self, extension):
        # The 'looks more like a filename' warning is not issued if
        # the markup looks like a bare string, a domain name, or a
        # file that's not an HTML file.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("markup" + extension)
            assert [] == w

    def test_url_warning_with_bytes_url(self):
        url = b"http://www.crummybytes.com/"
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(url)
        warning = self._assert_warning(
            warning_list, MarkupResemblesLocatorWarning
        )
        assert "looks more like a URL" in str(warning.message)
        assert url not in str(warning.message).encode("utf8")

    def test_url_warning_with_unicode_url(self):
        url = "http://www.crummyunicode.com/"
        with warnings.catch_warnings(record=True) as warning_list:
            # note - this url must differ from the bytes one otherwise
            # python's warnings system swallows the second warning
            soup = self.soup(url)
        warning = self._assert_warning(
            warning_list, MarkupResemblesLocatorWarning
        )
        assert "looks more like a URL" in str(warning.message)
        assert url not in str(warning.message)

    def test_url_warning_with_bytes_and_space(self):
        # Here the markup contains something besides a URL, so no warning
        # is issued.
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(b"http://www.crummybytes.com/ is great")
        assert not any("looks more like a URL" in str(w.message)
                       for w in warning_list)

    def test_url_warning_with_unicode_and_space(self):
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup("http://www.crummyunicode.com/ is great")
        assert not any("looks more like a URL" in str(w.message)
                       for w in warning_list)


class TestSelectiveParsing(SoupTest):

    def test_parse_with_soupstrainer(self):
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        assert soup.encode() == b"<b>Yes</b><b>Yes <c>Yes</c></b>"


class TestNewTag(SoupTest):
    """Test the BeautifulSoup.new_tag() method."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz", attrs={"name": "a name"})
        assert isinstance(new_tag, Tag)
        assert "foo" == new_tag.name
        assert dict(bar="baz", name="a name") == new_tag.attrs
        assert None == new_tag.parent

    def test_tag_inherits_self_closing_rules_from_builder(self):
        if LXML_PRESENT:
            xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            assert b"<br/>" == xml_br.encode()
            assert b"<p/>" == xml_p.encode()

        html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")

        # The HTML builder uses HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        assert b"<br/>" == html_br.encode()
        assert b"<p></p>" == html_p.encode()

class TestNewString(SoupTest):
    """Test the BeautifulSoup.new_string() method."""
    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        assert "foo" == s
        assert isinstance(s, NavigableString)

    def test_new_string_can_create_navigablestring_subclass(self):
        soup = self.soup("")
        s = soup.new_string("foo", Comment)
        assert "foo" == s
        assert isinstance(s, Comment)


class TestPickle(SoupTest):
    # Test our ability to pickle the BeautifulSoup object itself.

    def test_normal_pickle(self):
        soup = self.soup("<a>some markup</a>")
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.a.string

    def test_pickle_with_no_builder(self):
        # We had a bug that prevented pickling from working if
        # the builder wasn't set.
        soup = self.soup("some markup")
        soup.builder = None
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.string

class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.

    def setup_method(self):
        self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        assert self.utf8_data == b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>'

    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            dammit.chardet_dammit = noop
            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            assert isinstance(unicode_output, str)
            assert unicode_output == self.document_for(ascii.decode())
            assert soup_from_ascii.original_encoding.lower() == "utf-8"
        finally:
            logging.disable(logging.NOTSET)
            dammit.chardet_dammit = chardet

    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        assert soup_from_unicode.decode() == self.unicode_data
        assert soup_from_unicode.foo.string == 'Sacr\xe9 bleu!'
        assert soup_from_unicode.original_encoding == None

    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        assert soup_from_utf8.decode() == self.unicode_data
        assert soup_from_utf8.foo.string == 'Sacr\xe9 bleu!'

    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        assert soup_from_unicode.encode('utf-8') == self.utf8_data

    @skipIf(
        PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
        assert self.soup(markup).div.encode("utf8") == markup.encode("utf8")


@@ -0,0 +1,221 @@
import warnings
from bs4.element import (
    Comment,
    NavigableString,
)
from . import SoupTest

class TestTag(SoupTest):
    """Test various methods of Tag which aren't so complicated they
    need their own classes.
    """

    def test__should_pretty_print(self):
        # Test the rules about when a tag should be pretty-printed.
        tag = self.soup("").new_tag("a_tag")

        # No list of whitespace-preserving tags -> pretty-print
        tag._preserve_whitespace_tags = None
        assert True == tag._should_pretty_print(0)

        # List exists but tag is not on the list -> pretty-print
        tag.preserve_whitespace_tags = ["some_other_tag"]
        assert True == tag._should_pretty_print(1)

        # Indent level is None -> don't pretty-print
        assert False == tag._should_pretty_print(None)

        # Tag is on the whitespace-preserving list -> don't pretty-print
        tag.preserve_whitespace_tags = ["some_other_tag", "a_tag"]
        assert False == tag._should_pretty_print(1)

    def test_len(self):
        """The length of a Tag is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")

        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        assert len(soup.contents) == 1
        assert len(soup) == 1

        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        assert len(soup.top) == 3
        assert len(soup.top.contents) == 3

    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        assert soup.b == soup.find('b')
        assert soup.b.i == soup.find('b').find('i')
        assert soup.a == None

    def test_deprecated_member_access(self):
        soup = self.soup('<b><i></i></b>')
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        assert soup.b == tag
        assert '.bTag is deprecated, use .find("b") instead. If you really were looking for a tag called bTag, use .find("bTag")' == str(w[0].message)

    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.

        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        assert soup.foo.has_attr('attr')
        assert not soup.foo.has_attr('attr2')

    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')

    def test_string(self):
        # A Tag that contains only a text node makes that node
        # available as .string.
        soup = self.soup("<b>foo</b>")
        assert soup.b.string == 'foo'

    def test_empty_tag_has_no_string(self):
        # A Tag with no children has no .string.
        soup = self.soup("<b></b>")
        assert soup.b.string == None

    def test_tag_with_multiple_children_has_no_string(self):
        # A Tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        assert soup.b.string == None

        soup = self.soup("<a>foo<b></b>bar</b>")
        assert soup.b.string == None

        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        assert soup.a.string == None

    def test_tag_with_recursive_string_has_string(self):
        # A Tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        assert soup.a.string == "foo"
        assert soup.string == "foo"

    def test_lack_of_string(self):
        """Only a Tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        assert soup.b.string is None

        soup = self.soup("<b></b>")
        assert soup.b.string is None

    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        assert soup.a.text == "ar t "
        assert soup.a.get_text(strip=True) == "art"
        assert soup.a.get_text(",") == "a,r, , t "
        assert soup.a.get_text(",", strip=True) == "a,r,t"

    def test_get_text_ignores_special_string_containers(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        assert soup.get_text() == "foobar"

        assert soup.get_text(types=(NavigableString, Comment)) == "fooIGNOREbar"
        assert soup.get_text(types=None) == "fooIGNOREbar"

        soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
        assert soup.get_text() == "foobar"

    def test_all_strings_ignores_special_string_containers(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        assert ['foo', 'bar'] == list(soup.strings)

        soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
        assert ['foo', 'bar'] == list(soup.strings)

    def test_string_methods_inside_special_string_container_tags(self):
        # Strings inside tags like <script> are generally ignored by
        # methods like get_text, because they're not what humans
        # consider 'text'. But if you call get_text on the <script>
        # tag itself, those strings _are_ considered to be 'text',
        # because there's nothing else you might be looking for.

        style = self.soup("<div>a<style>Some CSS</style></div>")
        template = self.soup("<div>a<template><p>Templated <b>text</b>.</p><!--With a comment.--></template></div>")
        script = self.soup("<div>a<script><!--a comment-->Some text</script></div>")

        assert style.div.get_text() == "a"
        assert list(style.div.strings) == ["a"]
        assert style.div.style.get_text() == "Some CSS"
        assert list(style.div.style.strings) == ['Some CSS']

        # The comment is not picked up here. That's because it was
        # parsed into a Comment object, which is not considered
        # interesting by template.strings.
        assert template.div.get_text() == "a"
        assert list(template.div.strings) == ["a"]
        assert template.div.template.get_text() == "Templated text."
        assert list(template.div.template.strings) == ["Templated ", "text", "."]

        # The comment is included here, because it didn't get parsed
        # into a Comment object--it's part of the script string.
        assert script.div.get_text() == "a"
        assert list(script.div.strings) == ["a"]
        assert script.div.script.get_text() == "<!--a comment-->Some text"
        assert list(script.div.script.strings) == ['<!--a comment-->Some text']


class TestMultiValuedAttributes(SoupTest):
    """Test the behavior of multi-valued attributes like 'class'.

    The values of such attributes are always presented as lists.
    """

    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        assert ["foo"] == soup.a['class']

    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        assert ["foo", "bar"] == soup.a['class']

    def test_multiple_values_separated_by_weird_whitespace(self):
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        assert ["foo", "bar", "baz"] == soup.a['class']

    def test_attributes_joined_into_string_on_output(self):
        soup = self.soup("<a class='foo\tbar'>")
        assert b'<a class="foo bar"></a>' == soup.a.encode()

    def test_get_attribute_list(self):
        soup = self.soup("<a id='abc def'>")
        assert ['abc def'] == soup.a.get_attribute_list('id')

    def test_accept_charset(self):
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        assert ['ISO-8859-1', 'UTF-8'] == soup.form['accept-charset']

    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        assert 'ISO-8859-1 UTF-8' == soup.a['accept-charset']

    def test_customization(self):
        # It's possible to change which attributes of which tags
        # are treated as multi-valued attributes.
        #
        # Here, 'id' is a multi-valued attribute and 'class' is not.
        #
        # TODO: This code is in the builder and should be tested there.
        soup = self.soup(
            '<a class="foo" id="bar">', multi_valued_attributes={ '*' : 'id' }
        )
        assert soup.a['class'] == 'foo'
        assert soup.a['id'] == ['bar']

File diff suppressed because it is too large

@@ -0,0 +1 @@
pip

@@ -0,0 +1,21 @@
This package contains a modified version of ca-bundle.crt:

ca-bundle.crt -- Bundle of CA Root Certificates

Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#

***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $

@@ -0,0 +1,83 @@
Metadata-Version: 2.1
Name: certifi
Version: 2022.9.24
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.6
License-File: LICENSE

Certifi: Python SSL Certificates
================================

Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed certificate authority (CA) bundle, you can use the
built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'

Or from the command line::

    $ python -m certifi
    /usr/local/lib/python3.7/site-packages/certifi/cacert.pem
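
A minimal sketch of consuming that path with the standard library's
``ssl`` module (``ssl.create_default_context`` is stdlib, not part of
certifi, and is shown only as one plausible consumer)::

    >>> import ssl
    >>> import certifi
    >>> # Build a TLS context that trusts exactly the certifi bundle.
    >>> context = ssl.create_default_context(cafile=certifi.where())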

Enjoy!

1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~

Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.

In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed at the end of 2018.

.. _`Requests`: https://requests.readthedocs.io/en/master/

Addition/Removal of Certificates
--------------------------------

Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
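
One way to follow that guidance without modifying the bundle is to layer
extra trust at the ``ssl`` level; a sketch using only stdlib calls, where
the private-CA path is a hypothetical placeholder::

    >>> import ssl
    >>> import certifi
    >>> ctx = ssl.create_default_context(cafile=certifi.where())
    >>> # Trust an additional private CA alongside the Mozilla bundle.
    >>> ctx.load_verify_locations(cafile="/path/to/private-ca.pem")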

@@ -0,0 +1,14 @@
certifi-2022.9.24.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2022.9.24.dist-info/LICENSE,sha256=oC9sY4-fuE0G93ZMOrCF2K9-2luTwWbaVDEkeQd8b7A,1052
certifi-2022.9.24.dist-info/METADATA,sha256=33NAOmkqKTCb2u1Ys8Zth7ABWXfEuLgp-5gLp1yK_7A,2911
certifi-2022.9.24.dist-info/RECORD,,
certifi-2022.9.24.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
certifi-2022.9.24.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=luDjIGxDSrQ9O0zthdz5Lnt069Z_7eR1GIEefEaf-Ys,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-38.pyc,,
certifi/__pycache__/__main__.cpython-38.pyc,,
certifi/__pycache__/core.cpython-38.pyc,,
certifi/cacert.pem,sha256=3l8CcWt_qL42030rGieD3SLufICFX0bYtGhDl_EXVPI,286370
certifi/core.py,sha256=lhewz0zFb2b4ULsQurElmloYwQoecjWzPqY67P8T7iM,4219
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1 @@
certifi

@@ -0,0 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2022.09.24"

@@ -0,0 +1,12 @@
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())

File diff suppressed because it is too large

@@ -0,0 +1,108 @@
"""
|
||||||
|
certifi.py
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
This module returns the installation location of cacert.pem or its contents.
|
||||||
|
"""
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info >= (3, 11):
|
||||||
|
|
||||||
|
from importlib.resources import as_file, files
|
||||||
|
|
||||||
|
_CACERT_CTX = None
|
||||||
|
_CACERT_PATH = None
|
||||||
|
|
||||||
|
def where() -> str:
|
||||||
|
# This is slightly terrible, but we want to delay extracting the file
|
||||||
|
# in cases where we're inside of a zipimport situation until someone
|
||||||
|
# actually calls where(), but we don't want to re-extract the file
|
||||||
|
# on every call of where(), so we'll do it once then store it in a
|
||||||
|
# global variable.
|
||||||
|
global _CACERT_CTX
|
||||||
|
global _CACERT_PATH
|
||||||
|
if _CACERT_PATH is None:
|
||||||
|
# This is slightly janky, the importlib.resources API wants you to
|
||||||
|
# manage the cleanup of this file, so it doesn't actually return a
|
||||||
|
# path, it returns a context manager that will give you the path
|
||||||
|
# when you enter it and will do any cleanup when you leave it. In
|
||||||
|
# the common case of not needing a temporary file, it will just
|
||||||
|
# return the file system location and the __exit__() is a no-op.
|
||||||
|
#
|
||||||
|
# We also have to hold onto the actual context manager, because
|
||||||
|
# it will do the cleanup whenever it gets garbage collected, so
|
||||||
|
# we will also store that at the global level as well.
|
||||||
|
_CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
|
||||||
|
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
||||||
|
|
||||||
|
return _CACERT_PATH
|
||||||
|
|
||||||
|
def contents() -> str:
|
||||||
|
return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
|
||||||
|
|
||||||
|
elif sys.version_info >= (3, 7):
|
||||||
|
|
||||||
|
from importlib.resources import path as get_path, read_text
|
||||||
|
|
||||||
|
_CACERT_CTX = None
|
||||||
|
_CACERT_PATH = None
|
||||||
|
|
||||||
|
def where() -> str:
|
||||||
|
# This is slightly terrible, but we want to delay extracting the
|
||||||
|
# file in cases where we're inside of a zipimport situation until
|
||||||
|
# someone actually calls where(), but we don't want to re-extract
|
||||||
|
# the file on every call of where(), so we'll do it once then store
|
||||||
|
# it in a global variable.
|
||||||
|
global _CACERT_CTX
|
||||||
|
global _CACERT_PATH
|
||||||
|
if _CACERT_PATH is None:
|
||||||
|
# This is slightly janky, the importlib.resources API wants you
|
||||||
|
# to manage the cleanup of this file, so it doesn't actually
|
||||||
|
# return a path, it returns a context manager that will give
|
||||||
|
# you the path when you enter it and will do any cleanup when
|
||||||
|
# you leave it. In the common case of not needing a temporary
|
||||||
|
# file, it will just return the file system location and the
|
||||||
|
# __exit__() is a no-op.
|
||||||
|
#
|
||||||
|
# We also have to hold onto the actual context manager, because
|
||||||
|
# it will do the cleanup whenever it gets garbage collected, so
|
||||||
|
# we will also store that at the global level as well.
|
||||||
|
_CACERT_CTX = get_path("certifi", "cacert.pem")
|
||||||
|
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
||||||
|
|
||||||
|
return _CACERT_PATH
|
||||||
|
|
||||||
|
def contents() -> str:
|
||||||
|
return read_text("certifi", "cacert.pem", encoding="ascii")
|
||||||
|
|
||||||
|
else:
|
||||||
|
import os
|
||||||
|
import types
|
||||||
|
from typing import Union
|
||||||
|
|
||||||
|
Package = Union[types.ModuleType, str]
|
||||||
|
Resource = Union[str, "os.PathLike"]
|
||||||
|
|
||||||
|
# This fallback will work for Python versions prior to 3.7 that lack the
|
||||||
|
# importlib.resources module but relies on the existing `where` function
|
||||||
|
# so won't address issues with environments like PyOxidizer that don't set
|
||||||
|
# __file__ on modules.
|
||||||
|
def read_text(
|
||||||
|
package: Package,
|
||||||
|
resource: Resource,
|
||||||
|
encoding: str = 'utf-8',
|
||||||
|
errors: str = 'strict'
|
||||||
|
) -> str:
|
||||||
|
with open(where(), encoding=encoding) as data:
|
||||||
|
return data.read()
|
||||||
|
|
||||||
|
# If we don't have importlib.resources, then we will just do the old logic
|
||||||
|
# of assuming we're on the filesystem and munge the path directly.
|
||||||
|
def where() -> str:
|
||||||
|
f = os.path.dirname(__file__)
|
||||||
|
|
||||||
|
return os.path.join(f, "cacert.pem")
|
||||||
|
|
||||||
|
def contents() -> str:
|
||||||
|
return read_text("certifi", "cacert.pem", encoding="ascii")
|
|

@@ -0,0 +1 @@
pip

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 TAHRI Ahmed R.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,269 @@
Metadata-Version: 2.1
Name: charset-normalizer
Version: 2.1.1
Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
Home-page: https://github.com/ousret/charset_normalizer
Author: Ahmed TAHRI @Ousret
Author-email: ahmed.tahri@cloudnursery.dev
License: MIT
Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
Keywords: encoding,i18n,txt,text,charset,charset-detector,normalization,unicode,chardet
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Topic :: Text Processing :: Linguistic
Classifier: Topic :: Utilities
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Typing :: Typed
Requires-Python: >=3.6.0
Description-Content-Type: text/markdown
License-File: LICENSE
Provides-Extra: unicode_backport
Requires-Dist: unicodedata2 ; extra == 'unicode_backport'

<h1 align="center">Charset Detection, for Everyone 👋 <a href="https://twitter.com/intent/tweet?text=The%20Real%20First%20Universal%20Charset%20%26%20Language%20Detector&url=https://www.github.com/Ousret/charset_normalizer&hashtags=python,encoding,chardet,developers"><img src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"/></a></h1>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<sup>The Real First Universal Charset Detector</sup><br>
|
||||||
|
<a href="https://pypi.org/project/charset-normalizer">
|
||||||
|
<img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
|
||||||
|
</a>
|
||||||
|
<a href="https://codecov.io/gh/Ousret/charset_normalizer">
|
||||||
|
<img src="https://codecov.io/gh/Ousret/charset_normalizer/branch/master/graph/badge.svg" />
|
||||||
|
</a>
|
||||||
|
<a href="https://pepy.tech/project/charset-normalizer/">
|
||||||
|
<img alt="Download Count Total" src="https://pepy.tech/badge/charset-normalizer/month" />
|
||||||
|
</a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
> A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
|
||||||
|
> I'm trying to resolve the issue by taking a new approach.
|
||||||
|
> All IANA character set names for which the Python core library provides codecs are supported.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
>>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
|
||||||
|
</p>
|
||||||
|
|
||||||
|
This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
|
||||||
|
|
||||||
|
| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
|
||||||
|
| ------------- | :-------------: | :------------------: | :------------------: |
|
||||||
|
| `Fast` | ❌<br> | ✅<br> | ✅ <br> |
|
||||||
|
| `Universal**` | ❌ | ✅ | ❌ |
|
||||||
|
| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
|
||||||
|
| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
|
||||||
|
| `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
|
||||||
|
| `Native Python` | ✅ | ✅ | ❌ |
|
||||||
|
| `Detect spoken language` | ❌ | ✅ | N/A |
|
||||||
|
| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
|
||||||
|
| `Whl Size` | 193.6 kB | 39.5 kB | ~200 kB |
|
||||||
|
| `Supported Encoding` | 33 | :tada: [93](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
|
||||||
|
|
||||||
|
*\*\* : They clearly use encoding-specific code, even if it covers most of the encodings in use.*<br>
Did you get here because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)

## ⭐ Your support
|
||||||
|
|
||||||
|
*Fork, test-it, star-it, submit your ideas! We do listen.*
|
||||||
|
|
||||||
|
## ⚡ Performance

This package offers better performance than its counterpart Chardet. Here are some numbers.

| Package | Accuracy | Mean per file (ms) | File per sec (est) |
| ------------- | :-------------: | :------------------: | :------------------: |
| [chardet](https://github.com/chardet/chardet) | 86 % | 200 ms | 5 file/sec |
| charset-normalizer | **98 %** | **39 ms** | 26 file/sec |

| Package | 99th percentile | 95th percentile | 50th percentile |
| ------------- | :-------------: | :------------------: | :------------------: |
| [chardet](https://github.com/chardet/chardet) | 1200 ms | 287 ms | 23 ms |
| charset-normalizer | 400 ms | 200 ms | 15 ms |

Chardet's performance on larger files (1 MB+) is very poor. Expect a huge difference on large payloads.

> Stats are generated from 400+ files using default parameters. For details on the files used, see the GHA workflows.
> And yes, these results might change at any time. The dataset can be updated to include more files.
> The actual delays depend heavily on your CPU capabilities. The ratios should remain the same.
> Keep in mind that the stats are generous: Chardet's accuracy versus ours is measured using only
> the encodings Chardet initially supports. Challenge them if you want.

[cchardet](https://github.com/PyYoshi/cChardet) is a non-native (C++ binding), unmaintained, faster alternative with
better accuracy than chardet but lower than this package's. If speed is the most important factor, you should try it.

## ✨ Installation

Using PyPI for the latest stable release:

```sh
pip install charset-normalizer -U
```

If you want a more up-to-date `unicodedata` than the one bundled with your Python setup:

```sh
pip install charset-normalizer[unicode_backport] -U
```

## 🚀 Basic Usage

### CLI
This package comes with a CLI.

```
usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
                  file [file ...]

The Real First Universal Charset Detector. Discover originating encoding used
on text file. Normalize text to unicode.

positional arguments:
  files                 File(s) to be analysed

optional arguments:
  -h, --help            show this help message and exit
  -v, --verbose         Display complementary information about file if any.
                        Stdout will contain logs about the detection process.
  -a, --with-alternative
                        Output complementary possibilities if any. Top-level
                        JSON WILL be a list.
  -n, --normalize       Permit to normalize input file. If not set, program
                        does not write anything.
  -m, --minimal         Only output the charset detected to STDOUT. Disabling
                        JSON output.
  -r, --replace         Replace file when trying to normalize it instead of
                        creating a new one.
  -f, --force           Replace file without asking if you are sure, use this
                        flag with caution.
  -t THRESHOLD, --threshold THRESHOLD
                        Define a custom maximum amount of chaos allowed in
                        decoded content. 0. <= chaos <= 1.
  --version             Show version information and exit.
```

```bash
normalizer ./data/sample.1.fr.srt
```

:tada: Since version 1.4.0 the CLI produces an easily usable stdout result in JSON format.

```json
{
    "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
    "encoding": "cp1252",
    "encoding_aliases": [
        "1252",
        "windows_1252"
    ],
    "alternative_encodings": [
        "cp1254",
        "cp1256",
        "cp1258",
        "iso8859_14",
        "iso8859_15",
        "iso8859_16",
        "iso8859_3",
        "iso8859_9",
        "latin_1",
        "mbcs"
    ],
    "language": "French",
    "alphabets": [
        "Basic Latin",
        "Latin-1 Supplement"
    ],
    "has_sig_or_bom": false,
    "chaos": 0.149,
    "coherence": 97.152,
    "unicode_path": null,
    "is_preferred": true
}
```
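
If you are scripting around the CLI rather than importing the library, that JSON report can be consumed directly. A minimal sketch, assuming the `normalizer` entry point is on your PATH and the sample file above exists:

```python
import json
import subprocess

# Run the CLI and parse the JSON report it prints to stdout (since 1.4.0).
completed = subprocess.run(
    ["normalizer", "./data/sample.1.fr.srt"],
    capture_output=True,
    text=True,
    check=True,
)
report = json.loads(completed.stdout)
print(report["encoding"], report["language"], report["is_preferred"])
```

Remember that with `-a` the top-level JSON will be a list, per the help text above.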

### Python
*Just print out normalized text*
```python
from charset_normalizer import from_path

results = from_path('./my_subtitle.srt')

print(str(results.best()))
```

*Normalize any text file*
```python
from charset_normalizer import normalize
try:
    normalize('./my_subtitle.srt') # should write to disk my_subtitle-***.srt
except IOError as e:
    print('Sadly, we are unable to perform charset normalization.', str(e))
```

*Upgrade your code without effort*
```python
from charset_normalizer import detect
```

The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) backwards-compatible result possible.
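
For instance, a chardet-style call keeps working as-is. A minimal sketch (the sample bytes are illustrative):

```python
from charset_normalizer import detect

# Drop-in for chardet.detect: returns a dict with the chardet-compatible
# keys 'encoding', 'language' and 'confidence'.
result = detect('Ceci est un café médiocre.'.encode('cp1252'))
print(result['encoding'], result['confidence'])
```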

See the docs for advanced usage: [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)

## 😇 Why

When I started using Chardet, I noticed that it did not meet my expectations, and I wanted to propose a
reliable alternative using a completely different method. Also, I never back down from a good challenge!

I **don't care** about the **originating charset** encoding, because **two different tables** can
produce **two identical rendered strings.**
What I want is to get readable text, the best I can.

In a way, **I'm brute forcing text decoding.** How cool is that? 😎

Don't confuse the **ftfy** package with charset-normalizer or chardet. ftfy's goal is to repair Unicode strings, whereas charset-normalizer converts a raw file in an unknown encoding to Unicode.
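
A small illustration of that split in responsibilities (a sketch assuming `ftfy` is installed; it is not a dependency of this package):

```python
import ftfy  # third-party, assumed installed for this illustration
from charset_normalizer import from_bytes

# ftfy repairs a str that was already decoded with the wrong table (mojibake)...
print(ftfy.fix_text('cafÃ©'))  # -> 'café'

# ...whereas charset-normalizer starts from raw bytes in an unknown encoding.
print(str(from_bytes('café'.encode('utf_8')).best()))  # -> 'café'
```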

## 🍰 How

- Discard all charset encoding tables that could not fit the binary content.
- Measure the chaos, or mess, once the content is opened (in chunks) with a candidate charset encoding.
- Extract the matches with the lowest mess detected.
- Additionally, we measure coherence / probe for a language (a sketch of this pipeline follows the list).
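
Seen from the public API, that pipeline boils down to inspecting the surviving matches. A minimal sketch using `from_bytes` (the payload is illustrative; the attribute names are those exposed by this version's models):

```python
from charset_normalizer import from_bytes

payload = 'Bсеки човек има право на образование.'.encode('cp1251')
matches = from_bytes(payload)  # tables that cannot fit are already discarded

best = matches.best()  # the match with the lowest mess detected
if best is not None:
    # chaos and coherence are the two measures described above, as percentages.
    print(best.encoding, best.percent_chaos, best.percent_coherence)
```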

**Wait a minute**, what is chaos/mess and coherence according to **YOU?**

*Chaos:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
**I established** some ground rules about **what is obvious** when **it seems like** a mess.
I know that my interpretation of what is chaotic is very subjective; feel free to contribute in order to
improve or rewrite it.

*Coherence:* For each language there is on Earth, we have computed ranked letter-appearance occurrences (the best we can). That
intel is worth something here, so I use those records against decoded text to check whether I can detect intelligent design.
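
To make that concrete, here is a deliberately naive, illustrative sketch of frequency-based coherence — not the library's actual implementation (the real one lives in `cd.py`):

```python
from collections import Counter

# Rough English letter ranking, most frequent first (an assumption for the demo).
ENGLISH_RANKING = "etaoinshrdlcumwfgypbvkjxqz"

def naive_coherence(text: str) -> float:
    """Share of the text's top-10 letters that also sit in English's top 10."""
    letters = [c for c in text.lower() if c.isalpha()]
    top = [c for c, _ in Counter(letters).most_common(10)]
    return sum(1 for c in top if c in ENGLISH_RANKING[:10]) / max(len(top), 1)

print(naive_coherence("The quick brown fox jumps over the lazy dog"))  # relatively high
print(naive_coherence("zzzz qqqq xxxx"))                               # 0.0
```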

## ⚡ Known limitations

- Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML (English tags) + Turkish content (sharing Latin characters)).
- Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.

## 👤 Contributing

Contributions, issues and feature requests are very much welcome.<br />
Feel free to check the [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.

## 📝 License

Copyright © 2019 [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.

Character frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
@ -0,0 +1,33 @@
../../Scripts/normalizer.exe,sha256=LYeqa7VBX-LBBppNa2zcgY6djub9wagsFa32x9r07hk,108436
charset_normalizer-2.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
charset_normalizer-2.1.1.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070
charset_normalizer-2.1.1.dist-info/METADATA,sha256=C99l12g4d1E9_UiW-mqPCWx7v2M_lYGWxy1GTOjXSsA,11942
charset_normalizer-2.1.1.dist-info/RECORD,,
charset_normalizer-2.1.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
charset_normalizer-2.1.1.dist-info/entry_points.txt,sha256=uYo8aIGLWv8YgWfSna5HnfY_En4pkF1w4bgawNAXzP0,76
charset_normalizer-2.1.1.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
charset_normalizer/__init__.py,sha256=jGhhf1IcOgCpZsr593E9fPvjWKnflVqHe_LwkOJjInU,1790
charset_normalizer/__pycache__/__init__.cpython-38.pyc,,
charset_normalizer/__pycache__/api.cpython-38.pyc,,
charset_normalizer/__pycache__/cd.cpython-38.pyc,,
charset_normalizer/__pycache__/constant.cpython-38.pyc,,
charset_normalizer/__pycache__/legacy.cpython-38.pyc,,
charset_normalizer/__pycache__/md.cpython-38.pyc,,
charset_normalizer/__pycache__/models.cpython-38.pyc,,
charset_normalizer/__pycache__/utils.cpython-38.pyc,,
charset_normalizer/__pycache__/version.cpython-38.pyc,,
charset_normalizer/api.py,sha256=euVPmjAMbjpqhEHPjfKtyy1mK52U0TOUBUQgM_Qy6eE,19191
charset_normalizer/assets/__init__.py,sha256=r7aakPaRIc2FFG2mw2V8NOTvkl25_euKZ3wPf5SAVa4,15222
charset_normalizer/assets/__pycache__/__init__.cpython-38.pyc,,
charset_normalizer/cd.py,sha256=Pxdkbn4cy0iZF42KTb1FiWIqqKobuz_fDjGwc6JMNBc,10811
charset_normalizer/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
charset_normalizer/cli/__pycache__/__init__.cpython-38.pyc,,
charset_normalizer/cli/__pycache__/normalizer.cpython-38.pyc,,
charset_normalizer/cli/normalizer.py,sha256=FmD1RXeMpRBg_mjR0MaJhNUpM2qZ8wz2neAE7AayBeg,9521
charset_normalizer/constant.py,sha256=NgU-pY8JH2a9lkVT8oKwAFmIUYNKOuSBwZgF9MrlNCM,19157
charset_normalizer/legacy.py,sha256=XKeZOts_HdYQU_Jb3C9ZfOjY2CiUL132k9_nXer8gig,3384
charset_normalizer/md.py,sha256=pZP8IVpSC82D8INA9Tf_y0ijJSRI-UIncZvLdfTWEd4,17642
charset_normalizer/models.py,sha256=i68YdlSLTEI3EEBVXq8TLNAbyyjrLC2OWszc-OBAk9I,13167
charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
charset_normalizer/utils.py,sha256=ykOznhcAeH-ODLBWJuI7t1nbwa1SAfN_bDYTCJGyh4U,11771
charset_normalizer/version.py,sha256=_eh2MA3qS__IajlePQxKBmlw6zaBDvPYlLdEgxgIojw,79
@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.1)
Root-Is-Purelib: true
Tag: py3-none-any

@ -0,0 +1,2 @@
[console_scripts]
normalizer = charset_normalizer.cli.normalizer:cli_detect
@ -0,0 +1 @@
charset_normalizer
@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package is trying to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.

Basic usage:
   >>> from charset_normalizer import from_bytes
   >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
   >>> best_guess = results.best()
   >>> str(best_guess)
   'Bсеки човек има право на образование. Oбразованието!'

Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging

from .api import from_bytes, from_fp, from_path, normalize
from .legacy import (
    CharsetDetector,
    CharsetDoctor,
    CharsetNormalizerMatch,
    CharsetNormalizerMatches,
    detect,
)
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__

__all__ = (
    "from_fp",
    "from_path",
    "from_bytes",
    "normalize",
    "detect",
    "CharsetMatch",
    "CharsetMatches",
    "CharsetNormalizerMatch",
    "CharsetNormalizerMatches",
    "CharsetDetector",
    "CharsetDoctor",
    "__version__",
    "VERSION",
    "set_logging_handler",
)

# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library

logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
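
# Note (editor's sketch, not part of the original source): an application that
# wants to see this library's logs can attach its own handler, since only a
# NullHandler is installed here, e.g.:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)  # or use set_logging_handler()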
@ -0,0 +1,584 @@
import logging
import warnings
from os import PathLike
from os.path import basename, splitext
from typing import Any, BinaryIO, List, Optional, Set

from .cd import (
    coherence_ratio,
    encoding_languages,
    mb_encoding_languages,
    merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
    any_specified_encoding,
    cut_sequence_chunks,
    iana_name,
    identify_sig_or_bom,
    is_cp_similar,
    is_multi_byte_encoding,
    should_strip_sig_or_bom,
)

# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
    logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)


def from_bytes(
    sequences: bytes,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.2,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Given a raw bytes sequence, return the best possible charsets usable to render str objects.
    If there are no results, it is a strong indicator that the source is binary/not text.
    By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence.
    It will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will.

    The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
    but never takes it for granted. It can improve performance.

    You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion for that
    purpose.

    This function will strip the SIG from the payload/sequence every time except for UTF-16 and UTF-32.
    By default the library does not set up any handler other than the NullHandler. If you choose to set the 'explain'
    toggle to True, it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
    A custom logging format and handler can be set manually.
    """

    if not isinstance(sequences, (bytearray, bytes)):
        raise TypeError(
            "Expected object of type bytes or bytearray, got: {0}".format(
                type(sequences)
            )
        )

    if explain:
        previous_logger_level: int = logger.level
        logger.addHandler(explain_handler)
        logger.setLevel(TRACE)

    length: int = len(sequences)

    if length == 0:
        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
        if explain:
            logger.removeHandler(explain_handler)
            logger.setLevel(previous_logger_level or logging.WARNING)
        return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])

    if cp_isolation is not None:
        logger.log(
            TRACE,
            "cp_isolation is set. Use this flag for debugging purposes. "
            "Limited list of encodings allowed : %s.",
            ", ".join(cp_isolation),
        )
        cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
    else:
        cp_isolation = []

    if cp_exclusion is not None:
        logger.log(
            TRACE,
            "cp_exclusion is set. Use this flag for debugging purposes. "
            "Limited list of encodings excluded : %s.",
            ", ".join(cp_exclusion),
        )
        cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
    else:
        cp_exclusion = []

    if length <= (chunk_size * steps):
        logger.log(
            TRACE,
            "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
            steps,
            chunk_size,
            length,
        )
        steps = 1
        chunk_size = length

    if steps > 1 and length / steps < chunk_size:
        chunk_size = int(length / steps)

    is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
    is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE

    if is_too_small_sequence:
        logger.log(
            TRACE,
            "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                length
            ),
        )
    elif is_too_large_sequence:
        logger.log(
            TRACE,
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                length
            ),
        )

    prioritized_encodings: List[str] = []

    specified_encoding: Optional[str] = (
        any_specified_encoding(sequences) if preemptive_behaviour else None
    )

    if specified_encoding is not None:
        prioritized_encodings.append(specified_encoding)
        logger.log(
            TRACE,
            "Detected declarative mark in sequence. Priority +1 given for %s.",
            specified_encoding,
        )

    tested: Set[str] = set()
    tested_but_hard_failure: List[str] = []
    tested_but_soft_failure: List[str] = []

    fallback_ascii: Optional[CharsetMatch] = None
    fallback_u8: Optional[CharsetMatch] = None
    fallback_specified: Optional[CharsetMatch] = None

    results: CharsetMatches = CharsetMatches()

    sig_encoding, sig_payload = identify_sig_or_bom(sequences)

    if sig_encoding is not None:
        prioritized_encodings.append(sig_encoding)
        logger.log(
            TRACE,
            "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
            len(sig_payload),
            sig_encoding,
        )

    prioritized_encodings.append("ascii")

    if "utf_8" not in prioritized_encodings:
        prioritized_encodings.append("utf_8")

    for encoding_iana in prioritized_encodings + IANA_SUPPORTED:

        if cp_isolation and encoding_iana not in cp_isolation:
            continue

        if cp_exclusion and encoding_iana in cp_exclusion:
            continue

        if encoding_iana in tested:
            continue

        tested.add(encoding_iana)

        decoded_payload: Optional[str] = None
        bom_or_sig_available: bool = sig_encoding == encoding_iana
        strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
            encoding_iana
        )

        if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because it requires a BOM. Will try some sub-encoder LE/BE.",
                encoding_iana,
            )
            continue

        try:
            is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
        except (ModuleNotFoundError, ImportError):
            logger.log(
                TRACE,
                "Encoding %s does not provide an IncrementalDecoder",
                encoding_iana,
            )
            continue

        try:
            # For very large payloads with a single-byte decoder, only test-decode
            # the first 500 kB instead of materializing the whole str.
            if is_too_large_sequence and is_multi_byte_decoder is False:
                str(
                    sequences[: int(50e4)]
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) : int(50e4)],
                    encoding=encoding_iana,
                )
            else:
                decoded_payload = str(
                    sequences
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) :],
                    encoding=encoding_iana,
                )
        except (UnicodeDecodeError, LookupError) as e:
            if not isinstance(e, LookupError):
                logger.log(
                    TRACE,
                    "Code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
            tested_but_hard_failure.append(encoding_iana)
            continue

        similar_soft_failure_test: bool = False

        for encoding_soft_failed in tested_but_soft_failure:
            if is_cp_similar(encoding_iana, encoding_soft_failed):
                similar_soft_failure_test = True
                break

        if similar_soft_failure_test:
            logger.log(
                TRACE,
                "%s is deemed too similar to code page %s and was already considered unsuited. Continuing!",
                encoding_iana,
                encoding_soft_failed,
            )
            continue

        r_ = range(
            0 if not bom_or_sig_available else len(sig_payload),
            length,
            int(length / steps),
        )

        multi_byte_bonus: bool = (
            is_multi_byte_decoder
            and decoded_payload is not None
            and len(decoded_payload) < length
        )

        if multi_byte_bonus:
            logger.log(
                TRACE,
                "Code page %s is a multi byte encoding table and it appears that at least one character "
                "was encoded using n-bytes.",
                encoding_iana,
            )

        max_chunk_gave_up: int = int(len(r_) / 4)

        max_chunk_gave_up = max(max_chunk_gave_up, 2)
        early_stop_count: int = 0
        lazy_str_hard_failure = False

        md_chunks: List[str] = []
        md_ratios = []

        try:
            for chunk in cut_sequence_chunks(
                sequences,
                encoding_iana,
                r_,
                chunk_size,
                bom_or_sig_available,
                strip_sig_or_bom,
                sig_payload,
                is_multi_byte_decoder,
                decoded_payload,
            ):
                md_chunks.append(chunk)

                md_ratios.append(mess_ratio(chunk, threshold))

                if md_ratios[-1] >= threshold:
                    early_stop_count += 1

                if (early_stop_count >= max_chunk_gave_up) or (
                    bom_or_sig_available and strip_sig_or_bom is False
                ):
                    break
        except UnicodeDecodeError as e:  # Lazy str loading may have missed something there
            logger.log(
                TRACE,
                "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                encoding_iana,
                str(e),
            )
            early_stop_count = max_chunk_gave_up
            lazy_str_hard_failure = True

        # We might want to check the sequence again with the whole content
        # Only if the initial MD tests pass
        if (
            not lazy_str_hard_failure
            and is_too_large_sequence
            and not is_multi_byte_decoder
        ):
            try:
                sequences[int(50e3) :].decode(encoding_iana, errors="strict")
            except UnicodeDecodeError as e:
                logger.log(
                    TRACE,
                    "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                tested_but_hard_failure.append(encoding_iana)
                continue

        mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
        if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
            tested_but_soft_failure.append(encoding_iana)
            logger.log(
                TRACE,
                "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                "Computed mean chaos is %f %%.",
                encoding_iana,
                early_stop_count,
                round(mean_mess_ratio * 100, ndigits=3),
            )
            # Preparing those fallbacks in case we got nothing.
            if (
                encoding_iana in ["ascii", "utf_8", specified_encoding]
                and not lazy_str_hard_failure
            ):
                fallback_entry = CharsetMatch(
                    sequences, encoding_iana, threshold, False, [], decoded_payload
                )
                if encoding_iana == specified_encoding:
                    fallback_specified = fallback_entry
                elif encoding_iana == "ascii":
                    fallback_ascii = fallback_entry
                else:
                    fallback_u8 = fallback_entry
            continue

        logger.log(
            TRACE,
            "%s passed initial chaos probing. Mean measured chaos is %f %%",
            encoding_iana,
            round(mean_mess_ratio * 100, ndigits=3),
        )

        if not is_multi_byte_decoder:
            target_languages: List[str] = encoding_languages(encoding_iana)
        else:
            target_languages = mb_encoding_languages(encoding_iana)

        if target_languages:
            logger.log(
                TRACE,
                "{} should target any language(s) of {}".format(
                    encoding_iana, str(target_languages)
                ),
            )

        cd_ratios = []

        # We shall skip the CD when it's about ASCII
        # Most of the time it's not relevant to run "language-detection" on it.
        if encoding_iana != "ascii":
            for chunk in md_chunks:
                chunk_languages = coherence_ratio(
                    chunk, 0.1, ",".join(target_languages) if target_languages else None
                )

                cd_ratios.append(chunk_languages)

        cd_ratios_merged = merge_coherence_ratios(cd_ratios)

        if cd_ratios_merged:
            logger.log(
                TRACE,
                "We detected language {} using {}".format(
                    cd_ratios_merged, encoding_iana
                ),
            )

        results.append(
            CharsetMatch(
                sequences,
                encoding_iana,
                mean_mess_ratio,
                bom_or_sig_available,
                cd_ratios_merged,
                decoded_payload,
            )
        )

        if (
            encoding_iana in [specified_encoding, "ascii", "utf_8"]
            and mean_mess_ratio < 0.1
        ):
            logger.debug(
                "Encoding detection: %s is most likely the one.", encoding_iana
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

        if encoding_iana == sig_encoding:
            logger.debug(
                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
                "the beginning of the sequence.",
                encoding_iana,
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

    if len(results) == 0:
        if fallback_u8 or fallback_ascii or fallback_specified:
            logger.log(
                TRACE,
                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
            )

        if fallback_specified:
            logger.debug(
                "Encoding detection: %s will be used as a fallback match",
                fallback_specified.encoding,
            )
            results.append(fallback_specified)
        elif (
            (fallback_u8 and fallback_ascii is None)
            or (
                fallback_u8
                and fallback_ascii
                and fallback_u8.fingerprint != fallback_ascii.fingerprint
            )
            or (fallback_u8 is not None)
        ):
            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
            results.append(fallback_u8)
        elif fallback_ascii:
            logger.debug("Encoding detection: ascii will be used as a fallback match")
            results.append(fallback_ascii)

    if results:
        logger.debug(
            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
            results.best().encoding,  # type: ignore
            len(results) - 1,
        )
    else:
        logger.debug("Encoding detection: Unable to determine any suitable charset.")

    if explain:
        logger.removeHandler(explain_handler)
        logger.setLevel(previous_logger_level)

    return results


def from_fp(
    fp: BinaryIO,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Same thing as the function from_bytes but using a file pointer that is already ready.
    Will not close the file pointer.
    """
    return from_bytes(
        fp.read(),
        steps,
        chunk_size,
        threshold,
        cp_isolation,
        cp_exclusion,
        preemptive_behaviour,
        explain,
    )


def from_path(
    path: "PathLike[Any]",
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Same thing as the function from_bytes but with one extra step: opening and reading the given file path in binary mode.
    Can raise IOError.
    """
    with open(path, "rb") as fp:
        return from_fp(
            fp,
            steps,
            chunk_size,
            threshold,
            cp_isolation,
            cp_exclusion,
            preemptive_behaviour,
            explain,
        )


def normalize(
    path: "PathLike[Any]",
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
) -> CharsetMatch:
    """
    Take a (text-based) file path and try to create another file next to it, this time using UTF-8.
    """
    warnings.warn(
        "normalize is deprecated and will be removed in 3.0",
        DeprecationWarning,
    )

    results = from_path(
        path,
        steps,
        chunk_size,
        threshold,
        cp_isolation,
        cp_exclusion,
        preemptive_behaviour,
    )

    filename = basename(path)
    target_extensions = list(splitext(filename))

    if len(results) == 0:
        raise IOError(
            'Unable to normalize "{}", no encoding charset seems to fit.'.format(
                filename
            )
        )

    result = results.best()

    target_extensions[0] += "-" + result.encoding  # type: ignore

    with open(
        "{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
    ) as fp:
        fp.write(result.output())  # type: ignore

    return result  # type: ignore
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff