
Mailing List Archive: Python: Checkins

cpython (merge default -> default): Merged upstream changes.

 

 



python-checkins at python

Feb 20, 2012, 10:51 AM

Post #1 of 5
cpython (merge default -> default): Merged upstream changes.

http://hg.python.org/cpython/rev/d64f04e437b1
changeset: 75065:d64f04e437b1
parent: 75064:ae960e5ae112
parent: 75060:2e54fd523412
user: Vinay Sajip <vinay_sajip [at] yahoo>
date: Mon Feb 20 18:51:00 2012 +0000
summary:
Merged upstream changes.

files:
Lib/test/test_base64.py | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)


diff --git a/Lib/test/test_base64.py b/Lib/test/test_base64.py
--- a/Lib/test/test_base64.py
+++ b/Lib/test/test_base64.py
@@ -166,6 +166,7 @@
self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
+ with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)

def test_b32encode(self):
@@ -236,6 +237,7 @@
for data in [b'abc', b'ABCDEF==']:
with self.assertRaises(binascii.Error):
base64.b32decode(data)
+ with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))

def test_b16encode(self):

--
Repository URL: http://hg.python.org/cpython
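
For readers following along, here is a minimal sketch (not part of the commit) of what the corrected test now asserts, assuming Python 3.3+: with validate=True, non-alphabet characters such as the embedded newline make b64decode raise binascii.Error for both the bytes form and the ASCII-str form of the input.

    import base64
    import binascii

    noisy = b"YWJj\nYWI="   # contains '\n', which is outside the base64 alphabet

    print(base64.b64decode(noisy))              # b'abcab' -- lenient by default
    for arg in (noisy, noisy.decode("ascii")):  # bytes and str variants
        try:
            base64.b64decode(arg, validate=True)
        except binascii.Error as exc:
            print("rejected:", exc)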


python-checkins at python

Feb 20, 2012, 10:51 AM

Post #2 of 5
cpython (merge default -> default): Merged upstream changes. [In reply to]

http://hg.python.org/cpython/rev/ae960e5ae112
changeset: 75064:ae960e5ae112
parent: 75063:2b4a553bd6ed
parent: 75059:42f61304f77d
user: Vinay Sajip <vinay_sajip [at] yahoo>
date: Mon Feb 20 18:50:33 2012 +0000
summary:
Merged upstream changes.

files:
Doc/library/base64.rst | 11 +-
Doc/whatsnew/3.3.rst | 14 ++
Lib/base64.py | 26 ++-
Lib/test/test_base64.py | 163 +++++++++++++++++----------
Misc/NEWS | 6 +
Python/dynload_aix.c | 1 -
Python/dynload_dl.c | 1 -
Python/dynload_hpux.c | 1 -
Python/dynload_next.c | 1 -
Python/dynload_shlib.c | 6 -
10 files changed, 147 insertions(+), 83 deletions(-)


diff --git a/Doc/library/base64.rst b/Doc/library/base64.rst
--- a/Doc/library/base64.rst
+++ b/Doc/library/base64.rst
@@ -18,9 +18,14 @@

There are two interfaces provided by this module. The modern interface
supports encoding and decoding ASCII byte string objects using all three
-alphabets. The legacy interface provides for encoding and decoding to and from
-file-like objects as well as byte strings, but only using the Base64 standard
-alphabet.
+alphabets. Additionally, the decoding functions of the modern interface also
+accept Unicode strings containing only ASCII characters. The legacy interface
+provides for encoding and decoding to and from file-like objects as well as
+byte strings, but only using the Base64 standard alphabet.
+
+.. versionchanged:: 3.3
+ ASCII-only Unicode strings are now accepted by the decoding functions of
+ the modern interface.

The modern interface provides:

diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst
--- a/Doc/whatsnew/3.3.rst
+++ b/Doc/whatsnew/3.3.rst
@@ -939,6 +939,20 @@
:c:func:`PyUnicode_FromFormat()`, your code will automatically take
advantage of the new unicode representations.

+Building C extensions
+---------------------
+
+* The range of possible file names for C extensions has been narrowed.
+ Very rarely used spellings have been suppressed: under POSIX, files
+ named ``xxxmodule.so``, ``xxxmodule.abi3.so`` and
+ ``xxxmodule.cpython-*.so`` are no longer recognized as implementing
+ the ``xxx`` module. If you had been generating such files, you have
+ to switch to the other spellings (i.e., remove the ``module`` string
+ from the file names).
+
+ (implemented in :issue:`14040`.)
+
+
Other issues
------------

diff --git a/Lib/base64.py b/Lib/base64.py
--- a/Lib/base64.py
+++ b/Lib/base64.py
@@ -29,6 +29,16 @@

bytes_types = (bytes, bytearray) # Types acceptable as binary data

+def _bytes_from_decode_data(s):
+ if isinstance(s, str):
+ try:
+ return s.encode('ascii')
+ except UnicodeEncodeError:
+ raise ValueError('string argument should contain only ASCII characters')
+ elif isinstance(s, bytes_types):
+ return s
+ else:
+ raise TypeError("argument should be bytes or ASCII string, not %s" % s.__class__.__name__)

def _translate(s, altchars):
if not isinstance(s, bytes_types):
@@ -79,12 +89,9 @@
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
- if not isinstance(s, bytes_types):
- raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+ s = _bytes_from_decode_data(s)
if altchars is not None:
- if not isinstance(altchars, bytes_types):
- raise TypeError("expected bytes, not %s"
- % altchars.__class__.__name__)
+ altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = _translate(s, {chr(altchars[0]): b'+', chr(altchars[1]): b'/'})
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
@@ -211,8 +218,7 @@
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
- if not isinstance(s, bytes_types):
- raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+ s = _bytes_from_decode_data(s)
quanta, leftover = divmod(len(s), 8)
if leftover:
raise binascii.Error('Incorrect padding')
@@ -220,8 +226,7 @@
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
- if not isinstance(map01, bytes_types):
- raise TypeError("expected bytes, not %s" % map01.__class__.__name__)
+ map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = _translate(s, {b'0': b'O', b'1': map01})
if casefold:
@@ -292,8 +297,7 @@
s were incorrectly padded or if there are non-alphabet characters
present in the string.
"""
- if not isinstance(s, bytes_types):
- raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+ s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
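
The new _bytes_from_decode_data helper above centralises input handling; a hedged sketch of the resulting public behaviour (assuming Python 3.3): a str containing non-ASCII characters is rejected with ValueError, and anything that is neither bytes nor str is rejected with TypeError.

    import base64

    try:
        base64.b64decode("with non-ascii \xcb")
    except ValueError as exc:
        print("ValueError:", exc)

    try:
        base64.b64decode(42)        # neither bytes, bytearray nor str
    except TypeError as exc:
        print("TypeError:", exc)
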
diff --git a/Lib/test/test_base64.py b/Lib/test/test_base64.py
--- a/Lib/test/test_base64.py
+++ b/Lib/test/test_base64.py
@@ -102,44 +102,53 @@

def test_b64decode(self):
eq = self.assertEqual
- eq(base64.b64decode(b"d3d3LnB5dGhvbi5vcmc="), b"www.python.org")
- eq(base64.b64decode(b'AA=='), b'\x00')
- eq(base64.b64decode(b"YQ=="), b"a")
- eq(base64.b64decode(b"YWI="), b"ab")
- eq(base64.b64decode(b"YWJj"), b"abc")
- eq(base64.b64decode(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
- b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
- b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
- b"abcdefghijklmnopqrstuvwxyz"
- b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- b"0123456789!@#0^&*();:<>,. []{}")
- eq(base64.b64decode(b''), b'')
+
+ tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
+ b'AA==': b'\x00',
+ b"YQ==": b"a",
+ b"YWI=": b"ab",
+ b"YWJj": b"abc",
+ b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
+ b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
+ b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
+
+ b"abcdefghijklmnopqrstuvwxyz"
+ b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ b"0123456789!@#0^&*();:<>,. []{}",
+ b'': b'',
+ }
+ for data, res in tests.items():
+ eq(base64.b64decode(data), res)
+ eq(base64.b64decode(data.decode('ascii')), res)
+
# Test with arbitrary alternative characters
- eq(base64.b64decode(b'01a*b$cd', altchars=b'*$'), b'\xd3V\xbeo\xf7\x1d')
- # Check if passing a str object raises an error
- self.assertRaises(TypeError, base64.b64decode, "")
- self.assertRaises(TypeError, base64.b64decode, b"", altchars="")
+ tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
+ }
+ for (data, altchars), res in tests_altchars.items():
+ data_str = data.decode('ascii')
+ altchars_str = altchars.decode('ascii')
+
+ eq(base64.b64decode(data, altchars=altchars), res)
+ eq(base64.b64decode(data_str, altchars=altchars), res)
+ eq(base64.b64decode(data, altchars=altchars_str), res)
+ eq(base64.b64decode(data_str, altchars=altchars_str), res)
+
# Test standard alphabet
- eq(base64.standard_b64decode(b"d3d3LnB5dGhvbi5vcmc="), b"www.python.org")
- eq(base64.standard_b64decode(b"YQ=="), b"a")
- eq(base64.standard_b64decode(b"YWI="), b"ab")
- eq(base64.standard_b64decode(b"YWJj"), b"abc")
- eq(base64.standard_b64decode(b""), b"")
- eq(base64.standard_b64decode(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
- b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
- b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
- b"abcdefghijklmnopqrstuvwxyz"
- b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- b"0123456789!@#0^&*();:<>,. []{}")
- # Check if passing a str object raises an error
- self.assertRaises(TypeError, base64.standard_b64decode, "")
- self.assertRaises(TypeError, base64.standard_b64decode, b"", altchars="")
+ for data, res in tests.items():
+ eq(base64.standard_b64decode(data), res)
+ eq(base64.standard_b64decode(data.decode('ascii')), res)
+
# Test with 'URL safe' alternative characters
- eq(base64.urlsafe_b64decode(b'01a-b_cd'), b'\xd3V\xbeo\xf7\x1d')
- self.assertRaises(TypeError, base64.urlsafe_b64decode, "")
+ tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
+ b'': b'',
+ }
+ for data, res in tests_urlsafe.items():
+ eq(base64.urlsafe_b64decode(data), res)
+ eq(base64.urlsafe_b64decode(data.decode('ascii')), res)

def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
+ self.assertRaises(binascii.Error, base64.b64decode, 'abc')

def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
@@ -154,8 +163,10 @@
(b'YWJj\nYWI=', b'abcab'))
for bstr, res in tests:
self.assertEqual(base64.b64decode(bstr), res)
+ self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
+ base64.b64decode(bstr.decode('ascii'), validate=True)

def test_b32encode(self):
eq = self.assertEqual
@@ -170,40 +181,62 @@

def test_b32decode(self):
eq = self.assertEqual
- eq(base64.b32decode(b''), b'')
- eq(base64.b32decode(b'AA======'), b'\x00')
- eq(base64.b32decode(b'ME======'), b'a')
- eq(base64.b32decode(b'MFRA===='), b'ab')
- eq(base64.b32decode(b'MFRGG==='), b'abc')
- eq(base64.b32decode(b'MFRGGZA='), b'abcd')
- eq(base64.b32decode(b'MFRGGZDF'), b'abcde')
- self.assertRaises(TypeError, base64.b32decode, "")
+ tests = {b'': b'',
+ b'AA======': b'\x00',
+ b'ME======': b'a',
+ b'MFRA====': b'ab',
+ b'MFRGG===': b'abc',
+ b'MFRGGZA=': b'abcd',
+ b'MFRGGZDF': b'abcde',
+ }
+ for data, res in tests.items():
+ eq(base64.b32decode(data), res)
+ eq(base64.b32decode(data.decode('ascii')), res)

def test_b32decode_casefold(self):
eq = self.assertEqual
- eq(base64.b32decode(b'', True), b'')
- eq(base64.b32decode(b'ME======', True), b'a')
- eq(base64.b32decode(b'MFRA====', True), b'ab')
- eq(base64.b32decode(b'MFRGG===', True), b'abc')
- eq(base64.b32decode(b'MFRGGZA=', True), b'abcd')
- eq(base64.b32decode(b'MFRGGZDF', True), b'abcde')
- # Lower cases
- eq(base64.b32decode(b'me======', True), b'a')
- eq(base64.b32decode(b'mfra====', True), b'ab')
- eq(base64.b32decode(b'mfrgg===', True), b'abc')
- eq(base64.b32decode(b'mfrggza=', True), b'abcd')
- eq(base64.b32decode(b'mfrggzdf', True), b'abcde')
- # Expected exceptions
+ tests = {b'': b'',
+ b'ME======': b'a',
+ b'MFRA====': b'ab',
+ b'MFRGG===': b'abc',
+ b'MFRGGZA=': b'abcd',
+ b'MFRGGZDF': b'abcde',
+ # Lower cases
+ b'me======': b'a',
+ b'mfra====': b'ab',
+ b'mfrgg===': b'abc',
+ b'mfrggza=': b'abcd',
+ b'mfrggzdf': b'abcde',
+ }
+
+ for data, res in tests.items():
+ eq(base64.b32decode(data, True), res)
+ eq(base64.b32decode(data.decode('ascii'), True), res)
+
self.assertRaises(TypeError, base64.b32decode, b'me======')
+ self.assertRaises(TypeError, base64.b32decode, 'me======')
+
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
- eq(base64.b32decode(b'M1023456', map01=b'L'), b'b\xdd\xad\xf3\xbe')
- eq(base64.b32decode(b'M1023456', map01=b'I'), b'b\x1d\xad\xf3\xbe')
- self.assertRaises(TypeError, base64.b32decode, b"", map01="")
+ eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
+
+ map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
+ (b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
+ }
+ for (data, map01), res in map_tests.items():
+ data_str = data.decode('ascii')
+ map01_str = map01.decode('ascii')
+
+ eq(base64.b32decode(data, map01=map01), res)
+ eq(base64.b32decode(data_str, map01=map01), res)
+ eq(base64.b32decode(data, map01=map01_str), res)
+ eq(base64.b32decode(data_str, map01=map01_str), res)

def test_b32decode_error(self):
- self.assertRaises(binascii.Error, base64.b32decode, b'abc')
- self.assertRaises(binascii.Error, base64.b32decode, b'ABCDEF==')
+ for data in [b'abc', b'ABCDEF==']:
+ with self.assertRaises(binascii.Error):
+ base64.b32decode(data)
+ base64.b32decode(data.decode('ascii'))

def test_b16encode(self):
eq = self.assertEqual
@@ -214,12 +247,24 @@
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
+ eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
+ eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
+ self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
- self.assertRaises(TypeError, base64.b16decode, "")
+ eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
+
+ def test_decode_nonascii_str(self):
+ decode_funcs = (base64.b64decode,
+ base64.standard_b64decode,
+ base64.urlsafe_b64decode,
+ base64.b32decode,
+ base64.b16decode)
+ for f in decode_funcs:
+ self.assertRaises(ValueError, f, 'with non-ascii \xcb')

def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -10,6 +10,9 @@
Core and Builtins
-----------------

+- Issue #14040: Remove rarely used file name suffixes for C extensions
+ (under POSIX mainly).
+
- Issue #14051: Allow arbitrary attributes to be set of classmethod and
staticmethod.

@@ -469,6 +472,9 @@
Library
-------

+- Issue #13641: Decoding functions in the base64 module now accept ASCII-only
+ unicode strings. Patch by Catalin Iacob.
+
- Issue #14043: Speed up importlib's _FileFinder by at least 8x, and add a
new importlib.invalidate_caches() function.

diff --git a/Python/dynload_aix.c b/Python/dynload_aix.c
--- a/Python/dynload_aix.c
+++ b/Python/dynload_aix.c
@@ -28,7 +28,6 @@

const struct filedescr _PyImport_DynLoadFiletab[] = {
{".so", "rb", C_EXTENSION},
- {"module.so", "rb", C_EXTENSION},
{0, 0}
};

diff --git a/Python/dynload_dl.c b/Python/dynload_dl.c
--- a/Python/dynload_dl.c
+++ b/Python/dynload_dl.c
@@ -11,7 +11,6 @@

const struct filedescr _PyImport_DynLoadFiletab[] = {
{".o", "rb", C_EXTENSION},
- {"module.o", "rb", C_EXTENSION},
{0, 0}
};

diff --git a/Python/dynload_hpux.c b/Python/dynload_hpux.c
--- a/Python/dynload_hpux.c
+++ b/Python/dynload_hpux.c
@@ -15,7 +15,6 @@

const struct filedescr _PyImport_DynLoadFiletab[] = {
{SHLIB_EXT, "rb", C_EXTENSION},
- {"module"SHLIB_EXT, "rb", C_EXTENSION},
{0, 0}
};

diff --git a/Python/dynload_next.c b/Python/dynload_next.c
--- a/Python/dynload_next.c
+++ b/Python/dynload_next.c
@@ -10,7 +10,6 @@

const struct filedescr _PyImport_DynLoadFiletab[] = {
{".so", "rb", C_EXTENSION},
- {"module.so", "rb", C_EXTENSION},
{0, 0}
};

diff --git a/Python/dynload_shlib.c b/Python/dynload_shlib.c
--- a/Python/dynload_shlib.c
+++ b/Python/dynload_shlib.c
@@ -39,7 +39,6 @@
const struct filedescr _PyImport_DynLoadFiletab[] = {
#ifdef __CYGWIN__
{".dll", "rb", C_EXTENSION},
- {"module.dll", "rb", C_EXTENSION},
#else /* !__CYGWIN__ */
#if defined(PYOS_OS2) && defined(PYCC_GCC)
{".pyd", "rb", C_EXTENSION},
@@ -48,15 +47,10 @@
#ifdef __VMS
{".exe", "rb", C_EXTENSION},
{".EXE", "rb", C_EXTENSION},
- {"module.exe", "rb", C_EXTENSION},
- {"MODULE.EXE", "rb", C_EXTENSION},
#else /* !__VMS */
{"." SOABI ".so", "rb", C_EXTENSION},
- {"module." SOABI ".so", "rb", C_EXTENSION},
{".abi" PYTHON_ABI_STRING ".so", "rb", C_EXTENSION},
- {"module.abi" PYTHON_ABI_STRING ".so", "rb", C_EXTENSION},
{".so", "rb", C_EXTENSION},
- {"module.so", "rb", C_EXTENSION},
#endif /* __VMS */
#endif /* defined(PYOS_OS2) && defined(PYCC_GCC) */
#endif /* __CYGWIN__ */

--
Repository URL: http://hg.python.org/cpython


python-checkins at python

Feb 23, 2012, 12:57 PM

Post #3 of 5
cpython (merge default -> default): Merged upstream changes. [In reply to]

http://hg.python.org/cpython/rev/0fee31f0cc7f
changeset: 75223:0fee31f0cc7f
parent: 75222:ef060b833183
parent: 75216:54a3f30c58c0
user: Vinay Sajip <vinay_sajip [at] yahoo>
date: Thu Feb 23 20:51:57 2012 +0000
summary:
Merged upstream changes.

files:
Doc/library/subprocess.rst | 2 +-
Doc/library/sys.rst | 5 +++--
2 files changed, 4 insertions(+), 3 deletions(-)


diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -240,7 +240,7 @@
When *stdout* or *stderr* are pipes and *universal_newlines* is
:const:`True` then the output data is assumed to be encoded as UTF-8 and
will automatically be decoded to text. All line endings will be converted
- to ``'\n'`` as described for the universal newlines `'U'`` mode argument
+ to ``'\n'`` as described for the universal newlines ``'U'`` mode argument
to :func:`open`.

If *shell* is :const:`True`, the specified command will be executed through
diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst
--- a/Doc/library/sys.rst
+++ b/Doc/library/sys.rst
@@ -195,7 +195,7 @@
be set at build time with the ``--exec-prefix`` argument to the
:program:`configure` script. Specifically, all configuration files (e.g. the
:file:`pyconfig.h` header file) are installed in the directory
- :file:`{exec_prefix}/lib/python{X.Y}/config', and shared library modules are
+ :file:`{exec_prefix}/lib/python{X.Y}/config`, and shared library modules are
installed in :file:`{exec_prefix}/lib/python{X.Y}/lib-dynload`, where *X.Y*
is the version number of Python, for example ``3.2``.

@@ -756,6 +756,7 @@
always use the ``startswith`` idiom presented above.

.. seealso::
+
:attr:`os.name` has a coarser granularity. :func:`os.uname` gives
system-dependent version information.

@@ -771,7 +772,7 @@
argument to the :program:`configure` script. The main collection of Python
library modules is installed in the directory :file:`{prefix}/lib/python{X.Y}``
while the platform independent header files (all except :file:`pyconfig.h`) are
- stored in :file:`{prefix}/include/python{X.Y}``, where *X.Y* is the version
+ stored in :file:`{prefix}/include/python{X.Y}`, where *X.Y* is the version
number of Python, for example ``3.2``.



--
Repository URL: http://hg.python.org/cpython


python-checkins at python

Feb 27, 2012, 3:04 AM

Post #4 of 5
cpython (merge default -> default): Merged upstream changes. [In reply to]

http://hg.python.org/cpython/rev/2aab57e694e4
changeset: 75311:2aab57e694e4
parent: 75310:9814ad149861
parent: 75308:32cb52bee738
user: Vinay Sajip <vinay_sajip [at] yahoo>
date: Mon Feb 27 11:03:55 2012 +0000
summary:
Merged upstream changes.

files:
Lib/packaging/tests/test_version.py | 9 ++++++
Lib/packaging/version.py | 24 +++++++++-------
Misc/NEWS | 3 ++
3 files changed, 25 insertions(+), 11 deletions(-)


diff --git a/Lib/packaging/tests/test_version.py b/Lib/packaging/tests/test_version.py
--- a/Lib/packaging/tests/test_version.py
+++ b/Lib/packaging/tests/test_version.py
@@ -16,6 +16,7 @@
(V('1.2'), '1.2'),
(V('1.2.3a4'), '1.2.3a4'),
(V('1.2c4'), '1.2c4'),
+ (V('4.17rc2'), '4.17rc2'),
(V('1.2.3.4'), '1.2.3.4'),
(V('1.2.3.4.0b3'), '1.2.3.4b3'),
(V('1.2.0.0.0'), '1.2'),
@@ -146,6 +147,14 @@
"""
doctest.script_from_examples(comparison_doctest_string)

+ # the doctest above is never run, so temporarily add real unit
+ # tests until the doctest is rewritten
+ self.assertLessEqual(V('1.2.0rc1'), V('1.2.0'))
+ self.assertGreater(V('1.0'), V('1.0c2'))
+ self.assertGreater(V('1.0'), V('1.0rc2'))
+ self.assertGreater(V('1.0rc2'), V('1.0rc1'))
+ self.assertGreater(V('1.0c4'), V('1.0c1'))
+
def test_suggest_normalized_version(self):

self.assertEqual(suggest('1.0'), '1.0')
diff --git a/Lib/packaging/version.py b/Lib/packaging/version.py
--- a/Lib/packaging/version.py
+++ b/Lib/packaging/version.py
@@ -11,19 +11,20 @@
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
-# 1.0b1 ((1,0), ('b',1), ('f',))
-# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
-# 1.0 ((1,0), ('f',), ('f',))
-# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
-# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
+# 1.0b1 ((1,0), ('b',1), ('z',))
+# 1.0.dev345 ((1,0), ('z',), ('dev', 345))
+# 1.0 ((1,0), ('z',), ('z',))
+# 1.0.post256.dev345 ((1,0), ('z',), ('z', 'post', 256, 'dev', 345))
+# 1.0.post345 ((1,0), ('z',), ('z', 'post', 345, 'z'))
# ^ ^ ^
-# 'b' < 'f' ---------------------/ | |
+# 'b' < 'z' ---------------------/ | |
# | |
-# 'dev' < 'f' < 'post' -------------------/ |
+# 'dev' < 'z' ----------------------------/ |
# |
-# 'dev' < 'f' ----------------------------------------------/
-# Other letters would do, but 'f' for 'final' is kind of nice.
-_FINAL_MARKER = ('f',)
+# 'dev' < 'z' ----------------------------------------------/
+# 'f' for 'final' would be kind of nice, but due to bugs in the support of
+# 'rc' we must use 'z'
+_FINAL_MARKER = ('z',)

_VERSION_RE = re.compile(r'''
^
@@ -167,8 +168,9 @@
if prerel is not _FINAL_MARKER:
s += prerel[0]
s += '.'.join(str(v) for v in prerel[1:])
+ # XXX clean up: postdev is always true; code is obscure
if postdev and postdev is not _FINAL_MARKER:
- if postdev[0] == 'f':
+ if postdev[0] == _FINAL_MARKER[0]:
postdev = postdev[1:]
i = 0
while i < len(postdev):
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -508,6 +508,9 @@
Library
-------

+- Issue #11841: Fix comparison bug with 'rc' versions in packaging.version.
+ Patch by Filip GruszczyƄski.
+
- Issue #13447: Add a test file to host regression tests for bugs in the
scripts found in the Tools directory.


--
Repository URL: http://hg.python.org/cpython
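
A hedged sketch of the comparisons the new unit tests in this changeset exercise, assuming the in-development Lib/packaging of this branch and a `from packaging.version import NormalizedVersion as V` import (an assumption, since the import is not shown in the hunk): release candidates must sort below the corresponding final release, which is why the final marker changed from 'f' to 'z'.

    # Assumes this branch's Lib/packaging; the import mirrors the test module's V alias.
    from packaging.version import NormalizedVersion as V

    assert V('1.2.0rc1') <= V('1.2.0')
    assert V('1.0') > V('1.0c2')
    assert V('1.0') > V('1.0rc2')
    assert V('1.0rc2') > V('1.0rc1')
    assert V('1.0c4') > V('1.0c1')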


python-checkins at python

Mar 5, 2012, 1:44 AM

Post #5 of 5
cpython (merge default -> default): Merged upstream changes. [In reply to]

http://hg.python.org/cpython/rev/cf7b360f9971
changeset: 75413:cf7b360f9971
parent: 75412:0457fb8bf39c
parent: 75411:47016103185f
user: Vinay Sajip <vinay_sajip [at] yahoo>
date: Mon Mar 05 09:44:25 2012 +0000
summary:
Merged upstream changes.

files:
Lib/test/test_xml_etree.py | 165 +++++++++++++++-------
Lib/xml/etree/ElementTree.py | 81 +++++-----
Misc/NEWS | 4 +
Modules/_elementtree.c | 23 ---
4 files changed, 155 insertions(+), 118 deletions(-)


diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -1855,58 +1855,16 @@
# --------------------------------------------------------------------


-class CleanContext(object):
- """Provide default namespace mapping and path cache."""
- checkwarnings = None
+class ElementTreeTest(unittest.TestCase):

- def __init__(self, quiet=False):
- if sys.flags.optimize >= 2:
- # under -OO, doctests cannot be run and therefore not all warnings
- # will be emitted
- quiet = True
- deprecations = (
- # Search behaviour is broken if search path starts with "/".
- ("This search is broken in 1.3 and earlier, and will be fixed "
- "in a future version. If you rely on the current behaviour, "
- "change it to '.+'", FutureWarning),
- # Element.getchildren() and Element.getiterator() are deprecated.
- ("This method will be removed in future versions. "
- "Use .+ instead.", DeprecationWarning),
- ("This method will be removed in future versions. "
- "Use .+ instead.", PendingDeprecationWarning),
- # XMLParser.doctype() is deprecated.
- ("This method of XMLParser is deprecated. Define doctype.. "
- "method on the TreeBuilder target.", DeprecationWarning))
- self.checkwarnings = support.check_warnings(*deprecations, quiet=quiet)
-
- def __enter__(self):
- from xml.etree import ElementPath
- self._nsmap = ET.register_namespace._namespace_map
- # Copy the default namespace mapping
- self._nsmap_copy = self._nsmap.copy()
- # Copy the path cache (should be empty)
- self._path_cache = ElementPath._cache
- ElementPath._cache = self._path_cache.copy()
- self.checkwarnings.__enter__()
-
- def __exit__(self, *args):
- from xml.etree import ElementPath
- # Restore mapping and path cache
- self._nsmap.clear()
- self._nsmap.update(self._nsmap_copy)
- ElementPath._cache = self._path_cache
- self.checkwarnings.__exit__(*args)
-
-
-class TestAcceleratorNotImported(unittest.TestCase):
- # Test that the C accelerator was not imported for pyET
- def test_correct_import_pyET(self):
- self.assertEqual(pyET.SubElement.__module__, 'xml.etree.ElementTree')
-
-
-class TestElementClass(unittest.TestCase):
- def test_Element_is_a_type(self):
+ def test_istype(self):
+ self.assertIsInstance(ET.ParseError, type)
+ self.assertIsInstance(ET.QName, type)
+ self.assertIsInstance(ET.ElementTree, type)
self.assertIsInstance(ET.Element, type)
+ # XXX issue 14128 with C ElementTree
+ # self.assertIsInstance(ET.TreeBuilder, type)
+ # self.assertIsInstance(ET.XMLParser, type)

def test_Element_subclass_trivial(self):
class MyElement(ET.Element):
@@ -1936,16 +1894,115 @@
self.assertEqual(mye.newmethod(), 'joe')


+class TreeBuilderTest(unittest.TestCase):
+
+ sample1 = ('<!DOCTYPE html PUBLIC'
+ ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+ ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
+ '<html>text</html>')
+
+ def test_dummy_builder(self):
+ class BaseDummyBuilder:
+ def close(self):
+ return 42
+
+ class DummyBuilder(BaseDummyBuilder):
+ data = start = end = lambda *a: None
+
+ parser = ET.XMLParser(target=DummyBuilder())
+ parser.feed(self.sample1)
+ self.assertEqual(parser.close(), 42)
+
+ parser = ET.XMLParser(target=BaseDummyBuilder())
+ parser.feed(self.sample1)
+ self.assertEqual(parser.close(), 42)
+
+ parser = ET.XMLParser(target=object())
+ parser.feed(self.sample1)
+ self.assertIsNone(parser.close())
+
+
+ @unittest.expectedFailure # XXX issue 14007 with C ElementTree
+ def test_doctype(self):
+ class DoctypeParser:
+ _doctype = None
+
+ def doctype(self, name, pubid, system):
+ self._doctype = (name, pubid, system)
+
+ def close(self):
+ return self._doctype
+
+ parser = ET.XMLParser(target=DoctypeParser())
+ parser.feed(self.sample1)
+
+ self.assertEqual(parser.close(),
+ ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
+
+
+class NoAcceleratorTest(unittest.TestCase):
+
+ # Test that the C accelerator was not imported for pyET
+ def test_correct_import_pyET(self):
+ self.assertEqual(pyET.Element.__module__, 'xml.etree.ElementTree')
+ self.assertEqual(pyET.SubElement.__module__, 'xml.etree.ElementTree')
+
+# --------------------------------------------------------------------
+
+
+class CleanContext(object):
+ """Provide default namespace mapping and path cache."""
+ checkwarnings = None
+
+ def __init__(self, quiet=False):
+ if sys.flags.optimize >= 2:
+ # under -OO, doctests cannot be run and therefore not all warnings
+ # will be emitted
+ quiet = True
+ deprecations = (
+ # Search behaviour is broken if search path starts with "/".
+ ("This search is broken in 1.3 and earlier, and will be fixed "
+ "in a future version. If you rely on the current behaviour, "
+ "change it to '.+'", FutureWarning),
+ # Element.getchildren() and Element.getiterator() are deprecated.
+ ("This method will be removed in future versions. "
+ "Use .+ instead.", DeprecationWarning),
+ ("This method will be removed in future versions. "
+ "Use .+ instead.", PendingDeprecationWarning))
+ self.checkwarnings = support.check_warnings(*deprecations, quiet=quiet)
+
+ def __enter__(self):
+ from xml.etree import ElementPath
+ self._nsmap = ET.register_namespace._namespace_map
+ # Copy the default namespace mapping
+ self._nsmap_copy = self._nsmap.copy()
+ # Copy the path cache (should be empty)
+ self._path_cache = ElementPath._cache
+ ElementPath._cache = self._path_cache.copy()
+ self.checkwarnings.__enter__()
+
+ def __exit__(self, *args):
+ from xml.etree import ElementPath
+ # Restore mapping and path cache
+ self._nsmap.clear()
+ self._nsmap.update(self._nsmap_copy)
+ ElementPath._cache = self._path_cache
+ self.checkwarnings.__exit__(*args)
+
+
def test_main(module=pyET):
from test import test_xml_etree

- # Run the tests specific to the Python implementation
- support.run_unittest(TestAcceleratorNotImported)
-
# The same doctests are used for both the Python and the C implementations
test_xml_etree.ET = module

- support.run_unittest(TestElementClass)
+ test_classes = [ElementTreeTest, TreeBuilderTest]
+ if module is pyET:
+ # Run the tests specific to the Python implementation
+ test_classes += [NoAcceleratorTest]
+
+ support.run_unittest(*test_classes)

# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=(module is not pyET)):
diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py
--- a/Lib/xml/etree/ElementTree.py
+++ b/Lib/xml/etree/ElementTree.py
@@ -1511,24 +1511,30 @@
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
- # callbacks
+ # main callbacks
parser.DefaultHandlerExpand = self._default
- parser.StartElementHandler = self._start
- parser.EndElementHandler = self._end
- parser.CharacterDataHandler = self._data
- # optional callbacks
- parser.CommentHandler = self._comment
- parser.ProcessingInstructionHandler = self._pi
+ if hasattr(target, 'start'):
+ parser.StartElementHandler = self._start
+ if hasattr(target, 'end'):
+ parser.EndElementHandler = self._end
+ if hasattr(target, 'data'):
+ parser.CharacterDataHandler = target.data
+ # miscellaneous callbacks
+ if hasattr(target, 'comment'):
+ parser.CommentHandler = target.comment
+ if hasattr(target, 'pi'):
+ parser.ProcessingInstructionHandler = target.pi
# let expat do the buffering, if supported
try:
- self._parser.buffer_text = 1
+ parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
- self._parser.ordered_attributes = 1
- self._parser.specified_attributes = 1
- parser.StartElementHandler = self._start_list
+ parser.ordered_attributes = 1
+ parser.specified_attributes = 1
+ if hasattr(target, 'start'):
+ parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
@@ -1572,44 +1578,29 @@
attrib[fixname(attrib_in[i])] = attrib_in[i+1]
return self.target.start(tag, attrib)

- def _data(self, text):
- return self.target.data(text)
-
def _end(self, tag):
return self.target.end(self._fixname(tag))

- def _comment(self, data):
- try:
- comment = self.target.comment
- except AttributeError:
- pass
- else:
- return comment(data)
-
- def _pi(self, target, data):
- try:
- pi = self.target.pi
- except AttributeError:
- pass
- else:
- return pi(target, data)
-
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
- self.target.data(self.entity[text[1:-1]])
+ data_handler = self.target.data
+ except AttributeError:
+ return
+ try:
+ data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
- (text, self._parser.ErrorLineNumber,
- self._parser.ErrorColumnNumber)
+ (text, self.parser.ErrorLineNumber,
+ self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
- err.lineno = self._parser.ErrorLineNumber
- err.offset = self._parser.ErrorColumnNumber
+ err.lineno = self.parser.ErrorLineNumber
+ err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
@@ -1636,7 +1627,7 @@
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
- elif self.doctype is not self._XMLParser__doctype:
+ elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
@@ -1667,7 +1658,7 @@

def feed(self, data):
try:
- self._parser.Parse(data, 0)
+ self.parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)

@@ -1679,12 +1670,20 @@

def close(self):
try:
- self._parser.Parse("", 1) # end of data
+ self.parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
- tree = self.target.close()
- del self.target, self._parser # get rid of circular references
- return tree
+ try:
+ try:
+ close_handler = self.target.close
+ except AttributeError:
+ pass
+ else:
+ return close_handler()
+ finally:
+ # get rid of circular references
+ del self.parser, self._parser
+ del self.target, self._target


# Import the C accelerators
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -13,6 +13,10 @@
Library
-------

+- Issue #14007: Accept incomplete TreeBuilder objects (missing start, end,
+ data or close method) for the Python implementation as well.
+ Drop the no-op TreeBuilder().xml() method from the C implementation.
+

What's New in Python 3.3.0 Alpha 1?
===================================
diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c
--- a/Modules/_elementtree.c
+++ b/Modules/_elementtree.c
@@ -1699,13 +1699,6 @@
/* handlers */

LOCAL(PyObject*)
-treebuilder_handle_xml(TreeBuilderObject* self, PyObject* encoding,
- PyObject* standalone)
-{
- Py_RETURN_NONE;
-}
-
-LOCAL(PyObject*)
treebuilder_handle_start(TreeBuilderObject* self, PyObject* tag,
PyObject* attrib)
{
@@ -1976,22 +1969,10 @@
return treebuilder_handle_start(self, tag, attrib);
}

-static PyObject*
-treebuilder_xml(TreeBuilderObject* self, PyObject* args)
-{
- PyObject* encoding;
- PyObject* standalone;
- if (!PyArg_ParseTuple(args, "OO:xml", &encoding, &standalone))
- return NULL;
-
- return treebuilder_handle_xml(self, encoding, standalone);
-}
-
static PyMethodDef treebuilder_methods[] = {
{"data", (PyCFunction) treebuilder_data, METH_VARARGS},
{"start", (PyCFunction) treebuilder_start, METH_VARARGS},
{"end", (PyCFunction) treebuilder_end, METH_VARARGS},
- {"xml", (PyCFunction) treebuilder_xml, METH_VARARGS},
{"close", (PyCFunction) treebuilder_close, METH_VARARGS},
{NULL, NULL}
};
@@ -2052,8 +2033,6 @@

PyObject* names;

- PyObject* handle_xml;
-
PyObject* handle_start;
PyObject* handle_data;
PyObject* handle_end;
@@ -2506,7 +2485,6 @@
Py_INCREF(target);
self->target = target;

- self->handle_xml = PyObject_GetAttrString(target, "xml");
self->handle_start = PyObject_GetAttrString(target, "start");
self->handle_data = PyObject_GetAttrString(target, "data");
self->handle_end = PyObject_GetAttrString(target, "end");
@@ -2562,7 +2540,6 @@
Py_XDECREF(self->handle_end);
Py_XDECREF(self->handle_data);
Py_XDECREF(self->handle_start);
- Py_XDECREF(self->handle_xml);

Py_DECREF(self->target);
Py_DECREF(self->entity);

--
Repository URL: http://hg.python.org/cpython
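
Finally, a minimal sketch (not part of the commit) of the behaviour the Misc/NEWS entry above describes, assuming Python 3.3: XMLParser now tolerates targets that implement only some of start/end/data/close, mirroring the test_dummy_builder test added in this changeset.

    from xml.etree import ElementTree as ET

    class CloseOnly:
        # A target with only a close() method; start/end/data handlers are simply skipped.
        def close(self):
            return 42

    parser = ET.XMLParser(target=CloseOnly())
    parser.feed("<html>text</html>")
    print(parser.close())                    # 42

    parser = ET.XMLParser(target=object())   # no handler methods at all
    parser.feed("<html>text</html>")
    print(parser.close())                    # None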
