diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 352084cb5d6..6108bc7ca52 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -59,30 +59,30 @@ jobs:
# Ubuntu sub-jobs:
# ================
- # GCC 11 (with latest language standards)
+ # GCC 11 (with broad language standards)
- os: ubuntu-20.04
- python-version: 3.8
+ python-version: "3.9"
backend: c
env: { GCC_VERSION: 11, EXTRA_CFLAGS: "-std=c99" }
extra_hash: "-c99"
- os: ubuntu-20.04
- python-version: 3.11
+ python-version: "3.11"
backend: c
env: { GCC_VERSION: 11, EXTRA_CFLAGS: "-std=c17" }
extra_hash: "-gcc11"
- os: ubuntu-20.04
- python-version: 3.11
+ python-version: "3.11"
backend: cpp
env: { GCC_VERSION: 11, EXTRA_CFLAGS: "-std=c++20" }
extra_hash: "-gcc11"
# compile all modules
- os: ubuntu-20.04
- python-version: 3.7
+ python-version: "3.7"
backend: c
env: { CYTHON_COMPILE_ALL: 1 }
extra_hash: "-all"
- os: ubuntu-20.04
- python-version: 3.7
+ python-version: "3.7"
backend: cpp
env: { CYTHON_COMPILE_ALL: 1 }
extra_hash: "-all"
@@ -98,18 +98,18 @@ jobs:
extra_hash: "-all"
# Linting
- os: ubuntu-20.04
- python-version: 3.9
+ python-version: "3.9"
backend: "c,cpp"
env: { TEST_CODE_STYLE: 1, NO_CYTHON_COMPILE: 1 }
extra_hash: "-codestyle"
# Limited API
- os: ubuntu-20.04
- python-version: 3.7
+ python-version: "3.7"
backend: "c,cpp"
env: { LIMITED_API: "--limited-api", EXCLUDE: "--no-file" }
extra_hash: "-limited_api"
- os: ubuntu-20.04
- python-version: 3.8
+ python-version: "3.8"
backend: "c,cpp"
env: { LIMITED_API: "--limited-api", EXCLUDE: "--no-file" }
extra_hash: "-limited_api"
@@ -132,23 +132,23 @@ jobs:
extra_hash: "-limited_api"
# Type specs
- os: ubuntu-20.04
- python-version: 3.9
+ python-version: "3.9"
backend: c
env: { EXTRA_CFLAGS: "-DCYTHON_USE_TYPE_SPECS=1" }
extra_hash: "-typespecs"
- os: ubuntu-20.04
- python-version: 3.8
+ python-version: "3.8"
backend: c
env: { EXTRA_CFLAGS: "-DCYTHON_USE_TYPE_SPECS=1" }
extra_hash: "-typespecs"
- os: ubuntu-20.04
- python-version: 3.7
+ python-version: "3.7"
backend: c
env: { EXTRA_CFLAGS: "-DCYTHON_USE_TYPE_SPECS=1" }
extra_hash: "-typespecs"
# Stackless
- os: ubuntu-20.04
- python-version: 3.8
+ python-version: "3.8"
backend: c
env: { STACKLESS: true, PY: 3 }
extra_hash: "-stackless"
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index b402f7387c3..5319756c410 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -26,6 +26,14 @@ on:
paths:
#- Cython/Build/**
- .github/workflows/wheels.yml
+ - pyproject.toml
+ - MANIFEST.in
+ - setup.*
+ push:
+ paths:
+ #- Cython/Build/**
+ - .github/workflows/wheels.yml
+ - pyproject.toml
- MANIFEST.in
- setup.*
workflow_dispatch:
@@ -38,52 +46,77 @@ permissions:
contents: write # to create GitHub release (softprops/action-gh-release)
jobs:
+ generate-wheels-matrix:
+ # Create a matrix of all architectures & versions to build.
+ # This enables the next step to run cibuildwheel in parallel.
+ # From https://iscinumpy.dev/post/cibuildwheel-2-10-0/#only-210
+ name: Generate wheels matrix
+ if: >-
+ github.event_name == 'push' ||
+ github.event_name == 'release' ||
+ github.event_name == 'schedule' ||
+ github.event_name == 'workflow_dispatch' ||
+ (github.event_name == 'pull_request' &&
+ contains(github.event.pull_request.labels.*.name, 'Build System'))
+ runs-on: ubuntu-latest
+ outputs:
+ include: ${{ steps.set-matrix.outputs.include }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install cibuildwheel
+ # Nb. keep cibuildwheel version pin consistent with job below
+ run: pipx install cibuildwheel==2.16.2
+ - id: set-matrix
+ run: |
+ MATRIX=$(
+ {
+ cibuildwheel --print-build-identifiers --prerelease-pythons --platform linux \
+ | jq -nRc '{"only": inputs, "os": "ubuntu-latest"}' \
+ && cibuildwheel --print-build-identifiers --prerelease-pythons --platform macos \
+ | jq -nRc '{"only": inputs, "os": "macos-latest"}' \
+ && cibuildwheel --print-build-identifiers --prerelease-pythons --platform windows \
+ | jq -nRc '{"only": inputs, "os": "windows-2019"}'
+ } | jq -sc
+ )
+ echo "$MATRIX"
+ echo "include=$MATRIX" >> $GITHUB_OUTPUT
+
build_wheels:
- name: Build wheel for ${{ matrix.python }}-${{ matrix.buildplat[1] }}
+ name: Wheel ${{ matrix.only }}
if: >-
+ github.event_name == 'push' ||
github.event_name == 'release' ||
github.event_name == 'schedule' ||
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'pull_request' &&
contains(github.event.pull_request.labels.*.name, 'Build System'))
- runs-on: ${{ matrix.buildplat[0] }}
+ needs: generate-wheels-matrix
+ runs-on: ${{ matrix.os }}
+
strategy:
# Ensure that a wheel builder finishes even if another fails
fail-fast: false
matrix:
- # Github Actions doesn't support pairing matrix values together, let's improvise
- # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026
- buildplat:
- - [ubuntu-20.04, manylinux_x86_64]
- - [ubuntu-20.04, manylinux_aarch64]
- - [ubuntu-20.04, manylinux_i686]
- - [ubuntu-20.04, musllinux_x86_64]
- - [ubuntu-20.04, musllinux_aarch64]
- - [macos-11, macosx_*]
- - [windows-2019, win_amd64]
- - [windows-2019, win32]
- python: ["cp37", "cp38", "cp39", "cp310", "cp311", "cp312"] # Note: Wheels not needed for PyPy
+ include: ${{ fromJson(needs.generate-wheels-matrix.outputs.include) }}
+
steps:
- name: Checkout Cython
uses: actions/checkout@v3
- name: Set up QEMU
- if: contains(matrix.buildplat[1], '_aarch64')
- uses: docker/setup-qemu-action@v1
+ if: runner.os == 'Linux' && !contains(matrix.only, 'x86') && !contains(matrix.only, 'i686')
+ uses: docker/setup-qemu-action@v3
with:
platforms: all
-
+
- name: Build wheels
- uses: pypa/cibuildwheel@v2.13.0
- env:
- # TODO: Build Cython with the compile-all flag?
- CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }}
- CIBW_PRERELEASE_PYTHONS: True
- CIBW_ARCHS_LINUX: auto aarch64
- CIBW_ENVIRONMENT: CFLAGS='-O3 -g0 -mtune=generic -pipe -fPIC' LDFLAGS='-fPIC'
- # TODO: Cython tests take a long time to complete
- # consider running a subset in the future?
- #CIBW_TEST_COMMAND: python {project}/runtests.py -vv --no-refnanny
+ # Nb. keep cibuildwheel version pin consistent with generate-matrix job above
+ uses: pypa/cibuildwheel@v2.16.2
+ with:
+ only: ${{ matrix.only }}
+ # TODO: Cython tests take a long time to complete
+ # consider running a subset in the future?
+ #CIBW_TEST_COMMAND: python {project}/runtests.py -vv
- name: Release
uses: softprops/action-gh-release@v1
@@ -96,12 +129,13 @@ jobs:
- uses: actions/upload-artifact@v3
with:
- name: ${{ matrix.python }}-${{ startsWith(matrix.buildplat[1], 'macosx') && 'macosx' || matrix.buildplat[1] }}
+ name: ${{ matrix.only }}
path: ./wheelhouse/*.whl
build_sdist_pure_wheel:
name: Build sdist and pure wheel
if: >-
+ github.event_name == 'push' ||
github.event_name == 'release' ||
github.event_name == 'schedule' ||
github.event_name == 'workflow_dispatch' ||
@@ -122,7 +156,7 @@ jobs:
run: |
pip install --upgrade wheel setuptools
python setup.py sdist
- python setup.py bdist_wheel --no-cython-compile --universal
+ python setup.py bdist_wheel --no-cython-compile
- uses: actions/upload-artifact@v3
with:
diff --git a/CHANGES.rst b/CHANGES.rst
index 9ddd6f02a40..96db40d7e59 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -2,16 +2,120 @@
Cython Changelog
================
-3.1.0 (202?-??-??)
+3.1.0 (2024-??-??)
==================
+Features added
+--------------
+
+* Integer operations on known ``int`` types are faster.
+ (Github issue :issue:`5785`)
+
+* Many issues with the Limited C-API were resolved.
+ (Github issues :issue:`5697`, :issue:`5798`, :issue:`5845`, :issue:`5846`,
+ :issue:`5885`, :issue:`5886`, :issue:`5888`)
+
+* Dataclasses support the ``match_args`` option.
+ (Github issue :issue:`5381`)
+
+* Threading in parallel sections can now be disabled with a new ``use_threads_if`` condition.
+ (Github issue :issue:`5919`)
+
+* f-strings are slightly faster.
+ (Github issue :issue:`5866`)
+
+* ``dict.pop()`` is faster in some cases.
+ (Github issue :issue:`5911`)
+
+* Most builtin methods now provide their return type for type inference.
+ (Github issue :issue:`5865`)
+
+* ``.isprintable()`` is optimised for Unicode characters.
+ (Github issue :issue:`3277`)
+
+* The parser was updated for Unicode 15.1 (as provided by CPython 3.13a1).
+
+Bugs fixed
+----------
+
+* Dataclasses did not handle default fields without init value correctly.
+ (Github issue :issue:`5858`)
+
+* The ``-a`` option in the IPython magic no longer copies the complete HTML document
+ into the notebook but only a more reasonable content snippet.
+ Patch by Min RK. (Github issue :issue:`5760`)
+
+* Uselessly referring to C enums (not enum values) as Python objects is now rejected.
+ Patch by Vyas Ramasubramani. (Github issue :issue:`5638`)
+
+* Several C++ warnings about ``char*`` casts were resolved.
+ (Github issues :issue:`5515`, :issue:`5847`)
+
Other changes
-------------
-* Support for Python 2.7 - 3.6 was removed.
+* Support for Python 2.7 - 3.6 was removed, along with large chunks of legacy code.
+ (Github issue :issue:`2800`)
+
+* ``language_level=3`` is now the default.
+ ``language_level=3str`` has become a legacy alias.
+ (Github issue :issue:`5827`)
+
+* The Python ``int`` type now maps directly to ``PyLong`` and is inferred accordingly.
+ (Github issue :issue:`4237`)
+
+* Usages of the outdated ``WITH_THREAD`` macro guard were removed.
+ (Github issue :issue:`5812`)
+
+* Includes all fixes as of Cython 3.0.8 (but generates C99 code in some places).
+
+
+3.0.8 (2024-01-10)
+==================
+
+Bugs fixed
+----------
+
+* Using ``const`` together with defined fused types could fail to compile.
+ (Github issue :issue:`5230`)
+
+* A "use after free" bug was fixed in parallel sections.
+ (Github issue :issue:`5922`)
+
+* Several types were not available as ``cython.*`` types in pure Python code.
+
+* The generated code is now correct C89 again, removing some C++ style ``//`` comments
+ and C99-style declaration-after-code code ordering. This is still relevant for some
+ old C compilers, specifically ones that match old Python 2.7 installations.
-3.0.6 (2023-??-??)
+3.0.7 (2023-12-19)
+==================
+
+Bugs fixed
+----------
+
+* In the iterator of generator expressions, ``await`` and ``yield`` were not correctly analysed.
+ (Github issue :issue:`5851`)
+
+* ``cpdef`` enums with the same name cimported from different modules could lead to
+ invalid C code.
+ (Github issue :issue:`5887`)
+
+* Some declarations in ``cpython.unicode`` were fixed and extended.
+ (Github issue :issue:`5902`)
+
+* Compiling fused types used in pxd files could crash Cython in Python 3.11+.
+ (Github issues :issue:`5894`, :issue:`5588`)
+
+* Source files with non-ASCII file names could crash Cython.
+ (Github issue :issue:`5873`)
+
+* Includes all bug-fixes and features from the 0.29 maintenance branch
+ up to the :ref:`0.29.37` release.
+
+
+3.0.6 (2023-11-26)
==================
Features added
@@ -19,9 +123,24 @@ Features added
* Fused def function dispatch is a bit faster.
+* Declarations for the ``wchar`` PyUnicode API were added.
+ (Github issue :issue:`5836`)
+
+* The Python "nogil" fork is now also detected with the new ``Py_GIL_DISABLED`` macro.
+ Patch by Hugo van Kemenade. (Github issue :issue:`5852`)
+
Bugs fixed
----------
+* Comparing dataclasses could give different results than Python.
+ (Github issue :issue:`5857`)
+
+* ``float(std::string)`` generated invalid C code.
+ (Github issue :issue:`5818`)
+
+* Using ``cpdef`` functions with ``cimport_from_pyx`` failed.
+ (Github issue :issue:`5795`)
+
* A crash was fixed when string-formatting a Python value fails.
(Github issue :issue:`5787`)
@@ -32,12 +151,21 @@ Bugs fixed
* A C compiler warning was resolved.
(Github issue :issue:`5794`)
+* Complex numbers failed to compile in MSVC with C11.
+ Patch by Lysandros Nikolaou. (Github issue :issue:`5809`)
+
* Some issues with the Limited API and with PyPy were resolved.
(Github issues :issue:`5695`, :issue:`5696`)
* A C++ issue in Python 3.13 was resolved.
(Github issue :issue:`5790`)
+* Several directives are now also available (as no-ops) in Python code.
+ (Github issue :issue:`5803`)
+
+* An error message was corrected.
+ Patch by Mads Ynddal. (Github issue :issue:`5805`)
+
3.0.5 (2023-10-31)
==================
@@ -2058,7 +2186,7 @@ Features added
both Python and C semantics of enums.
(Github issue :issue:`2732`)
-* `PEP-614 `_:
+* `PEP-614`_:
decorators can now be arbitrary Python expressions.
(Github issue :issue:`4570`)
@@ -3304,6 +3432,27 @@ Other changes
.. _`PEP-479`: https://www.python.org/dev/peps/pep-0479
+.. _0.29.37:
+
+0.29.37 (2023-12-18)
+====================
+
+Bugs fixed
+----------
+
+* Fix a potential crash while cleaning up subtypes of externally imported extension
+ types when terminating Python. This was introduced in Cython 0.29.35.
+
+* Fix a ``complex`` related compile error on Windows.
+ (Github issue :issue:`5512`)
+
+* Compiling fused types used in pxd files could crash Cython in Python 3.11+.
+ (Github issues :issue:`5894`, :issue:`5588`)
+
+* ``cythonize`` failed to consider the ``CYTHON_FORCE_REGEN`` env variable.
+ Patch by Harmen Stoppels. (Github issue :issue:`5712`)
+
+
.. _0.29.36:
0.29.36 (2023-07-04)
diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py
index 660ec393ccf..48aeeb7d938 100644
--- a/Cython/Build/Dependencies.py
+++ b/Cython/Build/Dependencies.py
@@ -1180,12 +1180,8 @@ def __contains__(self, val):
return filtered_list
for m in module_list:
- # TODO: use m.name.isascii() in Py3.7+
- try:
- m.name.encode("ascii")
+ if m.name.isascii():
continue
- except UnicodeEncodeError:
- pass
m.export_symbols = make_filtered_list(
"PyInit_" + m.name.rsplit(".", 1)[-1],
m.export_symbols,
diff --git a/Cython/Build/Tests/TestCyCache.py b/Cython/Build/Tests/TestCyCache.py
index 5a94edc19ef..f4d9dae9906 100644
--- a/Cython/Build/Tests/TestCyCache.py
+++ b/Cython/Build/Tests/TestCyCache.py
@@ -65,9 +65,6 @@ def test_cycache_switch(self):
msg='\n'.join(list(difflib.unified_diff(
a_contents.split('\n'), a_contents1.split('\n')))[:10]))
- @unittest.skipIf(sys.version_info[:2] == (3, 12) and sys.platform == "win32",
- "This test is mysteriously broken on Windows on the CI only "
- "(https://github.com/cython/cython/issues/5825)")
def test_cycache_uses_cache(self):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
@@ -75,7 +72,8 @@ def test_cycache_uses_cache(self):
f.write('pass')
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0])
- gzip.GzipFile(a_cache, 'wb').write(b'fake stuff')
+ with gzip.GzipFile(a_cache, 'wb') as gzipfile:
+ gzipfile.write(b'fake stuff')
os.unlink(a_c)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
with open(a_c) as f:
diff --git a/Cython/Compiler/Builtin.py b/Cython/Compiler/Builtin.py
index dc6808f2f39..2a2976463bb 100644
--- a/Cython/Compiler/Builtin.py
+++ b/Cython/Compiler/Builtin.py
@@ -302,8 +302,7 @@ def declare_in_type(self, self_type):
BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
]),
- ("str", "API_STRING_TYPE_DEREF", [BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
- builtin_return_type='basestring',
+ ("str", "API_STRING_TYPE_DEREF", [BuiltinMethod("join", "TO", "T", "__Pyx_PyString_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
@@ -411,6 +410,175 @@ def declare_in_type(self, self_type):
})
+inferred_method_return_types = {
+ 'complex': dict(
+ conjugate='complex',
+ ),
+ 'int': dict(
+ bit_length='T',
+ bit_count='T',
+ to_bytes='bytes',
+ from_bytes='T', # classmethod
+ as_integer_ratio='tuple[int,int]',
+ is_integer='bint',
+ ),
+ 'float': dict(
+ as_integer_ratio='tuple[int,int]',
+ is_integer='bint',
+ hex='unicode',
+ fromhex='T', # classmethod
+ ),
+ 'list': dict(
+ index='Py_ssize_t',
+ count='Py_ssize_t',
+ ),
+ 'unicode': dict(
+ capitalize='T',
+ casefold='T',
+ center='T',
+ count='Py_ssize_t',
+ encode='bytes',
+ endswith='bint',
+ expandtabs='T',
+ find='Py_ssize_t',
+ format='T',
+ format_map='T',
+ index='Py_ssize_t',
+ isalnum='bint',
+ isalpha='bint',
+ isascii='bint',
+ isdecimal='bint',
+ isdigit='bint',
+ isidentifier='bint',
+ islower='bint',
+ isnumeric='bint',
+ isprintable='bint',
+ isspace='bint',
+ istitle='bint',
+ isupper='bint',
+ join='T',
+ ljust='T',
+ lower='T',
+ lstrip='T',
+ maketrans='dict[int,object]', # staticmethod
+ partition='tuple[T,T,T]',
+ removeprefix='T',
+ removesuffix='T',
+ replace='T',
+ rfind='Py_ssize_t',
+ rindex='Py_ssize_t',
+ rjust='T',
+ rpartition='tuple[T,T,T]',
+ rsplit='list[T]',
+ rstrip='T',
+ split='list[T]',
+ splitlines='list[T]',
+ startswith='bint',
+ strip='T',
+ swapcase='T',
+ title='T',
+ translate='T',
+ upper='T',
+ zfill='T',
+ ),
+ 'bytes': dict(
+ hex='unicode',
+ fromhex='T', # classmethod
+ count='Py_ssize_t',
+ removeprefix='T',
+ removesuffix='T',
+ decode='unicode',
+ endswith='bint',
+ find='Py_ssize_t',
+ index='Py_ssize_t',
+ join='T',
+ maketrans='bytes', # staticmethod
+ partition='tuple[T,T,T]',
+ replace='T',
+ rfind='Py_ssize_t',
+ rindex='Py_ssize_t',
+ rpartition='tuple[T,T,T]',
+ startswith='bint',
+ translate='T',
+ center='T',
+ ljust='T',
+ lstrip='T',
+ rjust='T',
+ rsplit='list[T]',
+ rstrip='T',
+ split='list[T]',
+ strip='T',
+ capitalize='T',
+ expandtabs='T',
+ isalnum='bint',
+ isalpha='bint',
+ isascii='bint',
+ isdigit='bint',
+ islower='bint',
+ isspace='bint',
+ istitle='bint',
+ isupper='bint',
+ lower='T',
+ splitlines='list[T]',
+ swapcase='T',
+ title='T',
+ upper='T',
+ zfill='T',
+ ),
+ 'bytearray': dict(
+ # Inherited from 'bytes' below.
+ ),
+ 'memoryview': dict(
+ tobytes='bytes',
+ hex='unicode',
+ tolist='list',
+ toreadonly='T',
+ cast='T',
+ ),
+ 'set': dict(
+ isdisjoint='bint',
+ issubset='bint',
+ issuperset='bint',
+ union='T',
+ intersection='T',
+ difference='T',
+ symmetric_difference='T',
+ copy='T',
+ ),
+ 'frozenset': dict(
+ # Inherited from 'set' below.
+ ),
+ 'dict': dict(
+ copy='T',
+ ),
+}
+
+inferred_method_return_types['bytearray'].update(inferred_method_return_types['bytes'])
+inferred_method_return_types['frozenset'].update(inferred_method_return_types['set'])
+inferred_method_return_types['str'] = inferred_method_return_types['unicode']
+
+
+def find_return_type_of_builtin_method(builtin_type, method_name):
+ type_name = builtin_type.name
+ if type_name in inferred_method_return_types:
+ methods = inferred_method_return_types[type_name]
+ if method_name in methods:
+ return_type_name = methods[method_name]
+ if '[' in return_type_name:
+ # TODO: Keep the "[...]" part when we add support for generics.
+ return_type_name = return_type_name.partition('[')[0]
+ if return_type_name == 'T':
+ return builtin_type
+ if 'T' in return_type_name:
+ return_type_name = return_type_name.replace('T', builtin_type.name)
+ if return_type_name == 'bint':
+ return PyrexTypes.c_bint_type
+ elif return_type_name == 'Py_ssize_t':
+ return PyrexTypes.c_py_ssize_t_type
+ return builtin_scope.lookup(return_type_name).type
+ return PyrexTypes.py_object_type
+
+
builtin_structs_table = [
('Py_buffer', 'Py_buffer',
[("buf", PyrexTypes.c_void_ptr_type),
diff --git a/Cython/Compiler/Dataclass.py b/Cython/Compiler/Dataclass.py
index 39dbcb733c3..49ba0c562f1 100644
--- a/Cython/Compiler/Dataclass.py
+++ b/Cython/Compiler/Dataclass.py
@@ -419,7 +419,8 @@ def generate_init_code(code, init, node, fields, kw_only):
annotation = ""
assignment = ''
if field.default is not MISSING or field.default_factory is not MISSING:
- seen_default = True
+ if field.init.value:
+ seen_default = True
if field.default_factory is not MISSING:
ph_name = default_factory_placeholder
else:
diff --git a/Cython/Compiler/Errors.py b/Cython/Compiler/Errors.py
index 1f2076a9853..336ebc3e135 100644
--- a/Cython/Compiler/Errors.py
+++ b/Cython/Compiler/Errors.py
@@ -244,7 +244,7 @@ def warn_once(position, message, level=0):
echo_file = threadlocal.cython_errors_echo_file
if echo_file:
_write_file_encode(echo_file, line)
- warn_once_seen[message] = True
+ warn_once_seen.add(message)
return warn
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index 06b4e9525ac..ace690660be 100644
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -1070,11 +1070,14 @@ def coerce_to(self, dst_type, env):
return src
def fail_assignment(self, dst_type):
+ src_resolved = f" (alias of '{self.type.resolve()}')" if self.type.is_typedef else ""
+ dst_resolved = f" (alias of '{dst_type.resolve()}')" if dst_type.is_typedef else ""
extra_diagnostics = dst_type.assignment_failure_extra_info(self.type)
- if extra_diagnostics:
- extra_diagnostics = ". " + extra_diagnostics
- error(self.pos, "Cannot assign type '%s' to '%s'%s" % (
- self.type, dst_type, extra_diagnostics))
+ error(self.pos,
+ f"Cannot assign type '{self.type}'{src_resolved}"
+ f" to '{dst_type}'{dst_resolved}"
+ f"{'.' if extra_diagnostics else ''}{extra_diagnostics}"
+ )
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
@@ -1758,19 +1761,26 @@ def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
+ def estimate_max_charval(self):
+ # Most strings will probably be ASCII.
+ if self.value.isascii():
+ return 127
+ max_charval = ord(max(self.value))
+ if max_charval <= 255:
+ return 255
+ elif max_charval <= 65535:
+ return 65535
+ else:
+ return 1114111
+
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
- # FIXME: this should go away entirely!
- # Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2,
- # Py2 can generate different code from Py3 here. Let's hope we get away with claiming that
- # the processing of surrogate pairs in code was always ambiguous and lead to different results
- # on P16/32bit Unicode platforms.
if StringEncoding.string_contains_lone_surrogates(self.value):
# lone (unpaired) surrogates are not really portable and cannot be
- # decoded by the UTF-8 codec in Py3.3
+ # decoded by the UTF-8 codec in Py3.3+
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_string_const(
StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
@@ -3084,7 +3094,7 @@ def generate_result_code(self, code):
else:
len_func = '__Pyx_PyTuple_GET_SIZE'
code.putln("%s = %s(%s);" % (self.counter_cname, len_func, self.result()))
- code.putln("#if !CYTHON_ASSUME_SAFE_MACROS")
+ code.putln("#if !CYTHON_ASSUME_SAFE_SIZE")
code.putln(code.error_goto_if_neg(self.counter_cname, self.pos))
code.putln("#endif")
code.putln("--%s;" % self.counter_cname) # len -> last item
@@ -3141,7 +3151,7 @@ def generate_next_sequence_item(self, test_name, result_name, code):
else:
code.putln("{")
code.putln("Py_ssize_t %s = %s;" % (Naming.quick_temp_cname, final_size))
- code.putln("#if !CYTHON_ASSUME_SAFE_MACROS")
+ code.putln("#if !CYTHON_ASSUME_SAFE_SIZE")
code.putln(code.error_goto_if_neg(Naming.quick_temp_cname, self.pos))
code.putln("#endif")
code.putln("if (%s >= %s) break;" % (self.counter_cname, Naming.quick_temp_cname))
@@ -3689,112 +3699,76 @@ def may_be_none(self):
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
num_items = len(self.values)
- list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
- ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
- max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
+ use_stack_memory = num_items < 32
- code.putln("{")
-
- code.putln("TUPLE_BUILDER_TYPE builder;")
- code.putln("#if CYTHON_USING_HPY")
- code.putln('TUPLE_CREATE_START(%s, builder, %s);' % (
- list_var,
- num_items))
- code.putln("#else")
- code.putln('TUPLE_CREATE_START(%s, builder, %s); %s' % (
- list_var,
- num_items,
- code.error_goto_if_null_object(list_var, self.pos)))
- code.putln("#endif")
- code.put_gotref(list_var, py_object_type)
- code.putln("%s = 0;" % ulength_var)
- code.putln("%s = 127;" % max_char_var) # at least ASCII character range
+ unknown_nodes = set()
+ max_char_value = 127
+ for node in self.values:
+ if isinstance(node, UnicodeNode):
+ max_char_value = max(max_char_value, node.estimate_max_charval())
+ elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
+ # formatted C numbers are always ASCII
+ pass
+ else:
+ unknown_nodes.add(node)
- for i, node in enumerate(self.values):
+ length_parts = []
+ charval_parts = [str(max_char_value)]
+ for node in self.values:
node.generate_evaluation_code(code)
- node.make_owned_reference(code)
- ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
- max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
- is_ascii = False
if isinstance(node, UnicodeNode):
- try:
- # most strings will be ASCII or at least Latin-1
- node.value.encode('iso8859-1')
- max_char_value = '255'
- node.value.encode('us-ascii')
- is_ascii = True
- except UnicodeEncodeError:
- if max_char_value != '255':
- # not ISO8859-1 => check BMP limit
- max_char = max(map(ord, node.value))
- if max_char < 0xD800:
- # BMP-only, no surrogate pairs used
- max_char_value = '65535'
- ulength = str(len(node.value))
- elif max_char >= 65536:
- # clearly outside of BMP, and not on a 16-bit Unicode system
- max_char_value = '1114111'
- ulength = str(len(node.value))
- else:
- # not really worth implementing a check for surrogate pairs here
- # drawback: C code can differ when generating on Py2 with 2-byte Unicode
- pass
- else:
- ulength = str(len(node.value))
- elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
- is_ascii = True # formatted C numbers are always ASCII
-
- if not is_ascii:
- code.putln("%s = (%s > %s) ? %s : %s;" % (
- max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
- code.putln("#if CYTHON_USING_HPY")
- load_node_result = LoadGlobalNode(self.pos, node.py_result())
- load_node_result.allocate(code)
- code.putln("%s += HPy_Length(HPY_CONTEXT_CNAME, %s);" % (ulength_var, load_node_result.temp_cname))
- load_node_result.release(code)
- code.putln("#else")
- code.putln("%s += %s;" % (ulength_var, ulength))
- code.putln("#endif")
+ length_parts.append(str(len(node.value)))
+ else:
+ # TODO: add exception handling for these macro calls if not ASSUME_SAFE_SIZE/MACROS
+ length_parts.append("UNICODE_GET_LENGTH(%s)" % node.py_result())
+ if node in unknown_nodes:
+ charval_parts.append("__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result())
+
+ if use_stack_memory:
+ values_array = code.funcstate.allocate_temp(
+ PyrexTypes.c_array_type(PyrexTypes.py_object_type, num_items), manage_ref=False)
+ else:
+ values_array = code.funcstate.allocate_temp(
+ PyrexTypes.CPtrType(PyrexTypes.py_object_type), manage_ref=False)
+ code.putln("%s = (PyObject **) PyMem_Calloc(%d, sizeof(PyObject*));" % (values_array, num_items))
+ code.putln("if (unlikely(!%s)) {" % values_array)
+ code.putln("PyErr_NoMemory(); %s" % code.error_goto(self.pos))
+ code.putln("}")
- node.generate_giveref(code)
- code.putln('#if CYTHON_ASSUME_SAFE_MACROS')
- code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
- code.putln('#else')
- load_node_result = LoadGlobalNode(self.pos, node.py_result())
- load_node_result.allocate(code)
- code.putln("#if !CYTHON_USING_HPY")
- code.put_error_if_neg(
- self.pos,
- 'TUPLE_CREATE_ASSIGN(%s, builder, %s, %s)' % (list_var, i, load_node_result.temp_cname))
- code.putln("#else")
- code.putln("TUPLE_CREATE_ASSIGN(%s, builder, %s, %s);" % (list_var, i, load_node_result.temp_cname))
- code.putln("#endif")
- load_node_result.release(code)
- code.putln('#endif')
- node.generate_post_assignment_code(code)
- node.free_temps(code)
-
- code.putln("TUPLE_CREATE_FINALISE(%s, builder);" % list_var)
+ global_temp_arr = []
+ for i, node in enumerate(self.values):
+ temp = LoadGlobalNode(self.pos, node.py_result())
+ temp.allocate(code)
+ global_temp_arr.append(temp)
+ code.putln('%s[%d] = %s;' % (values_array, i, temp.temp_cname))
code.mark_pos(self.pos)
self.allocate_temp_result(code)
code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
- code.putln('%s = __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_CALL %s, %d, %s, %s); %s' % (
+ code.putln('%s = __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_CALL %s, %d, %s, %s);' % (
self.result(),
- list_var,
+ values_array,
num_items,
- ulength_var,
- max_char_var,
- code.error_goto_if_null_object(self.py_result(), self.pos)))
+ ' + '.join(length_parts),
+ # or-ing isn't entirely correct here since it can produce values > 1114111,
+ # but we crop that in __Pyx_PyUnicode_Join().
+ ' | '.join(charval_parts),
+ ))
+
+ for temp in global_temp_arr:
+ temp.release(code)
+
+ if not use_stack_memory:
+ code.putln("PyMem_Free(%s);" % values_array)
+ code.funcstate.release_temp(values_array)
+
+ code.putln(code.error_goto_if_null_object(self.py_result(), self.pos))
self.generate_gotref(code)
- code.put_decref_clear(list_var, py_object_type)
- code.funcstate.release_temp(list_var)
- code.funcstate.release_temp(ulength_var)
- code.funcstate.release_temp(max_char_var)
-
- code.putln("}")
+ for node in self.values:
+ node.generate_disposal_code(code)
+ node.free_temps(code)
class FormattedValueNode(ExprNode):
@@ -4026,10 +4000,13 @@ def may_be_none(self):
if base_type:
if base_type.is_string:
return False
+ if base_type in (unicode_type, bytes_type, str_type, bytearray_type, basestring_type):
+ return False
if isinstance(self.index, SliceNode):
# slicing!
- if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
- basestring_type, list_type, tuple_type):
+ if base_type.is_builtin_type:
+ # It seems that none of the builtin types can return None for "__getitem__[slice]".
+ # Slices are not hashable, and thus cannot be used as key in dicts, for example.
return False
return ExprNode.may_be_none(self)
@@ -4947,7 +4924,7 @@ def buffer_lookup_code(self, code):
if self.in_nogil_context:
if self.is_buffer_access or self.is_memview_index:
if code.globalstate.directives['boundscheck']:
- warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+ performance_hint(self.pos, "Use boundscheck(False) for faster access", code.globalstate)
# Assign indices to temps of at least (s)size_t to allow further index calculations.
self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
@@ -6056,8 +6033,15 @@ class CallNode(ExprNode):
may_return_none = None
def infer_type(self, env):
- # TODO(robertwb): Reduce redundancy with analyse_types.
function = self.function
+ if function.is_attribute:
+ method_obj_type = function.obj.infer_type(env)
+ if method_obj_type.is_builtin_type:
+ result_type = Builtin.find_return_type_of_builtin_method(method_obj_type, function.attribute)
+ if result_type is not py_object_type:
+ return result_type
+
+ # TODO(robertwb): Reduce redundancy with analyse_types.
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
@@ -6143,6 +6127,16 @@ def set_py_result_type(self, function, func_type=None):
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
+ elif function.is_attribute and function.obj.type.is_builtin_type:
+ method_obj_type = function.obj.type
+ result_type = Builtin.find_return_type_of_builtin_method(method_obj_type, function.attribute)
+ self.may_return_none = result_type is py_object_type
+ if result_type.is_pyobject:
+ self.type = result_type
+ elif result_type.equivalent_type:
+ self.type = result_type.equivalent_type
+ else:
+ self.type = py_object_type
else:
self.type = py_object_type
@@ -6174,6 +6168,16 @@ def analyse_as_type_constructor(self, env):
self.type = type
return True
+ def function_type(self):
+ # Return the type of the function being called, coercing a function
+ # pointer to a function if necessary.
+ func_type = self.function.type
+
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+
+ return func_type
+
def is_lvalue(self):
return self.type.is_reference
@@ -6296,17 +6300,6 @@ def analyse_types(self, env):
return self
- def function_type(self):
- # Return the type of the function being called, coercing a function
- # pointer to a function if necessary. If the function has fused
- # arguments, return the specific type.
- func_type = self.function.type
-
- if func_type.is_ptr:
- func_type = func_type.base_type
-
- return func_type
-
def analyse_c_function_call(self, env):
func_type = self.function.type
if func_type is error_type:
@@ -6695,9 +6688,11 @@ def generate_result_code(self, code):
if exc_check:
if nogil:
if not exc_checks:
+ perf_hint_entry = getattr(self.function, "entry", None)
PyrexTypes.write_noexcept_performance_hint(
self.pos, code.funcstate.scope,
- function_name=None, void_return=self.type.is_void)
+ function_name=perf_hint_entry.name if perf_hint_entry else None,
+ void_return=self.type.is_void, is_call=True)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ErrOccurredWithGIL", "Exceptions.c"))
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
@@ -6762,15 +6757,18 @@ def generate_evaluation_code(self, code):
", ".join(a.pythran_result() for a in args)))
-class PyMethodCallNode(SimpleCallNode):
+class PyMethodCallNode(CallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
#
# function ExprNode the function/method object to call
# arg_tuple TupleNode the arguments for the args tuple
+ # kwdict ExprNode or None keyword dictionary (if present)
+ # unpack bool whether to try unpacking bound methods (see optimize.unpack_method_calls)
- subexprs = ['function', 'arg_tuple']
+ subexprs = ['function', 'arg_tuple', 'kwdict']
is_temp = True
+ kwdict = None
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
@@ -6779,8 +6777,15 @@ def generate_evaluation_code(self, code):
self.function.generate_evaluation_code(code)
assert self.arg_tuple.mult_factor is None
args = self.arg_tuple.args
+ kwargs_key_value_pairs = None
for arg in args:
arg.generate_evaluation_code(code)
+ if isinstance(self.kwdict, DictNode):
+ kwargs_key_value_pairs = self.kwdict.key_value_pairs
+ for keyvalue in kwargs_key_value_pairs:
+ keyvalue.generate_evaluation_code(code)
+ elif self.kwdict:
+ self.kwdict.generate_evaluation_code(code)
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
@@ -6819,28 +6824,50 @@ def attribute_is_likely_method(attr):
else:
likely_method = 'unlikely'
- code.putln("#if CYTHON_UNPACK_METHODS")
- code.putln("if (%s(PyMethod_Check(%s))) {" % (likely_method, function))
- code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
- # the following is always true in Py3 (kept only for safety),
- # but is false for unbound methods in Py2
- code.putln("if (likely(%s)) {" % self_arg)
- code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
- code.put_incref(self_arg, py_object_type)
- code.put_incref("function", py_object_type)
- # free method object as early to possible to enable reuse from CPython's freelist
- code.put_decref_set(function, py_object_type, "function")
- code.putln("%s = 1;" % arg_offset_cname)
- code.putln("}")
- code.putln("}")
- code.putln("#endif") # CYTHON_UNPACK_METHODS
- # TODO may need to deal with unused variables in the #else case
+ if self.unpack:
+ # unpack is ultimately governed by optimize.unpack_method_calls
+ # and is a separate decision to whether we want vectorcall-type behaviour
+ code.putln("#if CYTHON_UNPACK_METHODS")
+ code.putln("if (%s(PyMethod_Check(%s))) {" % (likely_method, function))
+ code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
+ # the result of PyMethod_GET_SELF is always true in Py3.
+ code.putln(f"assert({self_arg});")
+ code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
+ code.put_incref(self_arg, py_object_type)
+ code.put_incref("function", py_object_type)
+ # free method object as early as possible to enable reuse from CPython's freelist
+ code.put_decref_set(function, py_object_type, "function")
+ code.putln("%s = 1;" % arg_offset_cname)
+ code.putln("}")
+ code.putln("#endif") # CYTHON_UNPACK_METHODS
+ # TODO may need to deal with unused variables in the #else case
+ kwnames_temp = None
+ if kwargs_key_value_pairs:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectVectorCallKwBuilder", "ObjectHandling.c"))
+ function_caller = "__Pyx_Object_Vectorcall_CallFromBuilder"
+ kwnames_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln("%s = __Pyx_MakeVectorcallBuilderKwds(%s); %s" % (
+ kwnames_temp, len(kwargs_key_value_pairs),
+ code.error_goto_if_null(kwnames_temp, self.pos)
+ ))
+ code.put_gotref(kwnames_temp, py_object_type)
+ elif self.kwdict:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))
+ function_caller = "__Pyx_PyObject_FastCallDict"
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))
+ function_caller = "__Pyx_PyObject_FastCall"
# actually call the function
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))
+
code.putln("{")
+ extra_keyword_args = ""
+ if kwargs_key_value_pairs:
+ extra_keyword_args = f"+ ((CYTHON_VECTORCALL) ? {len(kwargs_key_value_pairs)} : 0)"
# To avoid passing an out-of-bounds argument pointer in the no-args case,
# we need at least two entries, so we pad with NULL and point to that.
# See https://github.com/cython/cython/issues/5668
@@ -6851,19 +6878,45 @@ def attribute_is_likely_method(attr):
load_tmp = LoadGlobalNode(self.pos, arg_result)
load_tmp.allocate(code)
loaded_vars.append(load_tmp)
- code.putln("PYOBJECT_TYPE __pyx_callargs[%d] = {%s, %s};" % (
+ code.putln("PYOBJECT_TYPE __pyx_callargs[%d%s] = {%s, %s};" % (
(len(args) + 1) if args else 2,
+ extra_keyword_args,
self_arg,
', '.join(load_tmp.temp_cname for load_tmp in loaded_vars) if args else "NULL",
))
for tmp in loaded_vars:
tmp.release(code)
- code.putln("%s = __Pyx_PyObject_FastCall(%s, __pyx_callargs+1-%s, %d+%s);" % (
+ if kwargs_key_value_pairs:
+ for n, keyvalue in enumerate(kwargs_key_value_pairs):
+ key_is_str = (
+ (keyvalue.key.type is Builtin.str_type or keyvalue.key.type is Builtin.unicode_type)
+ and not keyvalue.key.may_be_none()
+ )
+ code.put_error_if_neg(
+ self.pos,
+ "__Pyx_VectorcallBuilder_AddArg%s(%s, %s, %s, __pyx_callargs+%d, %d)" % (
+ "" if key_is_str else "_Check",
+ keyvalue.key.py_result(),
+ keyvalue.value.py_result(),
+ kwnames_temp,
+ len(args) + 1,
+ n
+ ))
+
+ if kwnames_temp:
+ keyword_variable = f", {kwnames_temp}"
+ elif self.kwdict:
+ keyword_variable = f", {self.kwdict.result()}"
+ else:
+ keyword_variable = ""
+ code.putln("%s = %s(%s, __pyx_callargs+1-%s, %d+%s%s);" % (
self.result(),
+ function_caller,
function,
arg_offset_cname,
len(args),
- arg_offset_cname))
+ arg_offset_cname,
+ keyword_variable))
code.put_xdecref_clear(self_arg, py_object_type)
code.funcstate.release_temp(self_arg)
@@ -6871,6 +6924,15 @@ def attribute_is_likely_method(attr):
for arg in args:
arg.generate_disposal_code(code)
arg.free_temps(code)
+ if kwargs_key_value_pairs:
+ for keyvalue in kwargs_key_value_pairs:
+ keyvalue.generate_disposal_code(code)
+ keyvalue.free_temps(code)
+ code.put_decref_clear(kwnames_temp, py_object_type)
+ code.funcstate.release_temp(kwnames_temp)
+ elif self.kwdict:
+ self.kwdict.generate_disposal_code(code)
+ self.kwdict.free_temps(code)
code.putln(code.error_goto_if_null_object(self.result(), self.pos))
self.generate_gotref(code)
@@ -6882,6 +6944,51 @@ def attribute_is_likely_method(attr):
code.funcstate.release_temp(function)
code.putln("}")
+ @staticmethod
+ def can_be_used_for_posargs(positional_args, has_kwargs, kwds_is_dict_node=None):
+ """
+ Test whether the positional args given are compatible with
+ being translated into a PyMethodCallNode
+ """
+ if not isinstance(positional_args, TupleNode):
+ return False
+ if positional_args.mult_factor:
+ return False
+ if positional_args.is_literal and len(positional_args.args) > 1:
+ return False
+ if not len(positional_args.args):
+ # If positional_args is an empty tuple, it's probably only
+ # worth optimizing if the kwds are f(a=1, b=2) and not
+ # if they're f(**kwds)
+ return has_kwargs and kwds_is_dict_node
+ return True
+
+
+ @staticmethod
+ def can_be_used_for_function(function):
+ """
+ Test whether the function passed is suitable to be translated
+ into a PyMethodCallNode
+ """
+ may_be_a_method = True
+ if function.type is Builtin.type_type:
+ may_be_a_method = False
+ elif function.is_attribute:
+ if function.entry and function.entry.type.is_cfunction:
+ # optimised builtin method
+ may_be_a_method = False
+ elif function.is_name:
+ entry = function.entry
+ if entry.is_builtin or entry.type.is_cfunction:
+ may_be_a_method = False
+ elif entry.cf_assignments:
+ # local functions/classes are definitely not methods
+ non_method_nodes = (PyCFunctionNode, ClassNode, Py3ClassNode)
+ may_be_a_method = any(
+ assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
+ for assignment in entry.cf_assignments)
+ return may_be_a_method
+
class InlinedDefNodeCallNode(CallNode):
# Inline call to defnode
@@ -7413,12 +7520,25 @@ def generate_evaluation_code(self, code):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
+ if item.is_temp:
+ # For the fairly plausible special case where item is a temporary
+ # with a refcount of 1 (so created specifically for us),
+ # avoid making a copy
+ code.putln("#if CYTHON_COMPILING_IN_CPYTHON && !CYTHON_USING_HPY")
+ code.putln("if (Py_REFCNT(%s) == 1) {" % item.py_result())
+ code.putln("%s = %s;" % (self.result(), item.py_result()))
+ item.generate_post_assignment_code(code)
+ code.putln("} else")
+ code.putln("#endif")
+ code.putln("{")
code.putln("%s = DICT_COPY(%s); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null_object(self.result(), item.pos)))
self.generate_gotref(code)
item.generate_disposal_code(code)
+ if item.is_temp:
+ code.putln("}")
if item.type is not dict_type:
code.putln('} else {')
@@ -8495,14 +8615,17 @@ def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
+ get_size_func = "__Pyx_PyList_GET_SIZE"
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
+ get_size_func = "__Pyx_PyTuple_GET_SIZE"
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
+ get_size_func = "__Pyx_PySequence_SIZE"
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
@@ -8511,7 +8634,7 @@ def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
- code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
+ code.putln("Py_ssize_t size = %s(sequence);" % get_size_func)
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c"))
@@ -8697,7 +8820,7 @@ def generate_starred_assignment_code(self, rhs, code):
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c"))
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
- code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
+ code.putln('%s = __Pyx_PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
@@ -9467,9 +9590,9 @@ class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
- # exclude_null_values [boolean] Do not add NULL values to dict
+ # exclude_null_values boolean Do not add NULL values to dict
#
- # obj_conversion_errors [PyrexError] used internally
+ # obj_conversion_errors PyrexError used internally
subexprs = ['key_value_pairs']
is_temp = 1
@@ -12485,8 +12608,8 @@ def is_py_operation_types(self, type1, type2):
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
- # so we can safely infer the Py2 type for bytes here
- string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ # so we can safely infer a mix here.
+ string_types = (bytes_type, bytearray_type, basestring_type, str_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
@@ -12859,25 +12982,11 @@ def is_py_operation_types(self, type1, type2):
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
- # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3's.
- if type1 is unicode_type:
- # None + xyz may be implemented by RHS
- if type2.is_builtin_type or not self.operand1.may_be_none():
- return type1
- elif type1 in (bytes_type, str_type, basestring_type):
- if type2 is unicode_type:
- return type2
- elif type2.is_numeric:
+ # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for later Py3's.
+ if type1 in (unicode_type, bytes_type, str_type, basestring_type):
+ # 'None % xyz' may be implemented by the RHS, but everything else will do string formatting.
+ if type2.is_builtin_type or not type2.is_pyobject or not self.operand1.may_be_none():
return type1
- elif self.operand1.is_string_literal:
- if type1 is str_type or type1 is bytes_type:
- if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
- return type1
- return basestring_type
- elif type1 is bytes_type and not type2.is_builtin_type:
- return None # RHS might implement '% operator differently in Py3
- else:
- return basestring_type # either str or unicode, can't tell
return super().infer_builtin_types_operation(type1, type2)
def zero_division_message(self):
@@ -14721,13 +14830,14 @@ class CoerceToBooleanNode(CoercionNode):
type = PyrexTypes.c_bint_type
+ # Note that all of these need a check if CYTHON_ASSUME_SAFE_MACROS is false
_special_builtins = {
- Builtin.list_type: 'PyList_GET_SIZE',
- Builtin.tuple_type: 'PyTuple_GET_SIZE',
- Builtin.set_type: 'PySet_GET_SIZE',
- Builtin.frozenset_type: 'PySet_GET_SIZE',
- Builtin.bytes_type: 'PyBytes_GET_SIZE',
- Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.list_type: '__Pyx_PyList_GET_SIZE',
+ Builtin.tuple_type: '__Pyx_PyTuple_GET_SIZE',
+ Builtin.set_type: '__Pyx_PySet_GET_SIZE',
+ Builtin.frozenset_type: '__Pyx_PySet_GET_SIZE',
+ Builtin.bytes_type: '__Pyx_PyBytes_GET_SIZE',
+ Builtin.bytearray_type: '__Pyx_PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
@@ -14759,6 +14869,8 @@ def generate_result_code(self, code):
checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
+ code.putln(code.error_goto_if(
+ "((!CYTHON_ASSUME_SAFE_MACROS) && %s < 0)" % self.result(), self.pos))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(HPY_CONTEXT_FIRST_ARG_CALL %s); %s" % (
diff --git a/Cython/Compiler/FusedNode.py b/Cython/Compiler/FusedNode.py
index 5b7d25d10ee..6865a2cb42d 100644
--- a/Cython/Compiler/FusedNode.py
+++ b/Cython/Compiler/FusedNode.py
@@ -1,7 +1,8 @@
import copy
from . import (ExprNodes, PyrexTypes, MemoryView,
- ParseTreeTransforms, StringEncoding, Errors)
+ ParseTreeTransforms, StringEncoding, Errors,
+ Naming)
from .ExprNodes import CloneNode, ProxyNode, TupleNode
from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
from ..Utils import OrderedSet
@@ -152,13 +153,13 @@ def copy_cdef(self, env):
type.specialize_entry(entry, cname)
# Reuse existing Entries (e.g. from .pxd files).
- for i, orig_entry in enumerate(env.cfunc_entries):
+ for orig_entry in env.cfunc_entries:
if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
- copied_node.entry = env.cfunc_entries[i]
+ copied_node.entry = orig_entry
if not copied_node.entry.func_cname:
copied_node.entry.func_cname = entry.func_cname
- entry = copied_node.entry
- type = entry.type
+ entry = orig_entry
+ type = orig_entry.type
break
else:
new_cfunc_entries.append(entry)
@@ -196,12 +197,11 @@ def copy_cdef(self, env):
break
# replace old entry with new entries
- try:
+ if self.node.entry in env.cfunc_entries:
cindex = env.cfunc_entries.index(self.node.entry)
- except ValueError:
- env.cfunc_entries.extend(new_cfunc_entries)
- else:
env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+ else:
+ env.cfunc_entries.extend(new_cfunc_entries)
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
@@ -303,9 +303,10 @@ def _fused_instance_checks(self, normal_types, pyx_code, env):
""")
def _dtype_name(self, dtype):
+ name = str(dtype).replace('_', '__').replace(' ', '_')
if dtype.is_typedef:
- return '___pyx_%s' % dtype
- return str(dtype).replace(' ', '_')
+ name = Naming.fused_dtype_prefix + name
+ return name
def _dtype_type(self, dtype):
if dtype.is_typedef:
diff --git a/Cython/Compiler/ModuleNode.py b/Cython/Compiler/ModuleNode.py
index a6b61355fa7..9c6e0acb3e5 100644
--- a/Cython/Compiler/ModuleNode.py
+++ b/Cython/Compiler/ModuleNode.py
@@ -301,17 +301,9 @@ def h_entries(entries, api=0, pxd=0):
h_code_main.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */")
h_code_main.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */")
h_code_main.putln("")
- h_code_main.putln("#if PY_MAJOR_VERSION < 3")
- if env.module_name.isascii():
- py2_mod_name = env.module_name
- else:
- py2_mod_name = env.module_name.encode("ascii", errors="ignore").decode("utf-8")
- h_code_main.putln('#error "Unicode module names are not supported in Python 2";')
- h_code_main.putln("PyMODINIT_FUNC init%s(void);" % py2_mod_name)
- h_code_main.putln("#else")
py3_mod_func_name = self.mod_init_func_cname('PyInit', env)
- warning_string = EncodedString('Use PyImport_AppendInittab("%s", %s) instead of calling %s directly.' % (
- py2_mod_name, py3_mod_func_name, py3_mod_func_name))
+ warning_string = EncodedString('Use PyImport_AppendInittab(%s, %s) instead of calling %s directly.' % (
+ env.module_name.as_c_string_literal(), py3_mod_func_name, py3_mod_func_name))
h_code_main.putln('/* WARNING: %s from Python 3.5 */' % warning_string.rstrip('.'))
h_code_main.putln("PyMODINIT_FUNC %s(void);" % py3_mod_func_name)
h_code_main.putln("")
@@ -334,7 +326,6 @@ def h_entries(entries, api=0, pxd=0):
h_code_main.putln('#define %s() __PYX_WARN_IF_%s_INIT_CALLED(%s())' % (
py3_mod_func_name, py3_mod_func_name, py3_mod_func_name))
h_code_main.putln('#endif')
- h_code_main.putln('#endif')
h_code_end.putln("")
h_code_end.putln("#endif /* !%s */" % h_guard)
@@ -1555,10 +1546,12 @@ def generate_new_function(self, scope, code, cclass_entry):
slot_func)
code.putln("")
if freelist_size:
+ code.putln("#if CYTHON_USE_FREELISTS")
code.putln("static %s[%d];" % (
scope.parent_type.declaration_code(freelist_name),
freelist_size))
code.putln("static int %s = 0;" % freecount_name)
+ code.putln("#endif")
code.putln("")
code.putln(
"static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {" % (
@@ -1588,7 +1581,7 @@ def generate_new_function(self, scope, code, cclass_entry):
else:
type_safety_check = ' & (int)(!__Pyx_PyType_HasFeature(t, (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))'
obj_struct = type.declaration_code("", deref=True)
- code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
+ code.putln("#if CYTHON_USE_FREELISTS")
code.putln(
"if (likely((int)(%s > 0) & (int)(t->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name, obj_struct, type_safety_check))
@@ -1791,13 +1784,7 @@ def generate_dealloc_function(self, scope, code):
if base_type.scope.needs_gc():
code.putln("PyObject_GC_Track(o);")
else:
- code.putln("#if PY_MAJOR_VERSION < 3")
- # Py2 lacks guarantees that the type pointer is still valid if we dealloc the object
- # at system exit time. Thus, we need an extra NULL check.
- code.putln("if (!(%s) || PyType_IS_GC(%s)) PyObject_GC_Track(o);" % (base_cname, base_cname))
- code.putln("#else")
code.putln("if (PyType_IS_GC(%s)) PyObject_GC_Track(o);" % base_cname)
- code.putln("#endif")
tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot)
if tp_dealloc is not None:
@@ -1809,9 +1796,15 @@ def generate_dealloc_function(self, scope, code):
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
- code.putln("if (likely(%s)) __Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o); "
- "else __Pyx_call_next_tp_dealloc(o, %s);" % (
- base_cname, base_cname, slot_func_cname))
+ # If we're using the module state then always go through the
+ # type hierarchy, because our access to the module state may
+ # have been lost (at least for the limited API version of
+ # using module state).
+ code.putln("#if !CYTHON_USE_MODULE_STATE")
+ code.putln("if (likely(%s)) __Pyx_PyType_GetSlot(%s, tp_dealloc, destructor)(o); else" % (
+ base_cname, base_cname))
+ code.putln("#endif")
+ code.putln("__Pyx_call_next_tp_dealloc(o, %s);" % slot_func_cname)
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c"))
else:
@@ -1827,7 +1820,7 @@ def generate_dealloc_function(self, scope, code):
' & (int)(!__Pyx_PyType_HasFeature(Py_TYPE(o), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))')
type = scope.parent_type
- code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
+ code.putln("#if CYTHON_USE_FREELISTS")
code.putln(
"if (((int)(%s < %d) & (int)(Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name,
@@ -1899,18 +1892,31 @@ def generate_traverse_function(self, scope, code, cclass_entry):
code.putln("e = %s(o, v, a); if (e) return e;" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
- code.putln("if (!%s->tp_traverse); else { e = %s->tp_traverse(o,v,a); if (e) return e; }" % (
- base_cname, base_cname))
+ code.putln("{")
+ code.putln(
+ f"traverseproc traverse = __Pyx_PyType_GetSlot({base_cname}, tp_traverse, traverseproc);")
+ code.putln("if (!traverse); else { e = traverse(o,v,a); if (e) return e; }")
+ code.putln("}")
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
+ # If we're using the module state then always go through the
+ # type hierarchy, because our access to the module state may
+ # have been lost (at least for the limited API version of
+ # using module state).
base_cname = base_type.typeptr_cname
+ code.putln("#if !CYTHON_USE_MODULE_STATE")
+ code.putln("e = 0;")
+ code.putln("if (likely(%s)) {" % base_cname)
code.putln(
- "e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : "
- "__Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % (
- base_cname, base_cname, base_cname, slot_func))
+ f"traverseproc traverse = __Pyx_PyType_GetSlot({base_cname}, tp_traverse, traverseproc);")
+ code.putln("if (traverse) { e = traverse(o, v, a); }")
+ code.putln("} else")
+ code.putln("#endif")
+ code.putln("{ e = __Pyx_call_next_tp_traverse(o, v, a, %s); }" % slot_func)
+ code.putln("if (e) return e;")
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c"))
@@ -1965,17 +1971,27 @@ def generate_clear_function(self, scope, code, cclass_entry):
code.putln("%s(o);" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
- code.putln("if (!%s->tp_clear); else %s->tp_clear(o);" % (
- base_cname, base_cname))
+ code.putln("{")
+ code.putln(f"inquiry clear = __Pyx_PyType_GetSlot({base_cname}, tp_clear, inquiry);")
+ code.putln("if (clear) clear(o);")
+ code.putln("}")
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
+ # If we're using the module state then always go through the
+ # type hierarchy, because our access to the module state may
+ # have been lost (at least for the limited API version of
+ # using module state).
base_cname = base_type.typeptr_cname
- code.putln(
- "if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % (
- base_cname, base_cname, base_cname, slot_func))
+ code.putln("#if !CYTHON_USE_MODULE_STATE")
+ code.putln("if (likely(%s)) {" % base_cname)
+ code.putln(f"inquiry clear = __Pyx_PyType_GetSlot({base_cname}, tp_clear, inquiry);")
+ code.putln("if (clear) clear(o);")
+ code.putln("} else")
+ code.putln("#endif")
+ code.putln("{ __Pyx_call_next_tp_clear(o, %s); }" % slot_func)
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c"))
@@ -3013,28 +3029,10 @@ def generate_module_init_func(self, imported_modules, env, code):
code.enter_cfunc_scope(self.scope)
code.putln("")
code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
- if env.module_name.isascii():
- py2_mod_name = env.module_name
- fail_compilation_in_py2 = False
- else:
- fail_compilation_in_py2 = True
- # at this point py2_mod_name is largely a placeholder and the value doesn't matter
- py2_mod_name = env.module_name.encode("ascii", errors="ignore").decode("utf8")
- header2 = "__Pyx_PyMODINIT_FUNC init%s(void)" % py2_mod_name
header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
header3 = EncodedString(header3)
- code.putln("#if PY_MAJOR_VERSION < 3")
# Optimise for small code size as the module init function is only executed once.
- code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
- if fail_compilation_in_py2:
- code.putln('#error "Unicode module names are not supported in Python 2";')
- if self.scope.is_package:
- code.putln("#if !defined(CYTHON_NO_PYINIT_EXPORT) && (defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS))")
- code.putln("__Pyx_PyMODINIT_FUNC init__init__(void) { init%s(); }" % py2_mod_name)
- code.putln("#endif")
- code.putln(header2)
- code.putln("#else")
code.putln("#if !CYTHON_USING_HPY")
code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
if self.scope.is_package:
@@ -3078,8 +3076,6 @@ def generate_module_init_func(self, imported_modules, env, code):
code.putln("#endif")
code.putln("#endif") # PEP489
- code.putln("#endif") # Py3
-
# start of module init/exec function (pre/post PEP 489)
code.putln("{")
code.putln('int stringtab_initialized = 0;')
@@ -3112,8 +3108,8 @@ def generate_module_init_func(self, imported_modules, env, code):
env.module_name.as_c_string_literal()[1:-1])
code.putln("return -1;")
code.putln("}")
- code.putln("#elif PY_MAJOR_VERSION >= 3")
- # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
+ code.putln("#else")
+ # Hack: enforce single initialisation also on reimports under different names (with PEP 3121/489).
code.putln("#if !CYTHON_USING_HPY")
code.putln("if (API_IS_NOT_NULL(%s)) return __Pyx_NewRef(%s);" % (
Naming.module_cname,
@@ -3480,6 +3476,7 @@ def generate_module_cleanup_func(self, env, code):
scope = cclass_type.scope
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
+ code.putln('#if CYTHON_USE_FREELISTS')
code.putln("while (%s > 0) {" % freecount_name)
code.putln("PyObject* o = (PyObject*)%s[--%s];" % (
freelist_name, freecount_name))
@@ -3491,6 +3488,7 @@ def generate_module_cleanup_func(self, env, code):
code.putln("if (tp_free) tp_free(o);")
code.putln("#endif")
code.putln("}")
+ code.putln('#endif') # CYTHON_USE_FREELISTS
# for entry in env.pynum_entries:
# code.put_decref_clear(entry.cname,
# PyrexTypes.py_object_type,
@@ -3527,19 +3525,17 @@ def generate_main_method(self, env, code):
def punycode_module_name(self, prefix, name):
# adapted from PEP483
- try:
- name = '_' + name.encode('ascii').decode('ascii')
- except UnicodeEncodeError:
+ if name.isascii():
+ name = '_' + name
+ else:
name = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
return "%s%s" % (prefix, name)
def wrong_punycode_module_name(self, name):
# to work around a distutils bug by also generating an incorrect symbol...
- try:
- name.encode("ascii")
+ if name.isascii():
return None # workaround is not needed
- except UnicodeEncodeError:
- return "PyInitU" + ("_"+name).encode('punycode').replace(b'-', b'_').decode('ascii')
+ return "PyInitU" + ("_"+name).encode('punycode').replace(b'-', b'_').decode('ascii')
def mod_init_func_cname(self, prefix, env):
# from PEP483
@@ -3680,7 +3676,7 @@ def generate_module_creation_code(self, env, code):
code.putln("int add_module_result = PyState_AddModule(%s, &%s);" % (
module_temp, Naming.pymoduledef_cname))
code.putln("%s = 0; /* transfer ownership from %s to %s pseudovariable */" % (
- module_temp, module_temp, env.module_name
+ module_temp, module_temp, env.module_name.as_c_string_literal()
))
# At this stage the module likely has a refcount of 2 - one owned by the list
# inside PyState_AddModule and one owned by "__pyx_m" (and returned from this
diff --git a/Cython/Compiler/Naming.py b/Cython/Compiler/Naming.py
index 3a9c9af58f4..ecb928662d2 100644
--- a/Cython/Compiler/Naming.py
+++ b/Cython/Compiler/Naming.py
@@ -134,6 +134,7 @@
error_without_exception_cname = pyrex_prefix + "error_without_exception"
binding_cfunc = pyrex_prefix + "binding_PyCFunctionType"
fused_func_prefix = pyrex_prefix + 'fuse_'
+fused_dtype_prefix = pyrex_prefix + 'fused_dtype_'
quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty temping
tp_dict_version_temp = pyrex_prefix + "tp_dict_version"
obj_dict_version_temp = pyrex_prefix + "obj_dict_version"
diff --git a/Cython/Compiler/Nodes.py b/Cython/Compiler/Nodes.py
index 6b12cf52e03..82fa4f60b7a 100644
--- a/Cython/Compiler/Nodes.py
+++ b/Cython/Compiler/Nodes.py
@@ -871,6 +871,7 @@ class CArgDeclNode(Node):
# kw_only boolean Is a keyword-only argument
# is_dynamic boolean Non-literal arg stored inside CyFunction
# pos_only boolean Is a positional-only argument
+ # type_from_annotation boolean Was the type deduced from an annotation
#
# name_cstring property that converts the name to a cstring taking care of unicode
# and quoting it
@@ -891,6 +892,7 @@ class CArgDeclNode(Node):
default_value = None
annotation = None
is_dynamic = 0
+ type_from_annotation = False
def declared_name(self):
return self.declarator.declared_name()
@@ -992,6 +994,8 @@ def inject_type_from_annotations(self, env):
elif not self.or_none and arg_type.can_be_optional():
self.not_none = True
+ if arg_type:
+ self.type_from_annotation = True
return arg_type
def calculate_default_value_code(self, code):
@@ -2446,13 +2450,18 @@ def generate_arg_type_test(self, arg, code):
UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
typeptr_cname = arg.type.typeptr_cname
arg_code = "(CAST_IF_CAPI(PyObject *)%s)" % arg.entry.cname
+ exact = 0
+ if arg.type.is_builtin_type and arg.type.require_exact:
+ # 2 is used to indicate that the type is from the annotation
+ # and provide a little extra info on failure.
+ exact = 2 if arg.type_from_annotation else 1
code.putln(
'if (unlikely(!(__Pyx_ArgTypeTest(%s, %s, %d, %s, %s)))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
arg.name_cstring,
- arg.type.is_builtin_type and arg.type.require_exact,
+ exact,
code.error_goto(arg.pos)))
else:
error(arg.pos, "Cannot test type of extern C class without type object name specification")
@@ -3877,7 +3886,7 @@ def generate_argument_parsing_code(self, env, code, decl_code):
if self.signature_has_generic_args():
if self.signature.use_fastcall:
code.putln("#if !CYTHON_METH_FASTCALL")
- code.putln("#if CYTHON_ASSUME_SAFE_MACROS")
+ code.putln("#if CYTHON_ASSUME_SAFE_SIZE")
code.putln("%s = PyTuple_GET_SIZE(%s);" % (
Naming.nargs_cname, Naming.args_cname))
code.putln("#else")
@@ -4954,9 +4963,15 @@ def generate_execution_code(self, code):
if self.py_func.is_module_scope:
code.putln("else {")
else:
- code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0) || "
- "__Pyx_PyType_HasFeature(Py_TYPE(%s), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % (
- self_arg, self_arg))
+ code.putln("else if (")
+ code.putln("#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY")
+ code.putln(f"unlikely(Py_TYPE({self_arg})->tp_dictoffset != 0)")
+ code.putln("#else")
+ dict_str_const = code.get_py_string_const(EncodedString("__dict__"))
+ code.putln(f'PyObject_HasAttr({self_arg}, {dict_str_const})')
+ code.putln("#endif")
+ code.putln(" || unlikely(__Pyx_PyType_HasFeature(Py_TYPE(%s), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % (
+ self_arg))
code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
code.globalstate.use_utility_code(
@@ -5819,12 +5834,9 @@ def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_typ
typeptr_cname,
type.vtabptr_cname,
))
- # TODO: find a way to make this work with the Limited API!
- code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
code.globalstate.use_utility_code(
UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
code.put_error_if_neg(entry.pos, "__Pyx_MergeVtables(%s)" % typeptr_cname)
- code.putln("#endif")
if not type.scope.is_internal and not type.scope.directives.get('internal'):
# scope.is_internal is set for types defined by
# Cython (such as closures), the 'internal'
@@ -5857,9 +5869,7 @@ def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_typ
# do so at runtime.
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME
code.put_error_if_neg(entry.pos, "__Pyx_setup_reduce((PyObject *) %s)" % typeptr_cname)
- code.putln("#endif")
def annotate(self, code):
if self.type_init_args:
@@ -8648,6 +8658,8 @@ def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filena
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
+ # although the thread state is already assigned, that can't be trusted after releasing the GIL
+ code.putln("__Pyx_PyThreadState_assign")
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
@@ -8674,6 +8686,8 @@ def put_error_cleaner(self, code, exc_vars):
code.globalstate.use_utility_code(reset_exception_utility_code)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
+ # although the thread state is already assigned, that can't be trusted after releasing the GIL
+ code.putln("__Pyx_PyThreadState_assign")
# not using preprocessor here to avoid warnings about
# unused utility functions and/or temps
@@ -9139,7 +9153,7 @@ class ParallelStatNode(StatNode, ParallelNode):
construct (replaced by its compile time value)
"""
- child_attrs = ['body', 'num_threads']
+ child_attrs = ['body', 'num_threads', 'threading_condition']
body = None
@@ -9150,6 +9164,7 @@ class ParallelStatNode(StatNode, ParallelNode):
num_threads = None
chunksize = None
+ threading_condition = None
parallel_exc = (
Naming.parallel_exc_type,
@@ -9192,9 +9207,10 @@ def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
+ self.threading_condition = None
if self.kwargs:
- # Try to find num_threads and chunksize keyword arguments
+ # Try to find known keyword arguments.
pairs = []
seen = set()
for dictitem in self.kwargs.key_value_pairs:
@@ -9204,6 +9220,9 @@ def analyse_declarations(self, env):
if dictitem.key.value == 'num_threads':
if not dictitem.value.is_none:
self.num_threads = dictitem.value
+ elif dictitem.key.value == 'use_threads_if':
+ if not dictitem.value.is_none:
+ self.threading_condition = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
if not dictitem.value.is_none:
self.chunksize = dictitem.value
@@ -9230,6 +9249,12 @@ def analyse_expressions(self, env):
if self.num_threads:
self.num_threads = self.num_threads.analyse_expressions(env)
+ if self.threading_condition:
+ if self.is_parallel:
+ self.threading_condition = self.threading_condition.analyse_expressions(env)
+ else:
+ error(self.pos, "'use_threads_if' must be declared in the parent parallel section")
+
if self.chunksize:
self.chunksize = self.chunksize.analyse_expressions(env)
@@ -9833,15 +9858,23 @@ def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
+ def _parameters_nogil_check(self, env, names, nodes):
+ for name, node in zip(names, nodes):
+ if node is not None and node.type.is_pyobject:
+ error(node.pos, "%s may not be a Python object "
+ "as we don't have the GIL" % name)
+
+
class ParallelWithBlockNode(ParallelStatNode):
"""
This node represents a 'with cython.parallel.parallel():' block
"""
- valid_keyword_arguments = ['num_threads']
+ valid_keyword_arguments = ['num_threads', 'use_threads_if']
num_threads = None
+ threading_condition = None
def analyse_declarations(self, env):
super().analyse_declarations(env)
@@ -9850,12 +9883,20 @@ def analyse_declarations(self, env):
"positional arguments")
def generate_execution_code(self, code):
+
+ if self.threading_condition is not None:
+ self.threading_condition.generate_evaluation_code(code)
+
self.declare_closure_privates(code)
self.setup_parallel_control_flow_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp parallel ")
+ if self.threading_condition is not None:
+ code.put("if(%s) " % self.threading_condition.result())
+
+
if self.privates:
privates = [e.cname for e in self.privates
if not e.type.is_pyobject]
@@ -9882,11 +9923,21 @@ def generate_execution_code(self, code):
return_ = code.label_used(code.return_label)
self.restore_labels(code)
+
+ # ------ cleanup ------
self.end_parallel_control_flow_block(code, break_=break_,
continue_=continue_,
return_=return_)
+
+ if self.threading_condition is not None:
+ self.threading_condition.generate_disposal_code(code)
+ self.threading_condition.free_temps(code)
+
self.release_closure_privates(code)
+ def nogil_check(self, env):
+ self._parameters_nogil_check(env, ['use_threads_if'], [self.threading_condition])
+
class ParallelRangeNode(ParallelStatNode):
"""
@@ -9897,7 +9948,7 @@ class ParallelRangeNode(ParallelStatNode):
"""
child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
- 'chunksize']
+ 'chunksize', 'threading_condition']
body = target = else_clause = args = None
@@ -9908,7 +9959,7 @@ class ParallelRangeNode(ParallelStatNode):
nogil = None
schedule = None
- valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
+ valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize', 'use_threads_if']
def __init__(self, pos, **kwds):
super().__init__(pos, **kwds)
@@ -10022,12 +10073,9 @@ def analyse_expressions(self, env):
return node
def nogil_check(self, env):
- names = 'start', 'stop', 'step', 'target'
- nodes = self.start, self.stop, self.step, self.target
- for name, node in zip(names, nodes):
- if node is not None and node.type.is_pyobject:
- error(node.pos, "%s may not be a Python object "
- "as we don't have the GIL" % name)
+ names = 'start', 'stop', 'step', 'target', 'use_threads_if'
+ nodes = self.start, self.stop, self.step, self.target, self.threading_condition
+ self._parameters_nogil_check(env, names, nodes)
def generate_execution_code(self, code):
"""
@@ -10095,6 +10143,9 @@ def generate_execution_code(self, code):
fmt_dict[name] = result
+ if self.threading_condition is not None:
+ self.threading_condition.generate_evaluation_code(code)
+
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
@@ -10137,7 +10188,7 @@ def generate_execution_code(self, code):
# And finally, release our privates and write back any closure
# variables
- for temp in start_stop_step + (self.chunksize,):
+ for temp in start_stop_step + (self.chunksize, self.threading_condition):
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
@@ -10159,6 +10210,10 @@ def generate_loop(self, code, fmt_dict):
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
+
+ if self.threading_condition is not None:
+ code.put(" if(%s)" % self.threading_condition.result())
+
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
diff --git a/Cython/Compiler/Optimize.py b/Cython/Compiler/Optimize.py
index 5baf31d62dd..ba2bd0f6dc3 100644
--- a/Cython/Compiler/Optimize.py
+++ b/Cython/Compiler/Optimize.py
@@ -3367,21 +3367,40 @@ def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
+ PyDict_Pop_ignore_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_int_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ],
+ exception_value=PyrexTypes.c_int_type.exception_value,
+ )
+
def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
"""Replace dict.pop() by a call to _PyDict_Pop().
"""
+ capi_func = "__Pyx_PyDict_Pop"
+ utility_code_name = 'py_dict_pop'
+ func_type = self.PyDict_Pop_func_type
+
if len(args) == 2:
args.append(ExprNodes.NullNode(node.pos))
- elif len(args) != 3:
+ elif len(args) == 3:
+ if not node.result_is_used:
+ # special case: we can ignore the default value
+ capi_func = "__Pyx_PyDict_Pop_ignore"
+ utility_code_name = 'py_dict_pop_ignore'
+ func_type = self.PyDict_Pop_ignore_func_type
+ else:
self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
- "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
+ capi_func, func_type,
'pop', is_unbound_method, args,
may_return_none=True,
- utility_code=load_c_utility('py_dict_pop'))
+ utility_code=load_c_utility(utility_code_name))
Pyx_BinopInt_func_types = {
(ctype, ret_type): PyrexTypes.CFuncType(
@@ -5048,6 +5067,13 @@ def visit_SingleAssignmentNode(self, node):
lhs.lhs_of_first_assignment = True
return node
+ def _check_optimize_method_calls(self, node):
+ function = node.function
+ return (node.is_temp and function.type.is_pyobject and self.current_directives.get(
+ "optimize.unpack_method_calls_in_pyinit"
+ if not self.in_loop and self.current_env().is_module_scope
+ else "optimize.unpack_method_calls"))
+
def visit_SimpleCallNode(self, node):
"""
Replace generic calls to isinstance(x, type) by a more efficient type check.
@@ -5064,38 +5090,37 @@ def visit_SimpleCallNode(self, node):
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
- elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
- "optimize.unpack_method_calls_in_pyinit"
- if not self.in_loop and self.current_env().is_module_scope
- else "optimize.unpack_method_calls")):
+ else:
# optimise simple Python methods calls
- if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
- node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and len(node.arg_tuple.args) > 1)):
+ if ExprNodes.PyMethodCallNode.can_be_used_for_posargs(node.arg_tuple, has_kwargs=False):
# simple call, now exclude calls to objects that are definitely not methods
- may_be_a_method = True
- if function.type is Builtin.type_type:
- may_be_a_method = False
- elif function.is_attribute:
- if function.entry and function.entry.type.is_cfunction:
- # optimised builtin method
- may_be_a_method = False
- elif function.is_name:
- entry = function.entry
- if entry.is_builtin or entry.type.is_cfunction:
- may_be_a_method = False
- elif entry.cf_assignments:
- # local functions/classes are definitely not methods
- non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
- may_be_a_method = any(
- assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
- for assignment in entry.cf_assignments)
- if may_be_a_method:
+ if ExprNodes.PyMethodCallNode.can_be_used_for_function(function):
if (node.self and function.is_attribute and
isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self):
# function self object was moved into a CloneNode => undo
function.obj = function.obj.arg
node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
- node, function=function, arg_tuple=node.arg_tuple, type=node.type))
+ node, function=function, arg_tuple=node.arg_tuple, type=node.type,
+ unpack=self._check_optimize_method_calls(node)))
+ return node
+
+ def visit_GeneralCallNode(self, node):
+ """
+ Replace likely Python method calls by a specialised PyMethodCallNode.
+ """
+ self.visitchildren(node)
+ has_kwargs = bool(node.keyword_args)
+ kwds_is_dict_node = isinstance(node.keyword_args, ExprNodes.DictNode)
+ if not ExprNodes.PyMethodCallNode.can_be_used_for_posargs(
+ node.positional_args, has_kwargs=has_kwargs, kwds_is_dict_node=kwds_is_dict_node):
+ return node
+ function = node.function
+ if not ExprNodes.PyMethodCallNode.can_be_used_for_function(function):
+ return node
+
+ node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
+ node, function=function, arg_tuple=node.positional_args, kwdict=node.keyword_args,
+ type=node.type, unpack=self._check_optimize_method_calls(node)))
return node
def visit_NumPyMethodCallNode(self, node):
diff --git a/Cython/Compiler/ParseTreeTransforms.pxd b/Cython/Compiler/ParseTreeTransforms.pxd
index 2e0fc55f338..4e8d91f6921 100644
--- a/Cython/Compiler/ParseTreeTransforms.pxd
+++ b/Cython/Compiler/ParseTreeTransforms.pxd
@@ -52,10 +52,12 @@ cdef class YieldNodeCollector(TreeVisitor):
cdef public bint has_return_value
cdef public bint has_yield
cdef public bint has_await
+ cdef list excludes
@cython.final
cdef class MarkClosureVisitor(CythonTransform):
cdef bint needs_closure
+ cdef list excludes
@cython.final
cdef class CreateClosureClasses(CythonTransform):
diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
index 03db4d6d23b..3a190e4f2fa 100644
--- a/Cython/Compiler/ParseTreeTransforms.py
+++ b/Cython/Compiler/ParseTreeTransforms.py
@@ -23,7 +23,6 @@
from .TreeFragment import TreeFragment
from .StringEncoding import EncodedString
from .Errors import error, warning, CompileError, InternalError
-from .Code import UtilityCode
class SkipDeclarations:
@@ -211,7 +210,7 @@ def visit_LambdaNode(self, node):
def visit_GeneratorExpressionNode(self, node):
# unpack a generator expression into the corresponding DefNode
collector = YieldNodeCollector()
- collector.visitchildren(node.loop)
+ collector.visitchildren(node.loop, attrs=None, exclude=["iterator"])
node.def_node = Nodes.DefNode(
node.pos, name=node.name, doc=None,
args=[], star_arg=None, starstar_arg=None,
@@ -2825,27 +2824,36 @@ def visit_IndexNode(self, node):
return node
-class FindInvalidUseOfFusedTypes(CythonTransform):
+class FindInvalidUseOfFusedTypes(TreeVisitor):
+
+ def __call__(self, tree):
+ self._in_fused_function = False
+ self.visit(tree)
+ return tree
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
def visit_FuncDefNode(self, node):
- # Errors related to use in functions with fused args will already
- # have been detected
- if not node.has_fused_arguments:
+ outer_status = self._in_fused_function
+ self._in_fused_function = node.has_fused_arguments
+
+ if not self._in_fused_function:
+ # Errors related to use in functions with fused args will already
+ # have been detected.
if not node.is_generator_body and node.return_type.is_fused:
error(node.pos, "Return type is not specified as argument type")
- else:
- self.visitchildren(node)
- return node
+ self.visitchildren(node)
+ self._in_fused_function = outer_status
def visit_ExprNode(self, node):
- if node.type and node.type.is_fused:
+ if not self._in_fused_function and node.type and node.type.is_fused:
error(node.pos, "Invalid use of fused types, type cannot be specialized")
+ # Errors in subtrees are likely related, so do not recurse.
else:
self.visitchildren(node)
- return node
-
class ExpandInplaceOperators(EnvTransform):
@@ -3145,7 +3153,7 @@ def visit_TryFinallyStatNode(self, node):
class YieldNodeCollector(TreeVisitor):
- def __init__(self):
+ def __init__(self, excludes=[]):
super().__init__()
self.yields = []
self.returns = []
@@ -3154,9 +3162,11 @@ def __init__(self):
self.has_return_value = False
self.has_yield = False
self.has_await = False
+ self.excludes = excludes
def visit_Node(self, node):
- self.visitchildren(node)
+ if node not in self.excludes:
+ self.visitchildren(node)
def visit_YieldExprNode(self, node):
self.yields.append(node)
@@ -3192,7 +3202,11 @@ def visit_LambdaNode(self, node):
pass
def visit_GeneratorExpressionNode(self, node):
- pass
+ # node.loop iterator is evaluated outside the generator expression
+ if isinstance(node.loop, Nodes._ForInStatNode):
+ # Possibly should handle ForFromStatNode
+ # but for now do nothing
+ self.visit(node.loop.iterator)
def visit_CArgDeclNode(self, node):
# do not look into annotations
@@ -3206,6 +3220,7 @@ class MarkClosureVisitor(CythonTransform):
def visit_ModuleNode(self, node):
self.needs_closure = False
+ self.excludes = []
self.visitchildren(node)
return node
@@ -3215,7 +3230,7 @@ def visit_FuncDefNode(self, node):
node.needs_closure = self.needs_closure
self.needs_closure = True
- collector = YieldNodeCollector()
+ collector = YieldNodeCollector(self.excludes)
collector.visitchildren(node)
if node.is_async_def:
@@ -3274,7 +3289,11 @@ def visit_ClassDefNode(self, node):
return node
def visit_GeneratorExpressionNode(self, node):
+ excludes = self.excludes
+ if isinstance(node.loop, Nodes._ForInStatNode):
+ self.excludes = [node.loop.iterator]
node = self.visit_LambdaNode(node)
+ self.excludes = excludes
if not isinstance(node.loop, Nodes._ForInStatNode):
# Possibly should handle ForFromStatNode
# but for now do nothing
diff --git a/Cython/Compiler/Parsing.py b/Cython/Compiler/Parsing.py
index 0b6650a9911..34d23891590 100644
--- a/Cython/Compiler/Parsing.py
+++ b/Cython/Compiler/Parsing.py
@@ -787,7 +787,6 @@ def p_name(s, name):
def wrap_compile_time_constant(pos, value):
- rep = repr(value)
if value is None:
return ExprNodes.NoneNode(pos)
elif value is Ellipsis:
@@ -795,9 +794,9 @@ def wrap_compile_time_constant(pos, value):
elif isinstance(value, bool):
return ExprNodes.BoolNode(pos, value=value)
elif isinstance(value, int):
- return ExprNodes.IntNode(pos, value=rep, constant_result=value)
+ return ExprNodes.IntNode(pos, value=repr(value), constant_result=value)
elif isinstance(value, float):
- return ExprNodes.FloatNode(pos, value=rep, constant_result=value)
+ return ExprNodes.FloatNode(pos, value=repr(value), constant_result=value)
elif isinstance(value, complex):
node = ExprNodes.ImagNode(pos, value=repr(value.imag), constant_result=complex(0.0, value.imag))
if value.real:
@@ -813,13 +812,12 @@ def wrap_compile_time_constant(pos, value):
bvalue = bytes_literal(value, 'ascii') # actually: unknown encoding, but BytesLiteral requires one
return ExprNodes.BytesNode(pos, value=bvalue, constant_result=value)
elif isinstance(value, tuple):
- args = [wrap_compile_time_constant(pos, arg)
- for arg in value]
- if None not in args:
- return ExprNodes.TupleNode(pos, args=args)
- else:
+ args = [wrap_compile_time_constant(pos, arg) for arg in value]
+ if None in args:
# error already reported
return None
+ return ExprNodes.TupleNode(pos, args=args)
+
error(pos, "Invalid type for compile-time constant: %r (type %s)"
% (value, value.__class__.__name__))
return None
diff --git a/Cython/Compiler/Pipeline.py b/Cython/Compiler/Pipeline.py
index 3dfbd38665b..d5b3ea320c8 100644
--- a/Cython/Compiler/Pipeline.py
+++ b/Cython/Compiler/Pipeline.py
@@ -212,7 +212,7 @@ def create_pipeline(context, mode, exclude_classes=()):
_check_c_declarations,
InlineDefNodeCalls(context),
AnalyseExpressionsTransform(context),
- FindInvalidUseOfFusedTypes(context),
+ FindInvalidUseOfFusedTypes(),
ExpandInplaceOperators(context),
HPy(context),
IterationTransform(context),
@@ -344,8 +344,6 @@ def insert_into_pipeline(pipeline, transform, before=None, after=None):
# Running a pipeline
#
-_pipeline_entry_points = {}
-
try:
from threading import local as _threadlocal
except ImportError:
@@ -361,10 +359,25 @@ def get_timings():
return {}
+_pipeline_entry_points = {}
+
+def _make_debug_phase_runner(phase_name):
+ # Create a new wrapper for each step to show the name in profiles.
+ try:
+ return _pipeline_entry_points[phase_name]
+ except KeyError:
+ pass
+
+ def run(phase, data):
+ return phase(data)
+
+ run.__name__ = run.__qualname__ = phase_name
+ _pipeline_entry_points[phase_name] = run
+ return run
+
+
def run_pipeline(pipeline, source, printtree=True):
from .Visitor import PrintTree
- exec_ns = globals().copy() if DebugFlags.debug_verbose_pipeline else None
-
try:
timings = threadlocal.cython_pipeline_timings
except AttributeError:
@@ -386,12 +399,7 @@ def run(phase, data):
phase_name = getattr(phase, '__name__', type(phase).__name__)
if DebugFlags.debug_verbose_pipeline:
print("Entering pipeline phase %r" % phase)
- # create a new wrapper for each step to show the name in profiles
- try:
- run = _pipeline_entry_points[phase_name]
- except KeyError:
- exec("def %s(phase, data): return phase(data)" % phase_name, exec_ns)
- run = _pipeline_entry_points[phase_name] = exec_ns[phase_name]
+ run = _make_debug_phase_runner(phase_name)
t = time()
data = run(phase, data)
diff --git a/Cython/Compiler/PyrexTypes.py b/Cython/Compiler/PyrexTypes.py
index 1e1e786be8a..9d18586bc16 100644
--- a/Cython/Compiler/PyrexTypes.py
+++ b/Cython/Compiler/PyrexTypes.py
@@ -317,8 +317,7 @@ def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
def assignment_failure_extra_info(self, src_type):
- """Override if you can useful provide extra
- information about why an assignment didn't work."""
+ """Override if you can provide useful extra information about why an assignment didn't work."""
return ""
def as_argument_type(self):
@@ -1518,7 +1517,7 @@ def __str__(self):
return "%s object" % self.name
def __repr__(self):
- return "<%s>"% self.cname
+ return "<%s>"% self.typeptr_cname
def default_coerced_ctype(self):
if self.name in ('bytes', 'bytearray'):
@@ -2463,7 +2462,7 @@ class CFloatType(CNumericType):
is_float = 1
to_py_function = "PYOBJECT_FLOAT_FROM_DOUBLE"
- from_py_function = "__pyx_PyFloat_AsDouble"
+ from_py_function = "__Pyx_PyFloat_AsDouble"
exception_value = -1
@@ -2471,7 +2470,7 @@ def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
if rank == RANK_FLOAT:
- self.from_py_function = "__pyx_PyFloat_AsFloat"
+ self.from_py_function = "__Pyx_PyFloat_AsFloat"
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
@@ -3000,9 +2999,9 @@ def assignment_failure_extra_info(self, src_type):
copied_src_type.exception_value = self.base_type.exception_value
if self.base_type.pointer_assignable_from_resolved_type(copied_src_type):
# the only reason we can't assign is because of exception incompatibility
- msg = "Exception values are incompatible."
+ msg = " Exception values are incompatible."
if not self.base_type.exception_check and not self.base_type.exception_value:
- msg += " Suggest adding 'noexcept' to type '{}'.".format(src_type)
+ msg += f" Suggest adding 'noexcept' to type '{src_type}'."
return msg
return super().assignment_failure_extra_info(src_type)
@@ -3380,7 +3379,12 @@ def declaration_code(self, entity_code,
if self.is_overridable:
arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
if self.optional_arg_count:
- arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ if self.op_arg_struct:
+ arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ else:
+ # op_arg_struct may not be initialized at this point if this class is being used
+ # to prepare a Python error message or similar. In this case, just omit the args.
+ assert for_display
if self.has_varargs:
arg_decl_list.append("...")
arg_decl_code = ", ".join(arg_decl_list)
@@ -4383,7 +4387,7 @@ class EnumMixin:
def create_enum_to_py_utility_code(self, env):
from .UtilityCode import CythonUtilityCode
- self.to_py_function = "__Pyx_Enum_%s_to_py" % self.name
+ self.to_py_function = "__Pyx_Enum_%s_to_py" % type_identifier(self)
if self.entry.scope != env.global_scope():
module_name = self.entry.scope.qualified_name
else:
@@ -4946,6 +4950,17 @@ def specialize_here(self, pos, env, template_values=None):
memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
None, 1, "__Pyx_memviewslice")
+fixed_sign_int_types = {
+ "bint": (1, c_bint_type),
+ "Py_UNICODE": (0, c_py_unicode_type),
+ "Py_UCS4": (0, c_py_ucs4_type),
+ "Py_hash_t": (2, c_py_hash_t_type),
+ "Py_ssize_t": (2, c_py_ssize_t_type),
+ "ssize_t": (2, c_ssize_t_type),
+ "size_t": (0, c_size_t_type),
+ "ptrdiff_t": (2, c_ptrdiff_t_type),
+}
+
modifiers_and_name_to_type = {
#(signed, longness, name) : type
(0, 0, "char"): c_uchar_type,
@@ -4980,18 +4995,14 @@ def specialize_here(self, pos, env, template_values=None):
(1, 0, "void"): c_void_type,
(1, 0, "Py_tss_t"): c_pytss_t_type,
- (1, 0, "bint"): c_bint_type,
- (0, 0, "Py_UNICODE"): c_py_unicode_type,
- (0, 0, "Py_UCS4"): c_py_ucs4_type,
- (2, 0, "Py_hash_t"): c_py_hash_t_type,
- (2, 0, "Py_ssize_t"): c_py_ssize_t_type,
- (2, 0, "ssize_t") : c_ssize_t_type,
- (0, 0, "size_t") : c_size_t_type,
- (2, 0, "ptrdiff_t") : c_ptrdiff_t_type,
-
(1, 0, "object"): py_object_type,
}
+modifiers_and_name_to_type.update({
+ (signed, 0, name): tp
+ for name, (signed, tp) in fixed_sign_int_types.items()
+})
+
def is_promotion(src_type, dst_type):
# It's hard to find a hard definition of promotion, but empirical
# evidence suggests that the below is all that's allowed.
@@ -5408,41 +5419,34 @@ def parse_basic_type(name):
if base:
return CPtrType(base)
#
+ if name in fixed_sign_int_types:
+ return fixed_sign_int_types[name][1]
basic_type = simple_c_type(1, 0, name)
if basic_type:
return basic_type
#
- signed = 1
- longness = 0
- if name == 'Py_UNICODE':
+ if name.startswith('u'):
+ name = name[1:]
signed = 0
- elif name == 'Py_UCS4':
- signed = 0
- elif name == 'Py_hash_t':
- signed = 2
- elif name == 'Py_ssize_t':
+ elif (name.startswith('s') and
+ not name.startswith('short')):
+ name = name[1:]
signed = 2
- elif name == 'ssize_t':
- signed = 2
- elif name == 'size_t':
- signed = 0
else:
- if name.startswith('u'):
- name = name[1:]
- signed = 0
- elif (name.startswith('s') and
- not name.startswith('short')):
- name = name[1:]
- signed = 2
- longness = 0
- while name.startswith('short'):
- name = name.replace('short', '', 1).strip()
- longness -= 1
- while name.startswith('long'):
- name = name.replace('long', '', 1).strip()
+ signed = 1
+
+ # We parse both (cy) 'long long' and (py) 'longlong' style names here.
+ longness = 0
+ while name.startswith(('long', 'short')):
+ if name.startswith('long'):
+ name = name[4:].lstrip()
longness += 1
- if longness != 0 and not name:
- name = 'int'
+ else:
+ name = name[5:].lstrip()
+ longness -= 1
+ if longness != 0 and not name:
+ name = 'int' # long/short [int]
+
return simple_c_type(signed, longness, name)
@@ -5548,16 +5552,29 @@ def cap_length(s, max_prefix=63, max_len=1024):
hash_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6]
return '%s__%s__etc' % (hash_prefix, s[:max_len-17])
-def write_noexcept_performance_hint(pos, env, function_name=None, void_return=False):
- on_what = "on '%s' " % function_name if function_name else ""
+def write_noexcept_performance_hint(pos, env, function_name=None, void_return=False, is_call=False):
+ if function_name:
+ # we need it escaped everywhere we use it
+ function_name = "'%s'" % function_name
+ if is_call:
+ on_what = "after calling %s " % (function_name or 'function')
+ elif function_name:
+ on_what = "on %s " % function_name
+ else:
+ on_what = ''
msg = (
"Exception check %swill always require the GIL to be acquired."
) % on_what
- solutions = ["Declare the function as 'noexcept' if you control the definition and "
- "you're sure you don't want the function to raise exceptions."]
+ the_function = function_name if function_name else "the function"
+ if is_call and not function_name:
+ the_function = the_function + " you are calling"
+ solutions = ["Declare %s as 'noexcept' if you control the definition and "
+ "you're sure you don't want the function to raise exceptions."
+ % the_function]
if void_return:
solutions.append(
- "Use an 'int' return type on the function to allow an error code to be returned.")
+ "Use an 'int' return type on %s to allow an error code to be returned." %
+ the_function)
if len(solutions) == 1:
msg = "%s %s" % (msg, solutions[0])
else:
diff --git a/Cython/Compiler/Scanning.py b/Cython/Compiler/Scanning.py
index 337ef23b824..245792926cb 100644
--- a/Cython/Compiler/Scanning.py
+++ b/Cython/Compiler/Scanning.py
@@ -346,9 +346,7 @@ def __init__(self, file, filename, parent_scanner=None,
self.next()
def normalize_ident(self, text):
- try:
- text.encode('ascii') # really just name.isascii but supports Python 2 and 3
- except UnicodeEncodeError:
+ if not text.isascii():
text = normalize('NFKC', text)
self.produce(IDENT, text)
diff --git a/Cython/Compiler/StringEncoding.py b/Cython/Compiler/StringEncoding.py
index e31cdd48fec..a81a5a8ec86 100644
--- a/Cython/Compiler/StringEncoding.py
+++ b/Cython/Compiler/StringEncoding.py
@@ -132,16 +132,6 @@ def as_c_string_literal(self):
s = bytes_literal(self.byteencode(), self.encoding)
return s.as_c_string_literal()
- if not hasattr(str, "isascii"):
- def isascii(self):
- # not defined for Python3.7+ since the class already has it
- try:
- self.encode("ascii")
- except UnicodeEncodeError:
- return False
- else:
- return True
-
def string_contains_surrogates(ustring):
"""
diff --git a/Cython/Compiler/Symtab.py b/Cython/Compiler/Symtab.py
index 3a2accc59e1..9e77a36bb71 100644
--- a/Cython/Compiler/Symtab.py
+++ b/Cython/Compiler/Symtab.py
@@ -43,26 +43,24 @@ def c_safe_identifier(cname):
def punycodify_name(cname, mangle_with=None):
# if passed the mangle_with should be a byte string
# modified from PEP489
- try:
- cname.encode('ascii')
- except UnicodeEncodeError:
- cname = cname.encode('punycode').replace(b'-', b'_').decode('ascii')
- if mangle_with:
- # sometimes it necessary to mangle unicode names alone where
- # they'll be inserted directly into C, because the punycode
- # transformation can turn them into invalid identifiers
- cname = "%s_%s" % (mangle_with, cname)
- elif cname.startswith(Naming.pyrex_prefix):
- # a punycode name could also be a valid ascii variable name so
- # change the prefix to distinguish
- cname = cname.replace(Naming.pyrex_prefix,
- Naming.pyunicode_identifier_prefix, 1)
+ if cname.isascii():
+ return cname
+
+ cname = cname.encode('punycode').replace(b'-', b'_').decode('ascii')
+ if mangle_with:
+ # sometimes it is necessary to mangle unicode names alone where
+ # they'll be inserted directly into C, because the punycode
+ # transformation can turn them into invalid identifiers
+ cname = "%s_%s" % (mangle_with, cname)
+ elif cname.startswith(Naming.pyrex_prefix):
+ # a punycode name could also be a valid ascii variable name so
+ # change the prefix to distinguish
+ cname = cname.replace(Naming.pyrex_prefix,
+ Naming.pyunicode_identifier_prefix, 1)
return cname
-
-
class BufferAux:
writable_needed = False
@@ -169,7 +167,6 @@ class Entry:
borrowed = 0
init = ""
annotation = None
- pep563_annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
diff --git a/Cython/Compiler/Tests/TestBuiltin.py b/Cython/Compiler/Tests/TestBuiltin.py
new file mode 100644
index 00000000000..ebc5278a328
--- /dev/null
+++ b/Cython/Compiler/Tests/TestBuiltin.py
@@ -0,0 +1,32 @@
+import builtins
+import sys
+import unittest
+
+from ..Builtin import (
+ inferred_method_return_types, find_return_type_of_builtin_method,
+ builtin_scope,
+)
+
+
+class TestBuiltinReturnTypes(unittest.TestCase):
+ def test_find_return_type_of_builtin_method(self):
+ # It's enough to test that the methods exist in a recent Python that likely has them.
+ look_up_methods = sys.version_info >= (3,10)
+
+ for type_name, methods in inferred_method_return_types.items():
+ py_type = getattr(builtins, type_name if type_name != 'unicode' else 'str')
+
+ for method_name, return_type_name in methods.items():
+ builtin_type = builtin_scope.lookup(type_name).type
+ return_type = find_return_type_of_builtin_method(builtin_type, method_name)
+
+ if return_type.is_builtin_type:
+ if '[' in return_type_name:
+ return_type_name = return_type_name.partition('[')[0]
+ if return_type_name == 'T':
+ return_type_name = type_name
+ self.assertEqual(return_type.name, return_type_name)
+ if look_up_methods:
+ self.assertTrue(hasattr(py_type, method_name), f"{type_name}.{method_name}")
+ else:
+ self.assertEqual(return_type.empty_declaration_code(pyrex=True), return_type_name)
diff --git a/Cython/Compiler/TypeInference.py b/Cython/Compiler/TypeInference.py
index d40d191534e..ffd48d45179 100644
--- a/Cython/Compiler/TypeInference.py
+++ b/Cython/Compiler/TypeInference.py
@@ -544,14 +544,7 @@ def aggressive_spanning_type(types, might_overflow, scope):
def safe_spanning_type(types, might_overflow, scope):
result_type = simply_type(reduce(find_spanning_type, types))
if result_type.is_pyobject:
- # In theory, any specific Python type is always safe to
- # infer. However, inferring str can cause some existing code
- # to break, since we are also now much more strict about
- # coercion from str to char *. See trac #553.
- if result_type.name == 'str':
- return py_object_type
- else:
- return result_type
+ return result_type
elif (result_type is PyrexTypes.c_double_type or
result_type is PyrexTypes.c_float_type):
# Python's float type is just a C double, so it's safe to use
diff --git a/Cython/Compiler/TypeSlots.py b/Cython/Compiler/TypeSlots.py
index b267f29b75c..833cd544298 100644
--- a/Cython/Compiler/TypeSlots.py
+++ b/Cython/Compiler/TypeSlots.py
@@ -6,7 +6,7 @@
from . import Naming
from . import PyrexTypes
-from .Errors import error
+from .Errors import error, warn_once
import copy
@@ -239,22 +239,18 @@ class SlotDescriptor:
# slot_name string Member name of the slot in the type object
# is_initialised_dynamically Is initialised by code in the module init function
# is_inherited Is inherited by subtypes (see PyType_Ready())
- # py3 Indicates presence of slot in Python 3
- # py2 Indicates presence of slot in Python 2
- # ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.)
+ # ifdef Full #ifdef string that slot is wrapped in. Using this causes flags to be ignored.
# used_ifdef Full #ifdef string that the slot value is wrapped in (otherwise it is assigned NULL)
# Unlike "ifdef" the slot is defined and this just controls if it receives a value
def __init__(self, slot_name, dynamic=False, inherited=False,
- py3=True, py2=True, ifdef=None, is_binop=False,
+ ifdef=None, is_binop=False,
used_ifdef=None):
self.slot_name = slot_name
self.is_initialised_dynamically = dynamic
self.is_inherited = inherited
self.ifdef = ifdef
self.used_ifdef = used_ifdef
- self.py3 = py3
- self.py2 = py2
self.is_binop = is_binop
def slot_code(self, scope):
@@ -265,15 +261,9 @@ def spec_value(self, scope):
def preprocessor_guard_code(self):
ifdef = self.ifdef
- py2 = self.py2
- py3 = self.py3
guard = None
if ifdef:
guard = "#if %s" % ifdef
- elif not py3 or py3 == '':
- guard = "#if PY_MAJOR_VERSION < 3"
- elif not py2:
- guard = "#if PY_MAJOR_VERSION >= 3"
return guard
def generate_spec(self, scope, code):
@@ -284,12 +274,18 @@ def generate_spec(self, scope, code):
return
preprocessor_guard = self.preprocessor_guard_code()
if not preprocessor_guard:
- if self.py3 and self.slot_name.startswith('bf_'):
+ if self.slot_name.startswith('bf_'):
# The buffer protocol requires Limited API 3.11, so check if the spec slots are available.
preprocessor_guard = "#if defined(Py_%s)" % self.slot_name
if preprocessor_guard:
code.putln(preprocessor_guard)
+ if self.used_ifdef:
+ # different from preprocessor guard - this defines if we *want* to define it,
+ # rather than if the slot exists
+ code.putln(f"#if {self.used_ifdef}")
code.putln("{Py_%s, (void *)%s}," % (self.slot_name, value))
+ if self.used_ifdef:
+ code.putln("#endif")
if preprocessor_guard:
code.putln("#endif")
@@ -335,9 +331,6 @@ def generate(self, scope, code):
if end_pypy_guard:
code.putln("#endif")
- if self.py3 == '':
- code.putln("#else")
- code.putln("0, /*reserved*/")
if preprocessor_guard:
code.putln("#endif")
@@ -369,8 +362,8 @@ class FixedSlot(SlotDescriptor):
#
# value string
- def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
- SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
+ def __init__(self, slot_name, value, ifdef=None):
+ SlotDescriptor.__init__(self, slot_name, ifdef=ifdef)
self.value = value
def slot_code(self, scope):
@@ -380,8 +373,8 @@ def slot_code(self, scope):
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
- def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
- FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
+ def __init__(self, slot_name, ifdef=None):
+ FixedSlot.__init__(self, slot_name, "0", ifdef=ifdef)
class MethodSlot(SlotDescriptor):
@@ -392,8 +385,8 @@ class MethodSlot(SlotDescriptor):
# alternatives [string] Alternative list of __xxx__ names for the method
def __init__(self, signature, slot_name, method_name, method_name_to_slot,
- fallback=None, py3=True, py2=True, ifdef=None, inherited=True):
- SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2,
+ fallback=None, ifdef=None, inherited=True):
+ SlotDescriptor.__init__(self, slot_name,
ifdef=ifdef, inherited=inherited)
self.signature = signature
self.slot_name = slot_name
@@ -403,19 +396,23 @@ def __init__(self, signature, slot_name, method_name, method_name_to_slot,
#
if fallback:
self.alternatives.append(fallback)
- for alt in (self.py2, self.py3):
- if isinstance(alt, (tuple, list)):
- slot_name, method_name = alt
- self.alternatives.append(method_name)
- method_name_to_slot[method_name] = self
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.is_special and entry.func_cname:
+ for method_name in self.alternatives:
+ alt_entry = scope.lookup_here(method_name)
+ if alt_entry:
+ warn_once(alt_entry.pos,
+ f"{method_name} was removed in Python 3; ignoring it and using {self.method_name} instead",
+ 2)
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
if entry and entry.is_special and entry.func_cname:
+ warn_once(entry.pos,
+ f"{method_name} was removed in Python 3; use {self.method_name} instead",
+ 2)
return entry.func_cname
return "0"
@@ -918,9 +915,6 @@ def is_reverse_number_slot(name):
'__del__': Signature("T", 'r')
}
-
-PyNumberMethods_Py2only_GUARD = "PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)"
-
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
@@ -949,8 +943,6 @@ def __init__(self, old_binops):
BinopSlot(bf, "nb_add", "__add__", method_name_to_slot),
BinopSlot(bf, "nb_subtract", "__sub__", method_name_to_slot),
BinopSlot(bf, "nb_multiply", "__mul__", method_name_to_slot),
- BinopSlot(bf, "nb_divide", "__div__", method_name_to_slot,
- ifdef = PyNumberMethods_Py2only_GUARD),
BinopSlot(bf, "nb_remainder", "__mod__", method_name_to_slot),
BinopSlot(bf, "nb_divmod", "__divmod__", method_name_to_slot),
BinopSlot(ptf, "nb_power", "__pow__", method_name_to_slot),
@@ -958,29 +950,21 @@ def __init__(self, old_binops):
MethodSlot(unaryfunc, "nb_positive", "__pos__", method_name_to_slot),
MethodSlot(unaryfunc, "nb_absolute", "__abs__", method_name_to_slot),
MethodSlot(inquiry, "nb_bool", "__bool__", method_name_to_slot,
- py2 = ("nb_nonzero", "__nonzero__")),
+ fallback="__nonzero__"),
MethodSlot(unaryfunc, "nb_invert", "__invert__", method_name_to_slot),
BinopSlot(bf, "nb_lshift", "__lshift__", method_name_to_slot),
BinopSlot(bf, "nb_rshift", "__rshift__", method_name_to_slot),
BinopSlot(bf, "nb_and", "__and__", method_name_to_slot),
BinopSlot(bf, "nb_xor", "__xor__", method_name_to_slot),
BinopSlot(bf, "nb_or", "__or__", method_name_to_slot),
- EmptySlot("nb_coerce", ifdef = PyNumberMethods_Py2only_GUARD),
MethodSlot(unaryfunc, "nb_int", "__int__", method_name_to_slot, fallback="__long__"),
- MethodSlot(unaryfunc, "nb_long", "__long__", method_name_to_slot,
- fallback="__int__", py3 = ""),
+ EmptySlot("nb_long (reserved)"),
MethodSlot(unaryfunc, "nb_float", "__float__", method_name_to_slot),
- MethodSlot(unaryfunc, "nb_oct", "__oct__", method_name_to_slot,
- ifdef = PyNumberMethods_Py2only_GUARD),
- MethodSlot(unaryfunc, "nb_hex", "__hex__", method_name_to_slot,
- ifdef = PyNumberMethods_Py2only_GUARD),
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__", method_name_to_slot),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__", method_name_to_slot),
MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__", method_name_to_slot),
- MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", method_name_to_slot,
- ifdef = PyNumberMethods_Py2only_GUARD),
MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__", method_name_to_slot),
MethodSlot(ptf, "nb_inplace_power", "__ipow__", method_name_to_slot),
MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__", method_name_to_slot),
@@ -1026,15 +1010,6 @@ def __init__(self, old_binops):
)
self.PyBufferProcs = (
- MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", method_name_to_slot,
- py3 = False),
- MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", method_name_to_slot,
- py3 = False),
- MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", method_name_to_slot,
- py3 = False),
- MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", method_name_to_slot,
- py3 = False),
-
MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", method_name_to_slot),
MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", method_name_to_slot)
)
@@ -1053,10 +1028,8 @@ def __init__(self, old_binops):
EmptySlot("tp_getattr"),
EmptySlot("tp_setattr"),
- # tp_compare (Py2) / tp_reserved (Py3<3.5) / tp_as_async (Py3.5+) is always used as tp_as_async in Py3
- MethodSlot(cmpfunc, "tp_compare", "__cmp__", method_name_to_slot, ifdef="PY_MAJOR_VERSION < 3"),
SuiteSlot(self. PyAsyncMethods, "__Pyx_PyAsyncMethodsStruct", "tp_as_async",
- self.substructures, ifdef="PY_MAJOR_VERSION >= 3"),
+ self.substructures),
MethodSlot(reprfunc, "tp_repr", "__repr__", method_name_to_slot),
@@ -1113,7 +1086,7 @@ def __init__(self, old_binops):
EmptySlot("tp_weaklist"),
EmptySlot("tp_del"),
EmptySlot("tp_version_tag"),
- SyntheticSlot("tp_finalize", ["__del__"], "0", ifdef="PY_VERSION_HEX >= 0x030400a1",
+ SyntheticSlot("tp_finalize", ["__del__"], "0",
used_ifdef="CYTHON_USE_TP_FINALIZE"),
EmptySlot("tp_vectorcall", ifdef="PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)"),
EmptySlot("tp_print", ifdef="__PYX_NEED_TP_PRINT_SLOT == 1"),
@@ -1145,6 +1118,15 @@ def __init__(self, old_binops):
MethodSlot(descrsetfunc, "", "__set__", method_name_to_slot)
MethodSlot(descrdelfunc, "", "__delete__", method_name_to_slot)
+ #-------------------------------------------------------------------------
+ #
+ # Legacy "fallback" Py2 slots. Don't appear in the generated slot table,
+ # but match the "fallback" argument of a slot that does
+ #
+ #-------------------------------------------------------------------------
+ MethodSlot(inquiry, "", "__nonzero__", method_name_to_slot)
+ MethodSlot(unaryfunc, "", "__long__", method_name_to_slot)
+
def get_special_method_signature(self, name):
# Given a method name, if it is a special method,
# return its signature, else return None.
diff --git a/Cython/Compiler/Visitor.py b/Cython/Compiler/Visitor.py
index d32f7b5cf11..282e7950266 100644
--- a/Cython/Compiler/Visitor.py
+++ b/Cython/Compiler/Visitor.py
@@ -575,15 +575,8 @@ def visit_UnopNode(self, node):
### dispatch to specific handlers
def _find_handler(self, match_name, has_kwargs):
- try:
- match_name.encode('ascii')
- except UnicodeEncodeError:
- # specifically when running the Cython compiler under Python 2
- # getattr can't take a unicode string.
- # Classes with unicode names won't have specific handlers and thus it
- # should be OK to return None.
- # Doing the test here ensures that the same code gets run on
- # Python 2 and 3
+ if not match_name.isascii():
+ # Classes with unicode names won't have specific handlers.
return None
call_type = 'general' if has_kwargs else 'simple'
diff --git a/Cython/Includes/cpython/__init__.pxd b/Cython/Includes/cpython/__init__.pxd
index 7ad2684aa79..1254a1229ab 100644
--- a/Cython/Includes/cpython/__init__.pxd
+++ b/Cython/Includes/cpython/__init__.pxd
@@ -149,7 +149,6 @@ from cpython.mapping cimport *
from cpython.iterator cimport *
from cpython.type cimport *
from cpython.number cimport *
-from cpython.int cimport *
from cpython.bool cimport *
from cpython.long cimport *
from cpython.float cimport *
@@ -165,10 +164,6 @@ from cpython.getargs cimport *
from cpython.pythread cimport *
from cpython.pystate cimport *
-# Python <= 2.x
-from cpython.cobject cimport *
-from cpython.oldbuffer cimport *
-
# Python >= 2.4
from cpython.set cimport *
diff --git a/Cython/Includes/cpython/cobject.pxd b/Cython/Includes/cpython/cobject.pxd
deleted file mode 100644
index 497d8a92e80..00000000000
--- a/Cython/Includes/cpython/cobject.pxd
+++ /dev/null
@@ -1,36 +0,0 @@
-
-cdef extern from "Python.h":
-
- ###########################################################################
- # Warning:
- #
- # The CObject API is deprecated as of Python 3.1. Please switch to
- # the new Capsules API.
- ###########################################################################
-
- int PyCObject_Check(object p)
- # Return true if its argument is a PyCObject.
-
- object PyCObject_FromVoidPtr(void* cobj, void (*destr)(void *))
- # Return value: New reference.
- #
- # Create a PyCObject from the void * cobj. The destr function will
- # be called when the object is reclaimed, unless it is NULL.
-
- object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc, void (*destr)(void *, void *))
- # Return value: New reference.
- #
- # Create a PyCObject from the void * cobj. The destr function will
- # be called when the object is reclaimed. The desc argument can be
- # used to pass extra callback data for the destructor function.
-
- void* PyCObject_AsVoidPtr(object self) except? NULL
- # Return the object void * that the PyCObject self was created with.
-
- void* PyCObject_GetDesc(object self) except? NULL
- # Return the description void * that the PyCObject self was created with.
-
- int PyCObject_SetVoidPtr(object self, void* cobj) except 0
- # Set the void pointer inside self to cobj. The PyCObject must not
- # have an associated destructor. Return true on success, false on
- # failure.
diff --git a/Cython/Includes/cpython/int.pxd b/Cython/Includes/cpython/int.pxd
deleted file mode 100644
index 50babff6151..00000000000
--- a/Cython/Includes/cpython/int.pxd
+++ /dev/null
@@ -1,89 +0,0 @@
-cdef extern from "Python.h":
- ctypedef unsigned long long PY_LONG_LONG
-
- ############################################################################
- # Integer Objects
- ############################################################################
- # PyTypeObject PyInt_Type
- # This instance of PyTypeObject represents the Python plain
- # integer type. This is the same object as int and types.IntType.
-
- bint PyInt_Check(object o)
- # Return true if o is of type PyInt_Type or a subtype of
- # PyInt_Type.
-
- bint PyInt_CheckExact(object o)
- # Return true if o is of type PyInt_Type, but not a subtype of
- # PyInt_Type.
-
- object PyInt_FromString(char *str, char **pend, int base)
- # Return value: New reference.
- # Return a new PyIntObject or PyLongObject based on the string
- # value in str, which is interpreted according to the radix in
- # base. If pend is non-NULL, *pend will point to the first
- # character in str which follows the representation of the
- # number. If base is 0, the radix will be determined based on the
- # leading characters of str: if str starts with '0x' or '0X',
- # radix 16 will be used; if str starts with '0', radix 8 will be
- # used; otherwise radix 10 will be used. If base is not 0, it must
- # be between 2 and 36, inclusive. Leading spaces are ignored. If
- # there are no digits, ValueError will be raised. If the string
- # represents a number too large to be contained within the
- # machine's long int type and overflow warnings are being
- # suppressed, a PyLongObject will be returned. If overflow
- # warnings are not being suppressed, NULL will be returned in this
- # case.
-
- object PyInt_FromLong(long ival)
- # Return value: New reference.
- # Create a new integer object with a value of ival.
- # The current implementation keeps an array of integer objects for
- # all integers between -5 and 256, when you create an int in that
- # range you actually just get back a reference to the existing
- # object. So it should be possible to change the value of 1. I
- # suspect the behaviour of Python in this case is undefined. :-)
-
- object PyInt_FromSsize_t(Py_ssize_t ival)
- # Return value: New reference.
- # Create a new integer object with a value of ival. If the value
- # is larger than LONG_MAX or smaller than LONG_MIN, a long integer
- # object is returned.
-
- object PyInt_FromSize_t(size_t ival)
- # Return value: New reference.
- # Create a new integer object with a value of ival. If the value
- # exceeds LONG_MAX, a long integer object is returned.
-
- long PyInt_AsLong(object io) except? -1
- # Will first attempt to cast the object to a PyIntObject, if it is
- # not already one, and then return its value. If there is an
- # error, -1 is returned, and the caller should check
- # PyErr_Occurred() to find out whether there was an error, or
- # whether the value just happened to be -1.
-
- long PyInt_AS_LONG(object io)
- # Return the value of the object io. No error checking is performed.
-
- unsigned long PyInt_AsUnsignedLongMask(object io) except? -1
- # Will first attempt to cast the object to a PyIntObject or
- # PyLongObject, if it is not already one, and then return its
- # value as unsigned long. This function does not check for
- # overflow.
-
- PY_LONG_LONG PyInt_AsUnsignedLongLongMask(object io) except? -1
- # Will first attempt to cast the object to a PyIntObject or
- # PyLongObject, if it is not already one, and then return its
- # value as unsigned long long, without checking for overflow.
-
- Py_ssize_t PyInt_AsSsize_t(object io) except? -1
- # Will first attempt to cast the object to a PyIntObject or
- # PyLongObject, if it is not already one, and then return its
- # value as Py_ssize_t.
-
- long PyInt_GetMax()
- # Return the system's idea of the largest integer it can handle
- # (LONG_MAX, as defined in the system header files).
-
- int PyInt_ClearFreeList()
- # Clear the integer free list. Return the number of items that could not be freed.
- # New in version 2.6.
diff --git a/Cython/Includes/cpython/oldbuffer.pxd b/Cython/Includes/cpython/oldbuffer.pxd
deleted file mode 100644
index 0222428ed48..00000000000
--- a/Cython/Includes/cpython/oldbuffer.pxd
+++ /dev/null
@@ -1,63 +0,0 @@
-# Legacy Python 2 buffer interface.
-#
-# These functions are no longer available in Python 3, use the new
-# buffer interface instead.
-
-cdef extern from "Python.h":
- cdef enum _:
- Py_END_OF_BUFFER
- # This constant may be passed as the size parameter to
- # PyBuffer_FromObject() or PyBuffer_FromReadWriteObject(). It
- # indicates that the new PyBufferObject should refer to base object
- # from the specified offset to the end of its exported
- # buffer. Using this enables the caller to avoid querying the base
- # object for its length.
-
- bint PyBuffer_Check(object p)
- # Return true if the argument has type PyBuffer_Type.
-
- object PyBuffer_FromObject(object base, Py_ssize_t offset, Py_ssize_t size)
- # Return value: New reference.
- #
- # Return a new read-only buffer object. This raises TypeError if
- # base doesn't support the read-only buffer protocol or doesn't
- # provide exactly one buffer segment, or it raises ValueError if
- # offset is less than zero. The buffer will hold a reference to the
- # base object, and the buffer's contents will refer to the base
- # object's buffer interface, starting as position offset and
- # extending for size bytes. If size is Py_END_OF_BUFFER, then the
- # new buffer's contents extend to the length of the base object's
- # exported buffer data.
-
- object PyBuffer_FromReadWriteObject(object base, Py_ssize_t offset, Py_ssize_t size)
- # Return value: New reference.
- #
- # Return a new writable buffer object. Parameters and exceptions
- # are similar to those for PyBuffer_FromObject(). If the base
- # object does not export the writeable buffer protocol, then
- # TypeError is raised.
-
- object PyBuffer_FromMemory(void *ptr, Py_ssize_t size)
- # Return value: New reference.
- #
- # Return a new read-only buffer object that reads from a specified
- # location in memory, with a specified size. The caller is
- # responsible for ensuring that the memory buffer, passed in as
- # ptr, is not deallocated while the returned buffer object
- # exists. Raises ValueError if size is less than zero. Note that
- # Py_END_OF_BUFFER may not be passed for the size parameter;
- # ValueError will be raised in that case.
-
- object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t size)
- # Return value: New reference.
- #
- # Similar to PyBuffer_FromMemory(), but the returned buffer is
- # writable.
-
- object PyBuffer_New(Py_ssize_t size)
- # Return value: New reference.
- #
- # Return a new writable buffer object that maintains its own memory
- # buffer of size bytes. ValueError is returned if size is not zero
- # or positive. Note that the memory buffer (as returned by
- # PyObject_AsWriteBuffer()) is not specifically aligned.
diff --git a/Cython/Includes/cpython/unicode.pxd b/Cython/Includes/cpython/unicode.pxd
index a7d24e42823..c798ae2b644 100644
--- a/Cython/Includes/cpython/unicode.pxd
+++ b/Cython/Includes/cpython/unicode.pxd
@@ -194,7 +194,7 @@ cdef extern from *:
# string is null-terminated in case this is required by the application.
# Also, note that the wchar_t* string might contain null characters,
# which would cause the string to be truncated when used with most C functions.
- Py_ssize_t PyUnicode_AsWideChar(object o, wchar_t *w, Py_ssize_t size)
+ Py_ssize_t PyUnicode_AsWideChar(object o, wchar_t *w, Py_ssize_t size) except -1
# Convert the Unicode object to a wide character string. The output
# string always ends with a null character. If size is not NULL,
@@ -207,7 +207,7 @@ cdef extern from *:
# Returns a buffer allocated by PyMem_New (use PyMem_Free() to free it)
# on success. On error, returns NULL and *size is undefined. Raises a
# MemoryError if memory allocation is failed.
- wchar_t *PyUnicode_AsWideCharString(object o, Py_ssize_t *size)
+ wchar_t *PyUnicode_AsWideCharString(object o, Py_ssize_t *size) except NULL
# Unicode Methods
@@ -394,7 +394,11 @@ cdef extern from *:
# This caches the UTF-8 representation of the string in the Unicode
# object, and subsequent calls will return a pointer to the same buffer.
# The caller is not responsible for deallocating the buffer
- const char* PyUnicode_AsUTF8AndSize(object unicode, Py_ssize_t *size)
+ const char* PyUnicode_AsUTF8AndSize(object unicode, Py_ssize_t *size) except NULL
+
+
+ # As PyUnicode_AsUTF8AndSize(), but does not store the size.
+ const char *PyUnicode_AsUTF8(object unicode) except NULL
# These are the UTF-16 codec APIs:
diff --git a/Cython/Shadow.py b/Cython/Shadow.py
index 9f8e552690d..977f8454590 100644
--- a/Cython/Shadow.py
+++ b/Cython/Shadow.py
@@ -442,6 +442,8 @@ def _specialized_from_args(signatures, args, kwargs):
'Py_hash_t',
'Py_ssize_t',
'size_t',
+ 'ssize_t',
+ 'ptrdiff_t',
]
float_types = [
'longdouble',
@@ -478,7 +480,7 @@ def _specialized_from_args(signatures, args, kwargs):
for name in int_types:
reprname = to_repr(name, name)
gs[name] = typedef(py_int, reprname)
- if name not in ('Py_UNICODE', 'Py_UCS4') and not name.endswith('size_t'):
+ if name not in ('Py_UNICODE', 'Py_UCS4', 'Py_hash_t', 'ptrdiff_t') and not name.endswith('size_t'):
gs['u'+name] = typedef(py_int, "unsigned " + reprname)
gs['s'+name] = typedef(py_int, "signed " + reprname)
@@ -488,18 +490,40 @@ def _specialized_from_args(signatures, args, kwargs):
for name in complex_types:
gs[name] = typedef(py_complex, to_repr(name, name))
+del name, reprname
+
bint = typedef(bool, "bint")
void = typedef(None, "void")
Py_tss_t = typedef(None, "Py_tss_t")
-for t in int_types + float_types + complex_types + other_types:
+for t in int_types:
+ for i in range(1, 4):
+ gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i)
+ if 'u'+t in gs:
+ gs["%s_u%s" % ('p'*i, t)] = gs['u'+t]._pointer(i)
+ gs["%s_s%s" % ('p'*i, t)] = gs['s'+t]._pointer(i)
+
+for t in float_types + complex_types + other_types:
for i in range(1, 4):
gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i)
+del t, i
+
NULL = gs['p_void'](0)
-# looks like 'gs' has some users out there by now...
-#del gs
+del gs
+
+
+def __getattr__(name):
+ # looks like 'gs' has some users out there by now...
+ if name == 'gs':
+ import warnings
+ warnings.warn(
+ "'gs' is not a publicly exposed name in cython.*. Use vars() or globals() instead.",
+ DeprecationWarning)
+ return globals()
+ raise AttributeError(f"'cython' has no attribute {name!r}")
+
integral = floating = numeric = _FusedType()
diff --git a/Cython/Tests/TestShadow.py b/Cython/Tests/TestShadow.py
index f19719932ab..329c3a51731 100644
--- a/Cython/Tests/TestShadow.py
+++ b/Cython/Tests/TestShadow.py
@@ -1,7 +1,7 @@
import unittest
from Cython import Shadow
-from Cython.Compiler import Options, CythonScope
+from Cython.Compiler import Options, CythonScope, PyrexTypes, Errors
class TestShadow(unittest.TestCase):
def test_all_directives_in_shadow(self):
@@ -57,6 +57,58 @@ def test_all_types_in_shadow(self):
missing_types.append(key)
self.assertEqual(missing_types, [])
- # TODO - there's a lot of types that are looked up by `cython_scope.lookup_type` that
- # it's unfortunately hard to get a definite list of to confirm that they're present
- # (because they're obtained by on-the-fly string parsing)
+ def test_int_types_in_shadow(self):
+ missing_types = []
+ for int_name in Shadow.int_types:
+ for sign in ['', 'u', 's']:
+ name = sign + int_name
+
+ if sign and (
+ int_name in ['Py_UNICODE', 'Py_UCS4', 'Py_ssize_t',
+ 'ssize_t', 'ptrdiff_t', 'Py_hash_t'] or
+ name == "usize_t"):
+ # size_t is special-cased here a little since ssize_t is legitimate
+ # but usize_t isn't
+ self.assertNotIn(name, dir(Shadow))
+ self.assertNotIn('p_' + name, dir(Shadow))
+ continue
+
+ if not hasattr(Shadow, name):
+ missing_types.append(name)
+
+ for ptr in range(1, 4):
+ ptr_name = 'p' * ptr + '_' + name
+ if not hasattr(Shadow, ptr_name):
+ missing_types.append(ptr_name)
+ self.assertEqual(missing_types, [])
+
+ def test_most_types(self):
+ # TODO it's unfortunately hard to get a definite list of types to confirm that they're
+ # present (because they're obtained by on-the-fly string parsing in `cython_scope.lookup_type`)
+
+ cython_scope = CythonScope.create_cython_scope(None)
+ # Set up just enough of "Context" and "Errors" that CythonScope.lookup_type can fail
+ class Context:
+ cpp = False
+ language_level = 3
+ future_directives = []
+ cython_scope.context = Context
+ Errors.init_thread()
+
+ missing_types = []
+ missing_lookups = []
+ for (signed, longness, name), type_ in PyrexTypes.modifiers_and_name_to_type.items():
+ if name == 'object':
+ continue # This probably shouldn't be in Shadow
+ if not hasattr(Shadow, name):
+ missing_types.append(name)
+ if not cython_scope.lookup_type(name):
+ missing_lookups.append(name)
+ for ptr in range(1, 4):
+ ptr_name = 'p' * ptr + '_' + name
+ if not hasattr(Shadow, ptr_name):
+ missing_types.append(ptr_name)
+ if not cython_scope.lookup_type(ptr_name):
+ missing_lookups.append(ptr_name)
+ self.assertEqual(missing_types, [])
+ self.assertEqual(missing_lookups, [])
diff --git a/Cython/Utility/Builtins.c b/Cython/Utility/Builtins.c
index 8123df16eb4..7f613a788be 100644
--- a/Cython/Utility/Builtins.c
+++ b/Cython/Utility/Builtins.c
@@ -49,12 +49,17 @@ static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject* o, PyObject* globals) {
static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) {
PyObject* result;
+#if !CYTHON_COMPILING_IN_LIMITED_API
PyObject* s = 0;
char *code = 0;
+#endif
if (!globals || globals == Py_None) {
globals = $moddict_cname;
- } else if (unlikely(!PyDict_Check(globals))) {
+ }
+#if !CYTHON_COMPILING_IN_LIMITED_API
+ // In the Limited API we just use the 'exec' builtin, which already performs this check
+ else if (unlikely(!PyDict_Check(globals))) {
__Pyx_TypeName globals_type_name =
__Pyx_PyType_GetName(Py_TYPE(globals));
PyErr_Format(PyExc_TypeError,
@@ -63,10 +68,12 @@ static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals)
__Pyx_DECREF_TypeName(globals_type_name);
goto bad;
}
+#endif
if (!locals || locals == Py_None) {
locals = globals;
}
+#if !CYTHON_COMPILING_IN_LIMITED_API
if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) {
if (unlikely(PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0))
goto bad;
@@ -115,6 +122,19 @@ static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals)
bad:
Py_XDECREF(s);
return 0;
+#else // CYTHON_COMPILING_IN_LIMITED_API
+ {
+ // For the limited API we just defer to the actual builtin
+ // (after setting up globals and locals) - there's too much we can't do otherwise
+ PyObject *builtins, *exec;
+ builtins = PyEval_GetBuiltins();
+ if (!builtins) return NULL;
+ exec = PyDict_GetItemString(builtins, "exec");
+ if (unlikely(!exec)) { PyErr_SetString(PyExc_SystemError, "'exec' not found in builtins"); return NULL; }
+ result = PyObject_CallFunctionObjArgs(exec, o, globals, locals, NULL);
+ return result;
+ }
+#endif
}
//////////////////// GetAttr3.proto ////////////////////
@@ -331,16 +351,32 @@ static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/
static long __Pyx__PyObject_Ord(PyObject* c) {
Py_ssize_t size;
if (PyBytes_Check(c)) {
- size = PyBytes_GET_SIZE(c);
+ size = __Pyx_PyBytes_GET_SIZE(c);
if (likely(size == 1)) {
+#if CYTHON_ASSUME_SAFE_MACROS
return (unsigned char) PyBytes_AS_STRING(c)[0];
+#else
+ char *data = PyBytes_AsString(c);
+ if (unlikely(!data)) return -1;
+ return (unsigned char) data[0];
+#endif
}
-#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+#if !CYTHON_ASSUME_SAFE_SIZE
+ else if (unlikely(size < 0)) return -1;
+#endif
} else if (PyByteArray_Check(c)) {
- size = PyByteArray_GET_SIZE(c);
+ size = __Pyx_PyByteArray_GET_SIZE(c);
if (likely(size == 1)) {
+#if CYTHON_ASSUME_SAFE_MACROS
return (unsigned char) PyByteArray_AS_STRING(c)[0];
+#else
+ char *data = PyByteArray_AsString(c);
+ if (unlikely(!data)) return -1;
+ return (unsigned char) data[0];
+#endif
}
+#if !CYTHON_ASSUME_SAFE_SIZE
+ else if (unlikely(size < 0)) return -1;
#endif
} else {
// FIXME: support character buffers - but CPython doesn't support them either
@@ -475,8 +511,23 @@ static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) {
result = PyFrozenSet_New(it);
if (unlikely(!result))
return NULL;
- if ((PY_VERSION_HEX >= 0x031000A1) || likely(PySet_GET_SIZE(result)))
+ if ((__PYX_LIMITED_VERSION_HEX >= 0x030A00A1)
+#if CYTHON_COMPILING_IN_LIMITED_API
+ || __Pyx_get_runtime_version() >= 0x030A00A1
+#endif
+ )
return result;
+ {
+ Py_ssize_t size = __Pyx_PySet_GET_SIZE(result);
+ if (likely(size > 0))
+ return result;
+#if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(size < 0)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+#endif
+ }
// empty frozenset is a singleton (on Python <3.10)
// seems wasteful, but CPython does the same
Py_DECREF(result);
@@ -500,7 +551,11 @@ static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) {
PyObject *retval;
#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY
if (PyAnySet_Check(it)) {
- if (PySet_GET_SIZE(it) == 0)
+ Py_ssize_t size = __Pyx_PySet_GET_SIZE(it);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(size < 0)) return -1;
+ #endif
+ if (size == 0)
return 0;
// fast and safe case: CPython will update our result set and return it
retval = PySet_Type.tp_as_number->nb_inplace_or(set, it);
diff --git a/Cython/Utility/Coroutine.c b/Cython/Utility/Coroutine.c
index 9a40900a867..9dd14786f38 100644
--- a/Cython/Utility/Coroutine.c
+++ b/Cython/Utility/Coroutine.c
@@ -511,7 +511,16 @@ static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *$local_tstate_cna
// PyErr_SetObject() and friends put the value directly into ev
else if (unlikely(PyTuple_Check(ev))) {
// if it's a tuple, it is interpreted as separate constructor arguments (surprise!)
- if (PyTuple_GET_SIZE(ev) >= 1) {
+ Py_ssize_t tuple_size = __Pyx_PyTuple_GET_SIZE(ev);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(tuple_size < 0)) {
+ Py_XDECREF(tb);
+ Py_DECREF(ev);
+ Py_DECREF(et);
+ return -1;
+ }
+ #endif
+ if (tuple_size >= 1) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
value = PyTuple_GET_ITEM(ev, 0);
Py_INCREF(value);
diff --git a/Cython/Utility/CythonFunction.c b/Cython/Utility/CythonFunction.c
index 66da48d4e99..d44af5e9a46 100644
--- a/Cython/Utility/CythonFunction.c
+++ b/Cython/Utility/CythonFunction.c
@@ -72,7 +72,7 @@ typedef struct {
// Dynamic default args and annotations
void *defaults;
int defaults_pyobjects;
- size_t defaults_size; // used by FusedFunction for copying defaults
+ size_t defaults_size; /* used by FusedFunction for copying defaults */
int flags;
// Defaults info
@@ -1093,7 +1093,7 @@ static PyObject * __Pyx_CyFunction_CallMethod(HPY_CONTEXT_FIRST_ARG_DEF PyObject
return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
case METH_NOARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
size = PyTuple_GET_SIZE(arg);
#else
size = PyTuple_Size(arg);
@@ -1118,7 +1118,7 @@ static PyObject * __Pyx_CyFunction_CallMethod(HPY_CONTEXT_FIRST_ARG_DEF PyObject
break;
case METH_O:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
size = PyTuple_GET_SIZE(arg);
#else
size = PyTuple_Size(arg);
@@ -1187,12 +1187,12 @@ static PYOBJECT_TYPE __Pyx_CyFunction_CallAsMethod(HPY_CONTEXT_FIRST_ARG_DEF PYO
PyObject *result;
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
-#if CYTHON_METH_FASTCALL
+#if CYTHON_METH_FASTCALL && (CYTHON_VECTORCALL || CYTHON_BACKPORT_VECTORCALL)
// Prefer vectorcall if available. This is not the typical case, as
// CPython would normally use vectorcall directly instead of tp_call.
__pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc);
if (vc) {
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw);
#else
// avoid unused function warning
@@ -1207,11 +1207,11 @@ static PYOBJECT_TYPE __Pyx_CyFunction_CallAsMethod(HPY_CONTEXT_FIRST_ARG_DEF PYO
PyObject *new_args;
PyObject *self;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
argc = PyTuple_GET_SIZE(args);
#else
argc = PyTuple_Size(args);
- if (unlikely(!argc) < 0) return NULL;
+ if (unlikely(argc < 0)) return NULL;
#endif
new_args = PyTuple_GetSlice(args, 1, argc);
@@ -1236,7 +1236,7 @@ static PYOBJECT_TYPE __Pyx_CyFunction_CallAsMethod(HPY_CONTEXT_FIRST_ARG_DEF PYO
}
#endif /* CYTHON_USING_HPY */
-#if CYTHON_METH_FASTCALL
+#if CYTHON_METH_FASTCALL && (CYTHON_VECTORCALL || CYTHON_BACKPORT_VECTORCALL || CYTHON_USING_HPY)
// Check that kwnames is empty (if you want to allow keyword arguments,
// simply pass kwnames=NULL) and figure out what to do with "self".
// Return value:
@@ -1707,7 +1707,10 @@ static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *class
//@requires: CythonFunctionShared
static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj) {
- Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions);
+ Py_ssize_t i, count = __Pyx_PyList_GET_SIZE(cyfunctions);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(count < 0)) return -1;
+ #endif
for (i = 0; i < count; i++) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *)
@@ -1892,10 +1895,14 @@ __pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx)
}
if (PyTuple_Check(idx)) {
- Py_ssize_t n = PyTuple_GET_SIZE(idx);
- PyObject *list = PyList_New(n);
+ Py_ssize_t n = __Pyx_PyTuple_GET_SIZE(idx);
+ PyObject *list;
int i;
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(n < 0)) return NULL;
+ #endif
+ list = PyList_New(n);
if (unlikely(!list))
return NULL;
@@ -1971,11 +1978,14 @@ static PyObject *
__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw)
{
__pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func;
- Py_ssize_t argc = PyTuple_GET_SIZE(args);
+ Py_ssize_t argc = __Pyx_PyTuple_GET_SIZE(args);
PyObject *new_args = NULL;
__pyx_FusedFunctionObject *new_func = NULL;
PyObject *result = NULL;
int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD;
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(argc < 0)) return NULL;
+ #endif
if (binding_func->self) {
// Bound method call, put 'self' in the args tuple
diff --git a/Cython/Utility/Dataclasses.c b/Cython/Utility/Dataclasses.c
index 6b1942bd04c..6247020a3bc 100644
--- a/Cython/Utility/Dataclasses.c
+++ b/Cython/Utility/Dataclasses.c
@@ -39,21 +39,21 @@ static PyObject* __Pyx_LoadInternalModule(const char* name, const char* fallback
if (!module) {
PyObject *localDict, *runValue, *builtins, *modulename;
if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad;
- PyErr_Clear(); // this is reasonably likely (especially on older versions of Python)
+ PyErr_Clear(); /* this is reasonably likely (especially on older versions of Python) */
modulename = PyUnicode_FromFormat("_cython_" CYTHON_ABI ".%s", name);
if (!modulename) goto bad;
#if CYTHON_COMPILING_IN_CPYTHON
- module = PyImport_AddModuleObject(modulename); // borrowed
+ module = PyImport_AddModuleObject(modulename); /* borrowed */
#else
- module = PyImport_AddModule(PyBytes_AsString(modulename)); // borrowed
+ module = PyImport_AddModule(PyBytes_AsString(modulename)); /* borrowed */
#endif
Py_DECREF(modulename);
if (!module) goto bad;
Py_INCREF(module);
if (PyObject_SetAttrString(shared_abi_module, name, module) < 0) goto bad;
- localDict = PyModule_GetDict(module); // borrowed
+ localDict = PyModule_GetDict(module); /* borrowed */
if (!localDict) goto bad;
- builtins = PyEval_GetBuiltins(); // borrowed
+ builtins = PyEval_GetBuiltins(); /* borrowed */
if (!builtins) goto bad;
if (PyDict_SetItemString(localDict, "__builtins__", builtins) <0) goto bad;
@@ -98,7 +98,7 @@ static PyObject* __Pyx_DataclassesCallHelper(PyObject *callable, PyObject *kwds)
static int __Pyx_DataclassesCallHelper_FilterToDict(PyObject *callable, PyObject *kwds, PyObject *new_kwds, PyObject *args_list, int is_kwonly) {
Py_ssize_t size, i;
size = PySequence_Size(args_list);
- if (size == -1) return -1;
+ if (unlikely(size < 0)) return -1;
for (i=0; itp_dict, PYIDENT("__reduce__"), reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
- ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = __Pyx_SetItemOnTypeDict((PyTypeObject*)type_obj, PYIDENT("__reduce__"), reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = __Pyx_DelItemOnTypeDict((PyTypeObject*)type_obj, PYIDENT("__reduce_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
// Ignore if we're done, i.e. if 'reduce' already has the right name and the original is gone.
// Otherwise: error.
@@ -513,8 +510,8 @@ static int __Pyx_setup_reduce(PyObject* type_obj) {
if (!setstate || __Pyx_setup_reduce_is_named(setstate, PYIDENT("__setstate_cython__"))) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate_cython__"));
if (likely(setstate_cython)) {
- ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate__"), setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
- ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = __Pyx_SetItemOnTypeDict((PyTypeObject*)type_obj, PYIDENT("__setstate__"), setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = __Pyx_DelItemOnTypeDict((PyTypeObject*)type_obj, PYIDENT("__setstate_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
// Ignore if we're done, i.e. if 'setstate' already has the right name and the original is gone.
// Otherwise: error.
@@ -549,7 +546,6 @@ static int __Pyx_setup_reduce(PyObject* type_obj) {
Py_XDECREF(setstate_cython);
return ret;
}
-#endif
/////////////// BinopSlot ///////////////
diff --git a/Cython/Utility/FunctionArguments.c b/Cython/Utility/FunctionArguments.c
index 6212c09299f..c3a327ac0f3 100644
--- a/Cython/Utility/FunctionArguments.c
+++ b/Cython/Utility/FunctionArguments.c
@@ -1,6 +1,8 @@
//////////////////// ArgTypeTest.proto ////////////////////
+// Exact is 0 (False), 1 (True) or 2 (True and from annotation)
+// The latter gives a small amount of extra error diagnostics
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact) \
((likely(OBJ_IS_TYPE(obj, type) | (none_allowed && (API_IS_EQUAL(obj, API_NONE_VALUE))))) ? 1 : \
__Pyx__ArgTypeTest(HPY_CONTEXT_FIRST_ARG_CALL obj, type, name, exact))
@@ -8,25 +10,57 @@
static int __Pyx__ArgTypeTest(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE obj, PYTYPEOBJECT_TYPE type, const char *name, int exact); /*proto*/
//////////////////// ArgTypeTest ////////////////////
+//@substitute: naming
static int __Pyx__ArgTypeTest(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE obj, PYTYPEOBJECT_TYPE type, const char *name, int exact)
{
__Pyx_TypeName type_name;
__Pyx_TypeName obj_type_name;
+ PYOBJECT_TYPE extra_info = PYOBJECT_GLOBAL_LOAD($empty_unicode);
+ int from_annotation_subclass = 0;
if (unlikely(API_IS_NULL(type))) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (!exact) {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
+ } else if (exact == 2) {
+ // type from annotation
+ if (__Pyx_TypeCheck(obj, type)) {
+ from_annotation_subclass = 1;
+ extra_info = PYOBJECT_GLOBAL_LOAD(PYUNICODE("Note that Cython is deliberately stricter than PEP-484 and rejects subclasses of builtin types. If you need to pass subclasses then set the 'annotation_typing' directive to False."));
+ }
}
type_name = __Pyx_PyType_GetName(HPY_CONTEXT_FIRST_ARG_CALL type);
obj_type_name = __Pyx_PyType_GetName(HPY_CONTEXT_FIRST_ARG_CALL GET_TYPE(obj));
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME
- ", got " __Pyx_FMT_TYPENAME ")", name, type_name, obj_type_name);
+ ", got " __Pyx_FMT_TYPENAME ")"
+#if __PYX_LIMITED_VERSION_HEX < 0x030C0000
+ "%s%U"
+#endif
+ , name, type_name, obj_type_name
+#if __PYX_LIMITED_VERSION_HEX < 0x030C0000
+ , (from_annotation_subclass ? ". " : ""), extra_info
+#endif
+ );
+#if __PYX_LIMITED_VERSION_HEX >= 0x030C0000
+ // Set the extra_info as a note instead. In principle it'd be possible to do this
+ // from Python 3.11 up, but PyErr_GetRaisedException makes it much easier so do it
+ // from Python 3.12 instead.
+ if (exact == 2 && from_annotation_subclass) {
+ PyObject *res;
+ PyObject *vargs[2];
+ vargs[0] = PyErr_GetRaisedException();
+ vargs[1] = extra_info;
+ res = PyObject_VectorcallMethod(PYUNICODE("add_note"), vargs, 2, NULL);
+ Py_XDECREF(res);
+ PyErr_SetRaisedException(vargs[0]);
+ }
+#endif
__Pyx_DECREF_TypeName(type_name);
__Pyx_DECREF_TypeName(obj_type_name);
+ PYOBJECT_GLOBAL_CLOSEREF(extra_info);
return 0;
}
@@ -137,7 +171,7 @@ static int __Pyx_CheckKeywordStrings(
#else
if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kw))) {
Py_ssize_t kwsize;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
kwsize = PyTuple_GET_SIZE(kw);
#else
kwsize = PyTuple_Size(kw);
@@ -240,7 +274,7 @@ static int __Pyx_ParseOptionalKeywords(
if (kwds_is_tuple) {
Py_ssize_t size;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
size = PyTuple_GET_SIZE(kwds);
#else
size = PyTuple_Size(kwds);
@@ -277,7 +311,7 @@ static int __Pyx_ParseOptionalKeywords(
if (*name) {
values[name-argnames] = value;
#if CYTHON_AVOID_BORROWED_REFS
- Py_INCREF(value); // transfer ownership of value to values
+ Py_INCREF(value); /* transfer ownership of value to values */
Py_DECREF(key);
#endif
key = NULL;
@@ -295,17 +329,16 @@ static int __Pyx_ParseOptionalKeywords(
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (
- #if !CYTHON_COMPILING_IN_PYPY
- (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #if CYTHON_ASSUME_SAFE_SIZE
+ (PyUnicode_GET_LENGTH(**name) != PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
- // In Py2, we may need to convert the argument name from str to unicode for comparison.
PyUnicode_Compare(**name, key)
);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
#if CYTHON_AVOID_BORROWED_REFS
- value = NULL; // ownership transferred to values
+ value = NULL; /* ownership transferred to values */
#endif
break;
}
@@ -317,8 +350,8 @@ static int __Pyx_ParseOptionalKeywords(
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
- #if !CYTHON_COMPILING_IN_PYPY
- (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #if CYTHON_ASSUME_SAFE_SIZE
+ (PyUnicode_GET_LENGTH(**argname) != PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
// need to convert argument name from bytes to unicode for comparison
PyUnicode_Compare(**argname, key);
@@ -441,8 +474,8 @@ static int __Pyx_MergeKeywords(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE kwdict, P
#define __Pyx_Arg_NewRef_VARARGS(arg) __Pyx_NewRef(arg)
#define __Pyx_Arg_XDECREF_VARARGS(arg) Py_XDECREF(arg)
#else
- #define __Pyx_Arg_NewRef_VARARGS(arg) arg // no-op
- #define __Pyx_Arg_XDECREF_VARARGS(arg) // no-op - arg is borrowed
+ #define __Pyx_Arg_NewRef_VARARGS(arg) arg /* no-op */
+ #define __Pyx_Arg_XDECREF_VARARGS(arg) /* no-op - arg is borrowed */
#endif
#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds)
#define __Pyx_KwValues_VARARGS(args, nargs) NULL
@@ -453,14 +486,14 @@ static int __Pyx_MergeKeywords(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE kwdict, P
#define __Pyx_NumKwargs_FASTCALL(kwds) TUPLE_GET_SIZE(kwds)
#define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs))
static CYTHON_INLINE PYOBJECT_TYPE __Pyx_GetKwValue_FASTCALL(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE kwnames, PYOBJECT_TYPE const *kwvalues, PYOBJECT_TYPE s);
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000
CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues);/*proto*/
#else
#define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw)
#endif
- #define __Pyx_Arg_NewRef_FASTCALL(arg) arg // no-op, __Pyx_Arg_FASTCALL is direct and this needs
- // to have the same reference counting
- #define __Pyx_Arg_XDECREF_FASTCALL(arg) // no-op - arg was returned from array
+ #define __Pyx_Arg_NewRef_FASTCALL(arg) arg /* no-op, __Pyx_Arg_FASTCALL is direct and this needs
+ to have the same reference counting */
+ #define __Pyx_Arg_XDECREF_FASTCALL(arg) /* no-op - arg was returned from array */
#else
#define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS
#define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS
@@ -504,11 +537,11 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx_GetKwValue_FASTCALL(HPY_CONTEXT_FIRST_A
{
int eq = __Pyx_PyUnicode_Equals(HPY_CONTEXT_FIRST_ARG_CALL s, TUPLE_GET_ITEM(kwnames, i), Py_EQ);
if (unlikely(eq != 0)) {
- if (unlikely(eq < 0)) return API_NULL_VALUE; // error
+ if (unlikely(eq < 0)) return API_NULL_VALUE; /* error */
return kwvalues[i];
}
}
- return API_NULL_VALUE; // not found (no exception set)
+ return API_NULL_VALUE; /* not found (no exception set) */
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000
diff --git a/Cython/Utility/HPyUtils.c b/Cython/Utility/HPyUtils.c
index cb63f567205..41ba47eb409 100644
--- a/Cython/Utility/HPyUtils.c
+++ b/Cython/Utility/HPyUtils.c
@@ -166,6 +166,9 @@
#define LIST_CREATE_ASSIGN(tuple, builder, index, item) HPyListBuilder_Set(HPY_CONTEXT_CNAME, builder, index, item)
#define LIST_CREATE_FINALISE(target, builder) target = HPyListBuilder_Build(HPY_CONTEXT_CNAME, builder);
+ //Unicode Type
+ #define UNICODE_GET_LENGTH(s) HPy_Length(HPY_CONTEXT_CNAME, s)
+
//PyObject/HPy Handle Type
#define PYOBJECT_GET_ITEM(o, attr_name) HPy_GetItem(HPY_CONTEXT_CNAME, o, attr_name)
#define PYOBJECT_SET_ITEM(o, attr_name, attr_val) HPy_SetItem(HPY_CONTEXT_CNAME, o, attr_name, attr_val)
@@ -370,6 +373,9 @@
#define LIST_CREATE_ASSIGN(tuple, builder, index, item) __Pyx_PyList_SET_ITEM(tuple, index, item)
#define LIST_CREATE_FINALISE(target, null)
+ //Unicode Type
+ #define UNICODE_GET_LENGTH(s) PyUnicode_GetLength(s)
+
//PyObject/HPy Handle Type
#define PYOBJECT_GET_ITEM(o, attr_name) PyObject_GetItem(o, attr_name)
#define PYOBJECT_SET_ITEM(o, attr_name, attr_val) PyObject_SetItem(o, attr_name, attr_val)
diff --git a/Cython/Utility/ImportExport.c b/Cython/Utility/ImportExport.c
index 546c4bb6978..3f1b6a04fd3 100644
--- a/Cython/Utility/ImportExport.c
+++ b/Cython/Utility/ImportExport.c
@@ -12,13 +12,11 @@ static PYOBJECT_TYPE __Pyx__ImportDottedModule_Error(HPY_CONTEXT_FIRST_ARG_DEF P
if (unlikely(PyErr_Occurred())) {
PyErr_Clear();
}
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
size = PyTuple_GET_SIZE(parts_tuple);
#else
size = TUPLE_GET_SIZE_SAFE(parts_tuple);
- if (size < 0) {
- goto bad;
- }
+ if (size < 0) goto bad;
#endif
if (likely(size == count)) {
partial_name = name;
@@ -59,7 +57,7 @@ static PYOBJECT_TYPE __Pyx__ImportDottedModule_Lookup(HPY_CONTEXT_FIRST_ARG_DEF
static PYOBJECT_TYPE __Pyx_ImportDottedModule_WalkParts(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE module, PYOBJECT_TYPE name, PYOBJECT_TYPE parts_tuple) {
API_SSIZE_T i, nparts;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
nparts = PyTuple_GET_SIZE(parts_tuple);
#else
nparts = TUPLE_GET_SIZE_SAFE(parts_tuple);
@@ -304,13 +302,18 @@ __Pyx_import_all_from(PyObject *locals, PyObject *v)
PyErr_Clear();
break;
}
- if (skip_leading_underscores &&
- likely(PyUnicode_Check(name)) &&
- likely(__Pyx_PyUnicode_GET_LENGTH(name)) &&
- __Pyx_PyUnicode_READ_CHAR(name, 0) == '_')
- {
- Py_DECREF(name);
- continue;
+ if (skip_leading_underscores && likely(PyUnicode_Check(name))) {
+ Py_ssize_t length = __Pyx_PyUnicode_GET_LENGTH(name);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) {
+ Py_DECREF(name);
+ return -1;
+ }
+ #endif
+ if (likely(length) && __Pyx_PyUnicode_READ_CHAR(name, 0) == '_') {
+ Py_DECREF(name);
+ continue;
+ }
}
value = PyObject_GetAttr(v, name);
if (value == NULL)
@@ -766,27 +769,24 @@ static void* __Pyx_GetVtable(PyTypeObject *type) {
/////////////// MergeVTables.proto ///////////////
//@requires: GetVTable
-// TODO: find a way to make this work with the Limited API!
-#if !CYTHON_COMPILING_IN_LIMITED_API
static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/
-#endif
/////////////// MergeVTables ///////////////
-#if !CYTHON_COMPILING_IN_LIMITED_API
static int __Pyx_MergeVtables(PyTypeObject *type) {
- int i;
+ int i=0;
+ Py_ssize_t size;
void** base_vtables;
- __Pyx_TypeName tp_base_name;
- __Pyx_TypeName base_name;
+ __Pyx_TypeName tp_base_name = NULL;
+ __Pyx_TypeName base_name = NULL;
void* unknown = (void*)-1;
- PyObject* bases = type->tp_bases;
+ PyObject* bases = __Pyx_PyType_GetSlot(type, tp_bases, PyObject*);
int base_depth = 0;
{
- PyTypeObject* base = type->tp_base;
+ PyTypeObject* base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
while (base) {
base_depth += 1;
- base = base->tp_base;
+ base = __Pyx_PyType_GetSlot(base, tp_base, PyTypeObject*);
}
}
base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1));
@@ -797,11 +797,31 @@ static int __Pyx_MergeVtables(PyTypeObject *type) {
// resolution isn't possible and we must reject it just as when the
// instance struct is so extended. (It would be good to also do this
// check when a multiple-base class is created in pure Python as well.)
- for (i = 1; i < PyTuple_GET_SIZE(bases); i++) {
- void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i)));
+#if CYTHON_COMPILING_IN_LIMITED_API
+ size = PyTuple_Size(bases);
+ if (size < 0) goto other_failure;
+#else
+ size = PyTuple_GET_SIZE(bases);
+#endif
+ for (i = 1; i < size; i++) {
+ PyObject *basei;
+ void* base_vtable;
+#if CYTHON_AVOID_BORROWED_REFS
+ basei = PySequence_GetItem(bases, i);
+ if (unlikely(!basei)) goto other_failure;
+#elif !CYTHON_ASSUME_SAFE_MACROS
+ basei = PyTuple_GetItem(bases, i);
+ if (unlikely(!basei)) goto other_failure;
+#else
+ basei = PyTuple_GET_ITEM(bases, i);
+#endif
+ base_vtable = __Pyx_GetVtable((PyTypeObject*)basei);
+#if CYTHON_AVOID_BORROWED_REFS
+ Py_DECREF(basei);
+#endif
if (base_vtable != NULL) {
int j;
- PyTypeObject* base = type->tp_base;
+ PyTypeObject* base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
for (j = 0; j < base_depth; j++) {
if (base_vtables[j] == unknown) {
base_vtables[j] = __Pyx_GetVtable(base);
@@ -813,7 +833,7 @@ static int __Pyx_MergeVtables(PyTypeObject *type) {
// No more potential matching bases (with vtables).
goto bad;
}
- base = base->tp_base;
+ base = __Pyx_PyType_GetSlot(base, tp_base, PyTypeObject*);
}
}
}
@@ -821,16 +841,37 @@ static int __Pyx_MergeVtables(PyTypeObject *type) {
free(base_vtables);
return 0;
bad:
- tp_base_name = __Pyx_PyType_GetName(type->tp_base);
- base_name = __Pyx_PyType_GetName((PyTypeObject*)PyTuple_GET_ITEM(bases, i));
+ {
+ PyTypeObject* basei = NULL;
+ PyTypeObject* tp_base = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*);
+ tp_base_name = __Pyx_PyType_GetName(tp_base);
+#if CYTHON_AVOID_BORROWED_REFS
+ basei = (PyTypeObject*)PySequence_GetItem(bases, i);
+ if (unlikely(!basei)) goto really_bad;
+#elif !CYTHON_ASSUME_SAFE_MACROS
+ basei = (PyTypeObject*)PyTuple_GetItem(bases, i);
+ if (unlikely(!basei)) goto really_bad;
+#else
+ basei = (PyTypeObject*)PyTuple_GET_ITEM(bases, i);
+#endif
+ base_name = __Pyx_PyType_GetName(basei);
+#if CYTHON_AVOID_BORROWED_REFS
+ Py_DECREF(basei);
+#endif
+ }
PyErr_Format(PyExc_TypeError,
"multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name);
+#if CYTHON_AVOID_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS
+really_bad: // reached when the 'bad' error path itself failed partway through
+#endif
__Pyx_DECREF_TypeName(tp_base_name);
__Pyx_DECREF_TypeName(base_name);
+#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_AVOID_BORROWED_REFS || !CYTHON_ASSUME_SAFE_MACROS
+other_failure:
+#endif
free(base_vtables);
return -1;
}
-#endif
/////////////// ImportNumPyArray.proto ///////////////
diff --git a/Cython/Utility/ModuleSetupCode.c b/Cython/Utility/ModuleSetupCode.c
index d718001cf6d..1d81af50456 100644
--- a/Cython/Utility/ModuleSetupCode.c
+++ b/Cython/Utility/ModuleSetupCode.c
@@ -88,6 +88,8 @@
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
@@ -114,6 +116,8 @@
#ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
#define CYTHON_UPDATE_DESCRIPTOR_DOC 0
#endif
+ #undef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 0
#elif defined(PYPY_VERSION)
#define CYTHON_COMPILING_IN_PYPY 1
@@ -144,6 +148,9 @@
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
+ #ifndef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 1
+ #endif
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
@@ -175,6 +182,8 @@
#ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
#define CYTHON_UPDATE_DESCRIPTOR_DOC 0
#endif
+ #undef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 0
#elif defined(CYTHON_LIMITED_API)
// EXPERIMENTAL !!
@@ -214,6 +223,8 @@
#endif
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
@@ -242,6 +253,8 @@
#ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
#define CYTHON_UPDATE_DESCRIPTOR_DOC 0
#endif
+ #undef CYTHON_USE_FREELISTS
+ #define CYTHON_USE_FREELISTS 0
#elif defined(Py_GIL_DISABLED) || defined(Py_NOGIL)
#define CYTHON_COMPILING_IN_PYPY 0
@@ -253,11 +266,17 @@
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
+ #ifndef CYTHON_USE_TYPE_SPECS
+ #define CYTHON_USE_TYPE_SPECS 0
+ #endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#ifndef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
+ #ifndef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
@@ -265,24 +284,36 @@
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
+ #ifndef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 1
+ #endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_GIL
+ #define CYTHON_FAST_GIL 0
+ #ifndef CYTHON_METH_FASTCALL
+ #define CYTHON_METH_FASTCALL 0
+ #endif
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
+ #ifndef CYTHON_PEP487_INIT_SUBCLASS
+ #define CYTHON_PEP487_INIT_SUBCLASS 1
+ #endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 1
#endif
+ #ifndef CYTHON_USE_MODULE_STATE
+ #define CYTHON_USE_MODULE_STATE 0
+ #endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 1
#endif
@@ -290,6 +321,15 @@
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
+ #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
+ #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
+ #endif
+ #ifndef CYTHON_USE_FREELISTS
+ // TODO - we could probably enable CYTHON_USE_FREELISTS by default in future since
+ // this is just a variant of cpython now, but we'd need to be very careful to make
+ // them thread safe. Since it will probably work, let the user decide.
+ #define CYTHON_USE_FREELISTS 0
+ #endif
#else
#define CYTHON_COMPILING_IN_PYPY 0
@@ -335,6 +375,10 @@
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
+ // CYTHON_ASSUME_SAFE_SIZE - Assume that Py*_GET_SIZE() calls do not fail and do not raise exceptions.
+ #ifndef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 1
+ #endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
@@ -417,6 +461,8 @@
#define CYTHON_REFNANNY 0
#undef CYTHON_METH_FASTCALL
#define CYTHON_METH_FASTCALL 1
+ #undef CYTHON_ASSUME_SAFE_SIZE
+ #define CYTHON_ASSUME_SAFE_SIZE 0
#undef CYTHON_CLINE_IN_TRACEBACK
#define CYTHON_CLINE_IN_TRACEBACK 0 //Is disabled for the Limited API - probably safest to disable it for HPy then
#else
@@ -428,8 +474,17 @@
#endif
#ifndef CYTHON_VECTORCALL
+#if CYTHON_COMPILING_IN_LIMITED_API
+// Possibly needs a bit of clearing up, however:
+// the limited API doesn't define CYTHON_FAST_PYCCALL (because that involves
+// a lot of access to internals) but does define CYTHON_VECTORCALL because
+// that's available cleanly from Python 3.12. Note that VectorcallDict is the
+// only part that is not available there.
+#define CYTHON_VECTORCALL (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
+#else
#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1)
#endif
+#endif
/* Whether to use METH_FASTCALL with a fake backported implementation of vectorcall */
#define CYTHON_BACKPORT_VECTORCALL (!CYTHON_USING_HPY && CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1)
@@ -701,7 +756,7 @@ class __Pyx_FakeReference {
PyObject *exception_table = NULL;
PyObject *types_module=NULL, *code_type=NULL, *result=NULL;
#if __PYX_LIMITED_VERSION_HEX < 0x030B0000
- PyObject *version_info; // borrowed
+ PyObject *version_info; /* borrowed */
PyObject *py_minor_version = NULL;
#endif
long minor_version = 0;
@@ -711,8 +766,9 @@ class __Pyx_FakeReference {
PyErr_Fetch(&type, &value, &traceback);
#if __PYX_LIMITED_VERSION_HEX >= 0x030B0000
- minor_version = 11; // we don't yet need to distinguish between versions > 11
- // Note that from 3.13, when we do we can use Py_Version
+ minor_version = 11;
+ // we don't yet need to distinguish between versions > 11
+ // Note that from 3.13, when we do, we can use Py_Version
#else
if (!(version_info = PySys_GetObject("version_info"))) goto end;
if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end;
@@ -793,7 +849,7 @@ class __Pyx_FakeReference {
// 1. pass an empty bytes string as exception_table
// 2. pass name as qualname (TODO this might implementing properly in future)
PyCodeObject *result;
- PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); // we don't have access to __pyx_empty_bytes here
+ PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); /* we don't have access to __pyx_empty_bytes here */
if (!empty_bytes) return NULL;
result =
#if PY_VERSION_HEX >= 0x030C0000
@@ -975,7 +1031,7 @@ static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void *cfunc) {
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ // __Pyx_PyCode_HasFreeVars isn't easily emulated in the limited API (but isn't really necessary)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
@@ -1079,8 +1135,10 @@ static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict,
// a little hacky, but it does work in the limited API .
// (It doesn't work on PyPy but that probably isn't a bug.)
#define __Pyx_SetItemOnTypeDict(tp, k, v) PyObject_GenericSetAttr((PyObject*)tp, k, v)
+ #define __Pyx_DelItemOnTypeDict(tp, k) PyObject_GenericSetAttr((PyObject*)tp, k, NULL)
#else
- #define __Pyx_SetItemOnTypeDict(tp, k, v) PyDict_SetItem(tp->tp_dict, k, v)
+ #define __Pyx_SetItemOnTypeDict(tp, k, v) PyDict_SetItem(((PyTypeObject*)(tp))->tp_dict, k, v)
+ #define __Pyx_DelItemOnTypeDict(tp, k) PyDict_DelItem(((PyTypeObject*)(tp))->tp_dict, k)
#endif
#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000
@@ -1098,7 +1156,6 @@ static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict,
#if CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_PyUnicode_READY(op) (0)
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U)
#define __Pyx_PyUnicode_KIND(u) ((void)u, (0))
@@ -1116,7 +1173,6 @@ static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict,
0 : _PyUnicode_Ready((PyObject *)(op)))
#endif
- #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u))
@@ -1205,23 +1261,30 @@ static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict,
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0))
#define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0))
- #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o)
- #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o)
- #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o)
- #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
- #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o)
#else
#define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i)
// NOTE: might fail with exception => check for -1
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
- // Note that this doesn't leak a reference to whatever's at o[i]
+ // NOTE: this doesn't leak a reference to whatever is at o[i]
#define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v)
#define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v)
+#endif
+
+#if CYTHON_ASSUME_SAFE_SIZE
+ #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o)
+ #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o)
+ #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o)
+ #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
+ #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o)
+ #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GET_LENGTH(o)
+#else
+ // These all need exception checks for -1.
#define __Pyx_PyTuple_GET_SIZE(o) TUPLE_GET_SIZE(o)
#define __Pyx_PyList_GET_SIZE(o) LIST_GET_SIZE(o)
#define __Pyx_PySet_GET_SIZE(o) PySet_Size(o)
#define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o)
#define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o)
+ #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GetLength(o)
#endif
#if PY_VERSION_HEX >= 0x030d00A1
diff --git a/Cython/Utility/ObjectHandling.c b/Cython/Utility/ObjectHandling.c
index 4cceeb7a49f..cf042a79517 100644
--- a/Cython/Utility/ObjectHandling.c
+++ b/Cython/Utility/ObjectHandling.c
@@ -52,10 +52,16 @@ static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseNoneNotIterableError();
- } else if (PyTuple_GET_SIZE(t) < index) {
- __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
} else {
- __Pyx_RaiseTooManyValuesError(index);
+ Py_ssize_t size = __Pyx_PyTuple_GET_SIZE(t);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(size < 0)) return;
+ #endif
+ if (size < index) {
+ __Pyx_RaiseNeedMoreValuesError(size);
+ } else {
+ __Pyx_RaiseTooManyValuesError(index);
+ }
}
}
@@ -79,12 +85,8 @@ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
/////////////// UnpackTuple2.proto ///////////////
-#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple) \
- (likely(is_tuple || PyTuple_Check(tuple)) ? \
- (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ? \
- __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) : \
- (__Pyx_UnpackTupleError(tuple, 2), -1)) : \
- __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple))
+static CYTHON_INLINE int __Pyx_unpack_tuple2(
+ PyObject* tuple, PyObject** value1, PyObject** value2, int is_tuple, int has_known_size, int decref_tuple);
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple);
@@ -96,6 +98,27 @@ static int __Pyx_unpack_tuple2_generic(
//@requires: UnpackTupleError
//@requires: RaiseNeedMoreValuesToUnpack
+static CYTHON_INLINE int __Pyx_unpack_tuple2(
+ PyObject* tuple, PyObject** value1, PyObject** value2, int is_tuple, int has_known_size, int decref_tuple) {
+ if (likely(is_tuple || PyTuple_Check(tuple))) {
+ Py_ssize_t size;
+ if (has_known_size) {
+ return __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple);
+ }
+ size = __Pyx_PyTuple_GET_SIZE(tuple);
+ if (likely(size == 2)) {
+ return __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple);
+ }
+ if (size >= 0) {
+ // "size == -1" indicates an error already.
+ __Pyx_UnpackTupleError(tuple, 2);
+ }
+ return -1;
+ } else {
+ return __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple);
+ }
+}
+
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) {
PyObject *value1 = NULL, *value2 = NULL;
@@ -241,9 +264,10 @@ static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/
// detects an error that occurred in the iterator, it returns -1.
static CYTHON_INLINE int __Pyx_IterFinish(void) {
+ PyObject* exc_type;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
- PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType();
+ exc_type = __Pyx_PyErr_CurrentExceptionType();
if (unlikely(exc_type)) {
if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
return -1;
@@ -431,7 +455,7 @@ static PYOBJECT_TYPE __Pyx_GetItemInt_Generic(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT
static CYTHON_INLINE PYOBJECT_TYPE __Pyx_GetItemInt_{{type}}_Fast(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE o, API_SSIZE_T i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += Py{{type}}_GET_SIZE(o);
@@ -451,7 +475,7 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx_GetItemInt_{{type}}_Fast(HPY_CONTEXT_FI
static CYTHON_INLINE PYOBJECT_TYPE __Pyx_GetItemInt_Fast(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE o, API_SSIZE_T i,
int is_list, CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
@@ -529,7 +553,7 @@ static int __Pyx_SetItemInt_Generic(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE o, P
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE o, API_SSIZE_T i, PYOBJECT_TYPE v, int is_list,
CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
@@ -978,7 +1002,7 @@ static PYOBJECT_TYPE __Pyx_CalculateMetaclass(HPY_CONTEXT_FIRST_ARG_DEF PYTYPEOB
static PYOBJECT_TYPE __Pyx_CalculateMetaclass(HPY_CONTEXT_FIRST_ARG_DEF PYTYPEOBJECT_TYPE metaclass, PYOBJECT_TYPE bases) {
API_SSIZE_T i, nbases;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
nbases = PyTuple_GET_SIZE(bases);
#else
nbases = TUPLE_GET_SIZE(bases);
@@ -1033,7 +1057,14 @@ static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases); /*proto*/
static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) {
PyObject *metaclass;
- if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) {
+ #if CYTHON_ASSUME_SAFE_SIZE
+ if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0)
+ #else
+ Py_ssize_t tuple_size = PyTuple_Check(bases) ? PyTuple_Size(bases) : 0;
+ if (unlikely(tuple_size < 0)) return NULL;
+ if (tuple_size > 0)
+ #endif
+ {
PyTypeObject *metatype;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *base = PyTuple_GET_ITEM(bases, 0);
@@ -1134,7 +1165,7 @@ __Pyx_PEP560_update_bases(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE bases)
PYOBJECT_TYPE result, CAPI_IS_POINTER new_bases = API_NULL_VALUE;
/*assert(PyTuple_Check(bases));*/
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
size_bases = PyTuple_GET_SIZE(bases);
#else
size_bases = TUPLE_GET_SIZE_SAFE(bases);
@@ -1216,7 +1247,7 @@ __Pyx_PEP560_update_bases(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE bases)
#endif
}
}
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_SIZE
j = PyList_GET_SIZE(new_bases);
#else
j = LIST_GET_SIZE_SAFE(new_bases);
@@ -1908,10 +1939,11 @@ typedef struct {
static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) {
// NOTE: possible optimization - use vectorcall
+ PyObject *result;
PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args));
if (unlikely(!selfless_args)) return NULL;
- PyObject *result = PyObject_Call(method, selfless_args, kwargs);
+ result = PyObject_Call(method, selfless_args, kwargs);
Py_DECREF(selfless_args);
return result;
}
@@ -1982,7 +2014,9 @@ static int __Pyx_TryUnpackUnboundCMethod(HPY_CONTEXT_FIRST_ARG_DEF __Pyx_CachedC
/////////////// CallUnboundCMethod0.proto ///////////////
//@substitute: naming
+CYTHON_UNUSED
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/
+
#if CYTHON_COMPILING_IN_CPYTHON && !CYTHON_USING_HPY
// FASTCALL methods receive "&empty_tuple" as simple "PyObject[0]*"
#define __Pyx_CallUnboundCMethod0(cfunc, self) \
@@ -2026,6 +2060,7 @@ static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObje
/////////////// CallUnboundCMethod1.proto ///////////////
#if CYTHON_USING_HPY
+CYTHON_UNUSED
static PYOBJECT_TYPE __Pyx__CallUnboundCMethod1(HPyContext *HPY_CONTEXT_CNAME, __Pyx_CachedCFunction *cfunc, PYOBJECT_TYPE self, PYOBJECT_TYPE arg);/*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
@@ -2105,6 +2140,7 @@ static PYOBJECT_TYPE __Pyx__CallUnboundCMethod1(HPY_CONTEXT_FIRST_ARG_DEF __Pyx_
/////////////// CallUnboundCMethod2.proto ///////////////
+CYTHON_UNUSED
static PYOBJECT_TYPE __Pyx__CallUnboundCMethod2(HPY_CONTEXT_FIRST_ARG_DEF __Pyx_CachedCFunction *cfunc, PYOBJECT_TYPE self, PYOBJECT_TYPE arg1, PYOBJECT_TYPE arg2);
#if CYTHON_COMPILING_IN_CPYTHON
@@ -2255,7 +2291,7 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx_PyObject_FastCallDict(PyObject *func, P
#endif
if (!kwargs) {
- #if CYTHON_VECTORCALL
+ #if CYTHON_VECTORCALL && !CYTHON_COMPILING_IN_LIMITED_API
#if PY_VERSION_HEX < 0x03090000
vectorcallfunc f = _PyVectorcall_Function(func);
#else
@@ -2283,6 +2319,70 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx_PyObject_FastCallDict(PyObject *func, P
#endif
}
#endif
+/////////////// PyObjectVectorCallKwBuilder.proto ////////////////
+//@requires: PyObjectFastCall
+// For versions that define PyObject_Vectorcall, use PyObject_Vectorcall and define functions to build a kwnames tuple and add arguments to args.
+// For versions that don't, use __Pyx_PyObject_FastCallDict and functions to build a keyword dictionary
+
+CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n); /* proto */
+
+#if CYTHON_VECTORCALL
+#if PY_VERSION_HEX >= 0x03090000
+#define __Pyx_Object_Vectorcall_CallFromBuilder PyObject_Vectorcall
+#else
+#define __Pyx_Object_Vectorcall_CallFromBuilder _PyObject_Vectorcall
+#endif
+
+#define __Pyx_MakeVectorcallBuilderKwds(n) PyTuple_New(n)
+
+static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n); /* proto */
+static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n); /* proto */
+#else
+#define __Pyx_Object_Vectorcall_CallFromBuilder __Pyx_PyObject_FastCallDict
+
+#define __Pyx_MakeVectorcallBuilderKwds(n) PyDict_New()
+
+#define __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n) PyDict_SetItem(builder, key, value)
+#define __Pyx_VectorcallBuilder_AddArgStr(key, value, builder, args, n) PyDict_SetItemString(builder, key, value)
+#endif
+
+
+
+/////////////// PyObjectVectorCallKwBuilder ////////////////
+
+#if CYTHON_VECTORCALL
+static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
+ (void)__Pyx_PyObject_FastCallDict;
+
+ if (unlikely(__Pyx_PyTuple_SET_ITEM(builder, n, key))) return -1;
+ Py_INCREF(key);
+ args[n] = value;
+ return 0;
+}
+
+CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
+ (void)__Pyx_VectorcallBuilder_AddArgStr;
+ if (unlikely(!PyUnicode_Check(key))) {
+ PyErr_SetString(PyExc_TypeError, "keywords must be strings");
+ return -1;
+ }
+ return __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n);
+}
+
+static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
+ PyObject *pyKey = PyUnicode_FromString(key);
+ if (!pyKey) return -1;
+ return __Pyx_VectorcallBuilder_AddArg(pyKey, value, builder, args, n);
+}
+#else // CYTHON_VECTORCALL
+CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
+ if (unlikely(!PyUnicode_Check(key))) {
+ PyErr_SetString(PyExc_TypeError, "keywords must be strings");
+ return -1;
+ }
+ return PyDict_SetItem(builder, key, value);
+}
+#endif
/////////////// PyObjectCallMethod0.proto ///////////////
@@ -2672,13 +2772,13 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx_PyObject_CallNoArg(HPY_CONTEXT_FIRST_AR
/////////////// PyVectorcallFastCallDict.proto ///////////////
-#if CYTHON_METH_FASTCALL
+#if CYTHON_METH_FASTCALL && (CYTHON_VECTORCALL || CYTHON_BACKPORT_VECTORCALL)
static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw);
#endif
/////////////// PyVectorcallFastCallDict ///////////////
-#if CYTHON_METH_FASTCALL
+#if CYTHON_METH_FASTCALL && (CYTHON_VECTORCALL || CYTHON_BACKPORT_VECTORCALL)
// Slow path when kw is non-empty
static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw)
{
diff --git a/Cython/Utility/Optimize.c b/Cython/Utility/Optimize.c
index 711b96d3ed4..83c2c343fe6 100644
--- a/Cython/Utility/Optimize.c
+++ b/Cython/Utility/Optimize.c
@@ -94,7 +94,7 @@ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
static CYTHON_INLINE PYOBJECT_TYPE __Pyx__PyObject_Pop(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE L); /*proto*/
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); /*proto*/
#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ? \
__Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L))
@@ -116,7 +116,7 @@ static CYTHON_INLINE PYOBJECT_TYPE __Pyx__PyObject_Pop(HPY_CONTEXT_FIRST_ARG_DEF
return HPY_LEGACY_OBJECT_FROM(__Pyx_PyObject_CallMethod0(HPY_LEGACY_OBJECT_AS(L), HPY_LEGACY_OBJECT_AS(PYIDENT("pop"))));
}
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) {
/* Check that both the size is positive and no reallocation shrinking needs to be done. */
if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) {
@@ -146,7 +146,7 @@ static PYOBJECT_TYPE __Pyx__PyObject_PopIndex(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT
static PYOBJECT_TYPE __Pyx__PyObject_PopNewIndex(PYOBJECT_TYPE L, PYOBJECT_TYPE py_ix); /*proto*/
static PYOBJECT_TYPE __Pyx__PyObject_PopIndex(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE L, PYOBJECT_TYPE py_ix); /*proto*/
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix); /*proto*/
#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \
@@ -192,7 +192,7 @@ static PYOBJECT_TYPE __Pyx__PyObject_PopIndex(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT
return retval;
}
-#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) {
Py_ssize_t size = PyList_GET_SIZE(L);
if (likely(size > (((PyListObject*)L)->allocated >> 1))) {
@@ -286,17 +286,57 @@ static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyOb
/////////////// py_dict_pop ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value) {
-#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000
- if ((1)) {
- return _PyDict_Pop(d, key, default_value);
- } else
- // avoid "function unused" warnings
-#endif
+#if PY_VERSION_HEX >= 0x030d00A2
+ PyObject *value;
+ if (PyDict_Pop(d, key, &value) == 0) {
+ if (default_value) {
+ Py_INCREF(default_value);
+ } else {
+ PyErr_SetObject(PyExc_KeyError, key);
+ }
+ value = default_value;
+ }
+ // On error, PyDict_Pop() returns -1 and sets value to NULL (our own exception return value).
+ return value;
+#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000
+ return _PyDict_Pop(d, key, default_value);
+#else
if (default_value) {
return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, default_value);
} else {
return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key);
}
+#endif
+}
+
+
+/////////////// py_dict_pop_ignore.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PyDict_Pop_ignore(PyObject *d, PyObject *key, PyObject *default_value); /*proto*/
+
+/////////////// py_dict_pop_ignore ///////////////
+
+static CYTHON_INLINE int __Pyx_PyDict_Pop_ignore(PyObject *d, PyObject *key, PyObject *default_value) {
+ // We take the "default_value" as argument to avoid "unused" warnings, but we ignore it here.
+#if PY_VERSION_HEX >= 0x030d00A2
+ int result = PyDict_Pop(d, key, NULL);
+ CYTHON_UNUSED_VAR(default_value);
+ return (unlikely(result == -1)) ? -1 : 0;
+#else
+ PyObject *value;
+ CYTHON_UNUSED_VAR(default_value);
+
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000
+ value = _PyDict_Pop(d, key, Py_None);
+ #else
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, Py_None);
+ #endif
+
+ if (unlikely(value == NULL))
+ return -1;
+ Py_DECREF(value);
+ return 0;
+#endif
}
@@ -384,8 +424,22 @@ static CYTHON_INLINE int __Pyx_dict_iter_next(
}
Py_INCREF(key);
Py_INCREF(value);
+ #if CYTHON_ASSUME_SAFE_MACROS
PyTuple_SET_ITEM(tuple, 0, key);
PyTuple_SET_ITEM(tuple, 1, value);
+ #else
+ if (unlikely(PyTuple_SetItem(tuple, 0, key) < 0)) {
+ // decref value; PyTuple_SetItem decrefs key on failure
+ Py_DECREF(value);
+ Py_DECREF(tuple);
+ return -1;
+ }
+ if (unlikely(PyTuple_SetItem(tuple, 1, value) < 0)) {
+ // PyTuple_SetItem decrefs value on failure
+ Py_DECREF(tuple);
+ return -1;
+ }
+ #endif
*pitem = tuple;
} else {
if (pkey) {
@@ -400,15 +454,33 @@ static CYTHON_INLINE int __Pyx_dict_iter_next(
return 1;
} else if (PyTuple_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
- if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
+ Py_ssize_t tuple_size = __Pyx_PyTuple_GET_SIZE(iter_obj);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(tuple_size < 0)) return -1;
+ #endif
+ if (unlikely(pos >= tuple_size)) return 0;
*ppos = pos + 1;
+ #if CYTHON_ASSUME_SAFE_MACROS
next_item = PyTuple_GET_ITEM(iter_obj, pos);
+ #else
+ next_item = PyTuple_GetItem(iter_obj, pos);
+ if (unlikely(!next_item)) return -1;
+ #endif
Py_INCREF(next_item);
} else if (PyList_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
- if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
+ Py_ssize_t list_size = __Pyx_PyList_GET_SIZE(iter_obj);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(list_size < 0)) return -1;
+ #endif
+ if (unlikely(pos >= list_size)) return 0;
*ppos = pos + 1;
+ #if CYTHON_ASSUME_SAFE_MACROS
next_item = PyList_GET_ITEM(iter_obj, pos);
+ #else
+ next_item = PyList_GetItem(iter_obj, pos);
+ if (unlikely(!next_item)) return -1;
+ #endif
Py_INCREF(next_item);
} else
#endif
@@ -603,7 +675,7 @@ static double __Pyx__PyObject_AsDouble(PyObject* obj); /* proto */
PyFloat_AsDouble(obj) : __Pyx__PyObject_AsDouble(obj))
#else
#define __Pyx_PyObject_AsDouble(obj) \
-((likely(PyFloat_CheckExact(obj))) ? PyFloat_AS_DOUBLE(obj) : \
+((likely(PyFloat_CheckExact(obj))) ? __Pyx_PyFloat_AS_DOUBLE(obj) : \
likely(PyLong_CheckExact(obj)) ? \
PyLong_AsDouble(obj) : __Pyx__PyObject_AsDouble(obj))
#endif
@@ -644,7 +716,7 @@ static double __Pyx__PyObject_AsDouble(PyObject* obj) {
}
#endif
if (likely(float_value)) {
- double value = PyFloat_AS_DOUBLE(float_value);
+ double value = __Pyx_PyFloat_AS_DOUBLE(float_value);
Py_DECREF(float_value);
return value;
}
@@ -818,7 +890,7 @@ static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize
static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) {
char* as_c_string;
Py_ssize_t size;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
as_c_string = PyBytes_AS_STRING(obj);
size = PyBytes_GET_SIZE(obj);
#else
@@ -831,7 +903,7 @@ static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) {
static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) {
char* as_c_string;
Py_ssize_t size;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
as_c_string = PyByteArray_AS_STRING(obj);
size = PyByteArray_GET_SIZE(obj);
#else
@@ -850,11 +922,7 @@ static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) {
static double __Pyx_SlowPyString_AsDouble(PyObject *obj) {
PyObject *float_value = PyFloat_FromString(obj);
if (likely(float_value)) {
-#if CYTHON_ASSUME_SAFE_MACROS
- double value = PyFloat_AS_DOUBLE(float_value);
-#else
- double value = PyFloat_AsDouble(float_value);
-#endif
+ double value = __Pyx_PyFloat_AS_DOUBLE(float_value);
Py_DECREF(float_value);
return value;
}
@@ -1101,11 +1169,7 @@ static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject els
if (FLOAT_CHECK_EXACT({{pyval}})) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
-#if CYTHON_COMPILING_IN_LIMITED_API
- double {{ival}} = __pyx_PyFloat_AsDouble({{pyval}});
-#else
- double {{ival}} = PyFloat_AS_DOUBLE({{pyval}});
-#endif
+ double {{ival}} = __Pyx_PyFloat_AS_DOUBLE({{pyval}});
{{return_compare('(double)a', '(double)b', c_op)}}
}
@@ -1327,11 +1391,7 @@ static {{c_ret_type}} {{cfunc_name}}(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE op1
{{if c_op in '+-*' or op in ('TrueDivide', 'Eq', 'Ne')}}
if (FLOAT_CHECK_EXACT({{pyval}})) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
-#if CYTHON_COMPILING_IN_LIMITED_API
- double {{ival}} = __pyx_PyFloat_AsDouble({{pyval}});
-#else
- double {{ival}} = PyFloat_AS_DOUBLE({{pyval}});
-#endif
+ double {{ival}} = __Pyx_PyFloat_AS_DOUBLE({{pyval}});
{{if op in ('Eq', 'Ne')}}
if ((double)a {{c_op}} (double)b) {
{{return_true}};
@@ -1411,11 +1471,7 @@ static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, double floatv
{{endif}}
if (likely(PyFloat_CheckExact({{pyval}}))) {
-#if CYTHON_COMPILING_IN_LIMITED_API
- {{fval}} = __pyx_PyFloat_AsDouble({{pyval}});
-#else
- {{fval}} = PyFloat_AS_DOUBLE({{pyval}});
-#endif
+ {{fval}} = __Pyx_PyFloat_AS_DOUBLE({{pyval}});
{{zerodiv_check(fval)}}
} else
diff --git a/Cython/Utility/StringTools.c b/Cython/Utility/StringTools.c
index 7d92c8bd110..d3e65e38c3c 100644
--- a/Cython/Utility/StringTools.c
+++ b/Cython/Utility/StringTools.c
@@ -79,75 +79,13 @@ static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 ch
//////////////////// PyUCS4InUnicode ////////////////////
-#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE))
-
-#if PY_VERSION_HEX < 0x03090000
-#define __Pyx_PyUnicode_AS_UNICODE(op) PyUnicode_AS_UNICODE(op)
-#define __Pyx_PyUnicode_GET_SIZE(op) PyUnicode_GET_SIZE(op)
-#else
-// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12.
-// https://www.python.org/dev/peps/pep-0623/
-#define __Pyx_PyUnicode_AS_UNICODE(op) (((PyASCIIObject *)(op))->wstr)
-#define __Pyx_PyUnicode_GET_SIZE(op) ((PyCompactUnicodeObject *)(op))->wstr_length
-#endif
-
-#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2
-static int __Pyx_PyUnicodeBufferContainsUCS4_SP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
- /* handle surrogate pairs for Py_UNICODE buffers in 16bit Unicode builds */
- Py_UNICODE high_val, low_val;
- Py_UNICODE* pos;
- high_val = (Py_UNICODE) (0xD800 | (((character - 0x10000) >> 10) & ((1<<10)-1)));
- low_val = (Py_UNICODE) (0xDC00 | ( (character - 0x10000) & ((1<<10)-1)));
- for (pos=buffer; pos < buffer+length-1; pos++) {
- if (unlikely((high_val == pos[0]) & (low_val == pos[1]))) return 1;
- }
- return 0;
-}
-#endif
-
-static int __Pyx_PyUnicodeBufferContainsUCS4_BMP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
- Py_UNICODE uchar;
- Py_UNICODE* pos;
- uchar = (Py_UNICODE) character;
- for (pos=buffer; pos < buffer+length; pos++) {
- if (unlikely(uchar == pos[0])) return 1;
- }
- return 0;
-}
-#endif
-
static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character) {
- const int kind = PyUnicode_KIND(unicode);
- #ifdef PyUnicode_WCHAR_KIND
- if (likely(kind != PyUnicode_WCHAR_KIND))
- #endif
- {
- Py_ssize_t i;
- const void* udata = PyUnicode_DATA(unicode);
- const Py_ssize_t length = PyUnicode_GET_LENGTH(unicode);
- for (i=0; i < length; i++) {
- if (unlikely(character == PyUnicode_READ(kind, udata, i))) return 1;
- }
- return 0;
- }
-
-#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE))
-#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2
- if ((sizeof(Py_UNICODE) == 2) && unlikely(character > 65535)) {
- return __Pyx_PyUnicodeBufferContainsUCS4_SP(
- __Pyx_PyUnicode_AS_UNICODE(unicode),
- __Pyx_PyUnicode_GET_SIZE(unicode),
- character);
- } else
-#endif
- {
- return __Pyx_PyUnicodeBufferContainsUCS4_BMP(
- __Pyx_PyUnicode_AS_UNICODE(unicode),
- __Pyx_PyUnicode_GET_SIZE(unicode),
- character);
-
- }
-#endif
+    // Note that from Python 3.7, PyUnicode_FindChar() clips the indices to the
+    // string bounds, so there is no need to check the length here.
+ Py_ssize_t idx = PyUnicode_FindChar(unicode, character, 0, PY_SSIZE_T_MAX, 1);
+ if (unlikely(idx == -2)) return -1;
+ // >= 0: found the index, == -1: not found
+ return idx >= 0;
}
@@ -197,13 +135,22 @@ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(HPY_CONTEXT_FIRST_ARG_DEF PYOBJE
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
if (s1_is_unicode & s2_is_unicode) {
- Py_ssize_t length;
+ Py_ssize_t length, length2;
int kind;
void *data1, *data2;
+ #if !CYTHON_COMPILING_IN_LIMITED_API
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
+ #endif
length = __Pyx_PyUnicode_GET_LENGTH(s1);
- if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) return -1;
+ #endif
+ length2 = __Pyx_PyUnicode_GET_LENGTH(s2);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length2 < 0)) return -1;
+ #endif
+ if (length != length2) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
@@ -260,7 +207,7 @@ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int eq
//@requires: IncludeStringH
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
-#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API
+#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API || !(CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS)
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
@@ -323,7 +270,10 @@ static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ss
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
- length = PyByteArray_GET_SIZE(string);
+ length = __Pyx_PyByteArray_GET_SIZE(string);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) return -1;
+ #endif
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
@@ -353,7 +303,10 @@ static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ss
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
- length = PyByteArray_GET_SIZE(string);
+ length = __Pyx_PyByteArray_GET_SIZE(string);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) return -1;
+ #endif
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
PyByteArray_AS_STRING(string)[i] = (char) v;
@@ -387,6 +340,9 @@ static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py
if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return (Py_UCS4)-1;
if (wraparound | boundscheck) {
length = __Pyx_PyUnicode_GET_LENGTH(ustring);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) return (Py_UCS4)-1;
+ #endif
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return __Pyx_PyUnicode_READ_CHAR(ustring, i);
@@ -518,7 +474,7 @@ static CYTHON_INLINE PyObject* __Pyx_decode_bytes(
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
char* as_c_string;
Py_ssize_t size;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
as_c_string = PyBytes_AS_STRING(string);
size = PyBytes_GET_SIZE(string);
#else
@@ -540,7 +496,7 @@ static CYTHON_INLINE PyObject* __Pyx_decode_bytearray(
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
char* as_c_string;
Py_ssize_t size;
-#if CYTHON_ASSUME_SAFE_MACROS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE
as_c_string = PyByteArray_AS_STRING(string);
size = PyByteArray_GET_SIZE(string);
#else
@@ -563,8 +519,13 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
PyObject* text, Py_ssize_t start, Py_ssize_t stop) {
Py_ssize_t length;
+ #if !CYTHON_COMPILING_IN_LIMITED_API
if (unlikely(__Pyx_PyUnicode_READY(text) == -1)) return NULL;
+ #endif
length = __Pyx_PyUnicode_GET_LENGTH(text);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(length < 0)) return NULL;
+ #endif
if (start < 0) {
start += length;
if (start < 0)
@@ -578,8 +539,13 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
return __Pyx_NewRef($empty_unicode);
if (start == 0 && stop == length)
return __Pyx_NewRef(text);
+#if CYTHON_COMPILING_IN_LIMITED_API
+ // PyUnicode_Substring() does not support negative indexing but is otherwise fine to use.
+ return PyUnicode_Substring(text, start, stop);
+#else
return PyUnicode_FromKindAndData(PyUnicode_KIND(text),
PyUnicode_1BYTE_DATA(text) + start*PyUnicode_KIND(text), stop-start);
+#endif
}
@@ -637,7 +603,10 @@ static int __Pyx_PyUnicode_Tailmatch(
static int __Pyx_PyUnicode_TailmatchTuple(PyObject* s, PyObject* substrings,
Py_ssize_t start, Py_ssize_t end, int direction) {
- Py_ssize_t i, count = PyTuple_GET_SIZE(substrings);
+ Py_ssize_t i, count = __Pyx_PyTuple_GET_SIZE(substrings);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(count < 0)) return -1;
+ #endif
for (i = 0; i < count; i++) {
Py_ssize_t result;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
@@ -685,9 +654,16 @@ static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
Py_buffer view;
view.obj = NULL;
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(self_len < 0)) return -1;
+ #endif
+
if (PyBytes_Check(arg)) {
sub_ptr = PyBytes_AS_STRING(arg);
sub_len = PyBytes_GET_SIZE(arg);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(sub_len < 0)) return -1;
+ #endif
} else {
if (unlikely(PyObject_GetBuffer(self, &view, PyBUF_SIMPLE) == -1))
return -1;
@@ -726,6 +702,9 @@ static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
static int __Pyx_PyBytes_TailmatchTuple(PyObject* self, PyObject* substrings,
Py_ssize_t start, Py_ssize_t end, int direction) {
Py_ssize_t i, count = PyTuple_GET_SIZE(substrings);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(count < 0)) return -1;
+ #endif
for (i = 0; i < count; i++) {
int result;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
@@ -781,10 +760,18 @@ static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t i
/////////////// bytes_index ///////////////
static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) {
- if (index < 0)
- index += PyBytes_GET_SIZE(bytes);
+ if (index < 0) {
+ Py_ssize_t size = __Pyx_PyBytes_GET_SIZE(bytes);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(size < 0)) return (char) -1;
+ #endif
+ index += size;
+ }
if (check_bounds) {
- Py_ssize_t size = PyBytes_GET_SIZE(bytes);
+ Py_ssize_t size = __Pyx_PyBytes_GET_SIZE(bytes);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(size < 0)) return (char) -1;
+ #endif
if (unlikely(!__Pyx_is_valid_index(index, size))) {
PyErr_SetString(PyExc_IndexError, "string index out of range");
return (char) -1;
@@ -816,21 +803,22 @@ static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* value
/////////////// JoinPyUnicode.proto ///////////////
-static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE value_tuple, API_SSIZE_T value_count,
- API_SSIZE_T result_ulength, Py_UCS4 max_char);
+static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE *values, API_SSIZE_T value_count,
+ API_SSIZE_T result_ulength, Py_UCS4 max_char);
/////////////// JoinPyUnicode ///////////////
//@requires: IncludeStringH
//@substitute: naming
-static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE value_tuple, API_SSIZE_T value_count,
- API_SSIZE_T result_ulength, Py_UCS4 max_char) {
+static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYPE *values, API_SSIZE_T value_count,
+ API_SSIZE_T result_ulength, Py_UCS4 max_char) {
#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *result_uval;
int result_ukind, kind_shift;
Py_ssize_t i, char_pos;
void *result_udata;
+ if (max_char > 1114111) max_char = 1114111;
result_uval = PyUnicode_New(result_ulength, max_char);
if (unlikely(!result_uval)) return NULL;
result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND;
@@ -838,15 +826,23 @@ static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYP
result_udata = PyUnicode_DATA(result_uval);
assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0);
+ if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - result_ulength < 0))
+ goto overflow;
+
char_pos = 0;
for (i=0; i < value_count; i++) {
int ukind;
Py_ssize_t ulength;
void *udata;
- PyObject *uval = PyTuple_GET_ITEM(value_tuple, i);
+ PyObject *uval = values[i];
+ #if !CYTHON_COMPILING_IN_LIMITED_API
if (unlikely(__Pyx_PyUnicode_READY(uval)))
goto bad;
+ #endif
ulength = __Pyx_PyUnicode_GET_LENGTH(uval);
+ #if !CYTHON_ASSUME_SAFE_SIZE
+ if (unlikely(ulength < 0)) goto bad;
+ #endif
if (unlikely(!ulength))
continue;
if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos))
@@ -878,13 +874,35 @@ static PYOBJECT_TYPE __Pyx_PyUnicode_Join(HPY_CONTEXT_FIRST_ARG_DEF PYOBJECT_TYP
return NULL;
#else
// non-CPython fallback
+ API_SSIZE_T i;
+#if CYTHON_USING_HPY
+ PYOBJECT_TYPE result = PYOBJECT_GLOBAL_LOAD($empty_unicode);
+ PYOBJECT_TYPE value_tuple = API_NULL_VALUE;
+ for (i=0; i= 0)
#define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0)
@@ -512,7 +514,7 @@ static CYTHON_INLINE {{struct_type_decl}} {{funcname}}(PyObject *);
/////////////// FromPyCTupleUtility ///////////////
-#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS
static void __Pyx_tuple_{{funcname}}(PyObject * o, {{struct_type_decl}} *result) {
{{for ix, component in enumerate(components):}}
{{py:attr = "result->f%s" % ix}}
@@ -567,7 +569,7 @@ static void __Pyx_seq_{{funcname}}(PyObject * o, {{struct_type_decl}} *result) {
static CYTHON_INLINE {{struct_type_decl}} {{funcname}}(PyObject * o) {
{{struct_type_decl}} result;
- #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ #if CYTHON_ASSUME_SAFE_MACROS && CYTHON_ASSUME_SAFE_SIZE && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_Check(o) && PyTuple_GET_SIZE(o) == {{size}})) {
__Pyx_tuple_{{funcname}}(o, &result);
} else if (likely(PyList_Check(o) && PyList_GET_SIZE(o) == {{size}})) {
@@ -589,15 +591,16 @@ static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
/////////////// UnicodeAsUCS4 ///////////////
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
- Py_ssize_t length;
- length = PyUnicode_GET_LENGTH(x);
- if (likely(length == 1)) {
- return PyUnicode_READ_CHAR(x, 0);
- }
- PyErr_Format(PyExc_ValueError,
- "only single character unicode strings can be converted to Py_UCS4, "
- "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
- return (Py_UCS4)-1;
+ Py_ssize_t length = __Pyx_PyUnicode_GET_LENGTH(x);
+ if (likely(length == 1)) {
+ return __Pyx_PyUnicode_READ_CHAR(x, 0);
+ } else if (likely(length >= 0)) {
+ // "length == -1" indicates an error already.
+ PyErr_Format(PyExc_ValueError,
+ "only single character unicode strings can be converted to Py_UCS4, "
+ "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
+ }
+ return (Py_UCS4)-1;
}
@@ -646,10 +649,14 @@ static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject* x) {
const long maxval = 1114111;
#endif
if (PyUnicode_Check(x)) {
- if (unlikely(__Pyx_PyUnicode_GET_LENGTH(x) != 1)) {
- PyErr_Format(PyExc_ValueError,
- "only single character unicode strings can be converted to Py_UNICODE, "
- "got length %" CYTHON_FORMAT_SSIZE_T "d", __Pyx_PyUnicode_GET_LENGTH(x));
+ Py_ssize_t length = __Pyx_PyUnicode_GET_LENGTH(x);
+ if (unlikely(length != 1)) {
+ // -1 indicates an error.
+ if (length >= 0) {
+ PyErr_Format(PyExc_ValueError,
+ "only single character unicode strings can be converted to Py_UNICODE, "
+ "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
+ }
return (Py_UNICODE)-1;
}
ival = PyUnicode_READ_CHAR(x, 0);
@@ -678,6 +685,7 @@ static CYTHON_INLINE PYOBJECT_TYPE {{TO_PY_FUNCTION}}(HPY_CONTEXT_FIRST_ARG_DEF
/////////////// CIntToPy ///////////////
//@requires: GCCDiagnostics
+//@requires: ObjectHandling.c::PyObjectVectorCallKwBuilder
static CYTHON_INLINE PYOBJECT_TYPE {{TO_PY_FUNCTION}}(HPY_CONTEXT_FIRST_ARG_DEF {{TYPE}} value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
@@ -728,6 +736,7 @@ static CYTHON_INLINE PYOBJECT_TYPE {{TO_PY_FUNCTION}}(HPY_CONTEXT_FIRST_ARG_DEF
if (API_IS_NULL(py_bytes)) goto limited_bad;
// I'm deliberately not using PYIDENT here because this code path is very unlikely
// to ever run so it seems a pessimization mostly.
+#if CYTHON_USING_HPY
order_str = PYOBJECT_UNICODE_FROM_STRING(little ? "little" : "big");
if (API_IS_NULL(order_str)) goto limited_bad;
arg_tuple = TUPLE_PACK(2, py_bytes, order_str);
@@ -739,6 +748,19 @@ static CYTHON_INLINE PYOBJECT_TYPE {{TO_PY_FUNCTION}}(HPY_CONTEXT_FIRST_ARG_DEF
if (DICT_SET_ITEM_STR(kwds, "signed", __Pyx_hNewRef(API_TRUE))) goto limited_bad;
}
result = API_CALL_TUPLE_DICT(from_bytes, arg_tuple, kwds);
+#else
+ order_str = PyUnicode_FromString(little ? "little" : "big");
+ if (!order_str) goto limited_bad;
+ {
+ PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
+ if (!is_unsigned) {
+ kwds = __Pyx_MakeVectorcallBuilderKwds(1);
+ if (!kwds) goto limited_bad;
+ if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
+ }
+ result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
+ }
+#endif
limited_bad:
PYOBJECT_XCLOSEREF(from_bytes);
diff --git a/Cython/Utility/UFuncs_C.c b/Cython/Utility/UFuncs_C.c
index e7ce8812ed8..2115e4b2b52 100644
--- a/Cython/Utility/UFuncs_C.c
+++ b/Cython/Utility/UFuncs_C.c
@@ -19,7 +19,7 @@
// getter functions because we can't forward-declare arrays
static PyUFuncGenericFunction* {{ufunc_funcs_name}}(void); /* proto */
static char* {{ufunc_types_name}}(void); /* proto */
-static void* {{ufunc_data_name}}[] = {NULL}; // always null
+static void* {{ufunc_data_name}}[] = {NULL}; /* always null */
/////////////////////// UFuncConsts /////////////////////////
diff --git a/Makefile b/Makefile
index ab714dfb2c3..ab17516c1ab 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
PACKAGENAME=Cython
-PYTHON?=python
+PYTHON?=python3
TESTOPTS?=
REPO = git://github.com/cython/cython.git
VERSION?=$(shell sed -ne 's|^__version__\s*=\s*"\([^"]*\)".*|\1|p' Cython/Shadow.py)
@@ -33,11 +33,11 @@ sdist: dist/$(PACKAGENAME)-$(VERSION).tar.gz
dist/$(PACKAGENAME)-$(VERSION).tar.gz:
$(PYTHON) setup.py sdist
-pywheel: dist/$(PACKAGENAME)-$(VERSION)-py2.py3-none-any.whl
+pywheel: dist/$(PACKAGENAME)-$(VERSION)-py3-none-any.whl
-dist/$(PACKAGENAME)-$(VERSION)-py2.py3-none-any.whl:
- ${PYTHON} setup.py bdist_wheel --no-cython-compile --universal
- [ -f "$@" ] # check that we generated the expected universal wheel
+dist/$(PACKAGENAME)-$(VERSION)-py3-none-any.whl:
+ ${PYTHON} setup.py bdist_wheel --no-cython-compile
+ [ -f "$@" ] # check that we generated the expected Py3-only wheel
TMPDIR = .repo_tmp
.git: .gitrev
diff --git a/Tools/make_dataclass_tests.py b/Tools/make_dataclass_tests.py
index 7ad32427f9f..2dec4211928 100644
--- a/Tools/make_dataclass_tests.py
+++ b/Tools/make_dataclass_tests.py
@@ -49,12 +49,17 @@
("TestKeywordArgs", "test_KW_ONLY_twice"),
("TestKeywordArgs", "test_defaults"),
# uses local variable in class definition
+ # Also: difficulty lining up correct repr string when converting tests
("TestCase", "test_default_factory"),
+ # Also: Mock unassignable to list - legitimate for Cython to raise an error
("TestCase", "test_default_factory_with_no_init"),
+ # Also: attributes not available on class itself, only instances
("TestCase", "test_field_default"),
("TestCase", "test_function_annotations"),
("TestDescriptors", "test_lookup_on_instance"),
+ # Also: Mock unassignable to int - legitimate for Cython to raise an error
("TestCase", "test_default_factory_not_called_if_value_given"),
+    # Also: cdef classes never have the attribute (it only exists on instances)
("TestCase", "test_class_attrs"),
("TestCase", "test_hash_field_rules"),
("TestStringAnnotations",), # almost all the texts here use local variables
@@ -79,6 +84,7 @@
"test_class_var_frozen",
), # __annotations__ not present on cdef classes https://github.com/cython/cython/issues/4519
("TestCase", "test_dont_include_other_annotations"), # __annotations__
+ ("TestCase", "test_class_marker"), # __annotations__
("TestDocString",), # don't think cython dataclasses currently set __doc__
# either cython.dataclasses.field or cython.dataclasses.dataclass called directly as functions
# (will probably never be supported)
@@ -162,8 +168,6 @@
("TestReplace", "test_initvar_with_default_value"), # needs investigating
# Maybe bugs?
# ==========
- # non-default argument 'z' follows default argument in dataclass __init__ - this message looks right to me!
- ("TestCase", "test_class_marker"),
# cython.dataclasses.field parameter 'metadata' must be a literal value - possibly not something we can support?
("TestCase", "test_field_metadata_custom_mapping"),
(
diff --git a/doc-requirements.txt b/doc-requirements.txt
index fe0a95fae23..70ac73b6b84 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -1,5 +1,112 @@
-sphinx==4.5.0
+sphinx==7.2.6
sphinx-issues==3.0.1
-sphinx-tabs==3.4.0
-Jinja2==3.0.3
-jupyter
+sphinx-tabs==3.4.4
+Jinja2==3.1.3
+jupyter==1.0.0
+# automatic requirements from pip freeze below:
+alabaster==0.7.16
+anyio==4.2.0
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.3.0
+asttokens==2.4.1
+async-lru==2.0.4
+attrs==23.2.0
+Babel==2.14.0
+beautifulsoup4==4.12.2
+bleach==6.1.0
+certifi==2023.11.17
+cffi==1.16.0
+charset-normalizer==3.3.2
+comm==0.2.1
+debugpy==1.8.0
+decorator==5.1.1
+defusedxml==0.7.1
+docutils==0.18.1
+exceptiongroup==1.2.0
+executing==2.0.1
+fastjsonschema==2.19.1
+fqdn==1.5.1
+idna==3.6
+imagesize==1.4.1
+importlib-metadata==7.0.1
+ipykernel==6.28.0
+ipython==8.18.1
+ipywidgets==8.1.1
+isoduration==20.11.0
+jedi==0.19.1
+json5==0.9.14
+jsonpointer==2.4
+jsonschema==4.20.0
+jsonschema-specifications==2023.12.1
+jupyter-console==6.6.3
+jupyter-events==0.9.0
+jupyter-lsp==2.2.1
+jupyter_client==8.6.0
+jupyter_core==5.7.1
+jupyter_server==2.12.4
+jupyter_server_terminals==0.5.1
+jupyterlab==4.0.10
+jupyterlab-widgets==3.0.9
+jupyterlab_pygments==0.3.0
+jupyterlab_server==2.25.2
+MarkupSafe==2.1.3
+matplotlib-inline==0.1.6
+mistune==3.0.2
+nbclient==0.9.0
+nbconvert==7.14.1
+nbformat==5.9.2
+nest-asyncio==1.5.8
+notebook==7.0.6
+notebook_shim==0.2.3
+overrides==7.4.0
+packaging==23.2
+pandocfilters==1.5.0
+parso==0.8.3
+pexpect==4.9.0
+platformdirs==4.1.0
+prometheus-client==0.19.0
+prompt-toolkit==3.0.43
+psutil==5.9.7
+ptyprocess==0.7.0
+pure-eval==0.2.2
+pycparser==2.21
+Pygments==2.17.2
+python-dateutil==2.8.2
+python-json-logger==2.0.7
+PyYAML==6.0.1
+pyzmq==25.1.2
+qtconsole==5.5.1
+QtPy==2.4.1
+referencing==0.32.1
+requests==2.31.0
+rfc3339-validator==0.1.4
+rfc3986-validator==0.1.1
+rpds-py==0.17.1
+Send2Trash==1.8.2
+six==1.16.0
+sniffio==1.3.0
+snowballstemmer==2.2.0
+soupsieve==2.5
+sphinxcontrib-applehelp==1.0.8
+sphinxcontrib-devhelp==1.0.6
+sphinxcontrib-htmlhelp==2.0.5
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==1.0.7
+sphinxcontrib-serializinghtml==1.1.10
+stack-data==0.6.3
+terminado==0.18.0
+tinycss2==1.2.1
+tomli==2.0.1
+tornado==6.4
+traitlets==5.14.1
+types-python-dateutil==2.8.19.20240106
+typing_extensions==4.9.0
+uri-template==1.3.0
+urllib3==2.1.0
+wcwidth==0.2.13
+webcolors==1.13
+webencodings==0.5.1
+websocket-client==1.7.0
+widgetsnbextension==4.0.9
+zipp==3.17.0
diff --git a/docs/examples/quickstart/build/hello.pyx b/docs/examples/quickstart/build/hello.pyx
index da1b827ac9a..a5cf55e7ec4 100644
--- a/docs/examples/quickstart/build/hello.pyx
+++ b/docs/examples/quickstart/build/hello.pyx
@@ -1,2 +1,2 @@
def say_hello_to(name):
- print("Hello %s!" % name)
+ print(f"Hello {name}!")
diff --git a/docs/examples/tutorial/clibraries/test_queue.py b/docs/examples/tutorial/clibraries/test_queue.py
index 41b267395cf..d42c75cb99f 100644
--- a/docs/examples/tutorial/clibraries/test_queue.py
+++ b/docs/examples/tutorial/clibraries/test_queue.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import time
import queue
diff --git a/docs/examples/userguide/extension_types/cheesy.py b/docs/examples/userguide/extension_types/cheesy.py
index 0995c399356..d0b59d8237c 100644
--- a/docs/examples/userguide/extension_types/cheesy.py
+++ b/docs/examples/userguide/extension_types/cheesy.py
@@ -10,7 +10,7 @@ def __cinit__(self):
@property
def cheese(self):
- return "We don't have: %s" % self.cheeses
+ return f"We don't have: {self.cheeses}"
@cheese.setter
def cheese(self, value):
diff --git a/docs/examples/userguide/extension_types/cheesy.pyx b/docs/examples/userguide/extension_types/cheesy.pyx
index 2859d848fd8..f697e41ac4a 100644
--- a/docs/examples/userguide/extension_types/cheesy.pyx
+++ b/docs/examples/userguide/extension_types/cheesy.pyx
@@ -10,7 +10,7 @@ cdef class CheeseShop:
@property
def cheese(self):
- return "We don't have: %s" % self.cheeses
+ return f"We don't have: {self.cheeses}"
@cheese.setter
def cheese(self, value):
diff --git a/docs/examples/userguide/extension_types/my_module.pyx b/docs/examples/userguide/extension_types/my_module.pyx
index fb0701c12f0..95bb2e15169 100644
--- a/docs/examples/userguide/extension_types/my_module.pyx
+++ b/docs/examples/userguide/extension_types/my_module.pyx
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
cdef class Shrubbery:
def __init__(self, w, h):
diff --git a/docs/examples/userguide/extension_types/shrubbery.pyx b/docs/examples/userguide/extension_types/shrubbery.pyx
index 8c4e587764b..1b2d883bbbe 100644
--- a/docs/examples/userguide/extension_types/shrubbery.pyx
+++ b/docs/examples/userguide/extension_types/shrubbery.pyx
@@ -1,4 +1,3 @@
-from __future__ import print_function
cdef class Shrubbery:
cdef int width
cdef int height
diff --git a/docs/examples/userguide/language_basics/cdef_block.pyx b/docs/examples/userguide/language_basics/cdef_block.pyx
index c0c30302921..4753f8cd710 100644
--- a/docs/examples/userguide/language_basics/cdef_block.pyx
+++ b/docs/examples/userguide/language_basics/cdef_block.pyx
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
cdef:
struct Spam:
int tons
diff --git a/docs/examples/userguide/language_basics/compile_time.pyx b/docs/examples/userguide/language_basics/compile_time.pyx
index f302dd241b5..d05c2a264c6 100644
--- a/docs/examples/userguide/language_basics/compile_time.pyx
+++ b/docs/examples/userguide/language_basics/compile_time.pyx
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
DEF FavouriteFood = u"spam"
DEF ArraySize = 42
DEF OtherArraySize = 2 * ArraySize + 17
diff --git a/docs/examples/userguide/language_basics/optional_subclassing.py b/docs/examples/userguide/language_basics/optional_subclassing.py
index 480ae100b65..1f2d25d458a 100644
--- a/docs/examples/userguide/language_basics/optional_subclassing.py
+++ b/docs/examples/userguide/language_basics/optional_subclassing.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
@cython.cclass
class A:
@cython.cfunc
diff --git a/docs/examples/userguide/language_basics/optional_subclassing.pyx b/docs/examples/userguide/language_basics/optional_subclassing.pyx
index b2a3d4decca..1eef3affd0e 100644
--- a/docs/examples/userguide/language_basics/optional_subclassing.pyx
+++ b/docs/examples/userguide/language_basics/optional_subclassing.pyx
@@ -1,6 +1,4 @@
-from __future__ import print_function
-
-
+
cdef class A:
cdef foo(self):
diff --git a/docs/examples/userguide/language_basics/override.py b/docs/examples/userguide/language_basics/override.py
index f9e0be83fa7..540f79429de 100644
--- a/docs/examples/userguide/language_basics/override.py
+++ b/docs/examples/userguide/language_basics/override.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
@cython.cclass
class A:
@cython.cfunc
diff --git a/docs/examples/userguide/language_basics/override.pyx b/docs/examples/userguide/language_basics/override.pyx
index 1a7ceefb70b..0e57c43ca7e 100644
--- a/docs/examples/userguide/language_basics/override.pyx
+++ b/docs/examples/userguide/language_basics/override.pyx
@@ -1,6 +1,4 @@
-from __future__ import print_function
-
-
+
cdef class A:
cdef foo(self):
diff --git a/docs/examples/userguide/language_basics/parameter_refcount.py b/docs/examples/userguide/language_basics/parameter_refcount.py
index 2b25915ba90..bcca63906c2 100644
--- a/docs/examples/userguide/language_basics/parameter_refcount.py
+++ b/docs/examples/userguide/language_basics/parameter_refcount.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
from cython.cimports.cpython.ref import PyObject
import sys
diff --git a/docs/examples/userguide/language_basics/parameter_refcount.pyx b/docs/examples/userguide/language_basics/parameter_refcount.pyx
index 6fe3ffadd58..f42e9c391a0 100644
--- a/docs/examples/userguide/language_basics/parameter_refcount.pyx
+++ b/docs/examples/userguide/language_basics/parameter_refcount.pyx
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
from cpython.ref cimport PyObject
import sys
diff --git a/docs/examples/userguide/memoryviews/quickstart.py b/docs/examples/userguide/memoryviews/quickstart.py
index 4f31185b27f..cc575489026 100644
--- a/docs/examples/userguide/memoryviews/quickstart.py
+++ b/docs/examples/userguide/memoryviews/quickstart.py
@@ -14,7 +14,7 @@
cyarr_view = cython.declare(cython.int[:, :, :], cyarr)
# Show the sum of all the arrays before altering it
-print("NumPy sum of the NumPy array before assignments: %s" % narr.sum())
+print(f"NumPy sum of the NumPy array before assignments: {narr.sum()}")
# We can copy the values from one memoryview into another using a single
# statement, by either indexing with ... or (NumPy-style) with a colon.
@@ -28,7 +28,7 @@
cyarr_view[0, 0, 0] = 1000
# Assigning into the memoryview on the NumPy array alters the latter
-print("NumPy sum of NumPy array after assignments: %s" % narr.sum())
+print(f"NumPy sum of NumPy array after assignments: {narr.sum()}")
# A function using a memoryview does not usually need the GIL
@cython.nogil
@@ -52,8 +52,8 @@ def sum3d(arr: cython.int[:, :, :]) -> cython.int:
# A function accepting a memoryview knows how to use a NumPy array,
# a C array, a Cython array...
-print("Memoryview sum of NumPy array is %s" % sum3d(narr))
-print("Memoryview sum of C array is %s" % sum3d(carr))
-print("Memoryview sum of Cython array is %s" % sum3d(cyarr))
+print(f"Memoryview sum of NumPy array is {sum3d(narr)}")
+print(f"Memoryview sum of C array is {sum3d(carr)}")
+print(f"Memoryview sum of Cython array is {sum3d(cyarr)}")
# ... and of course, a memoryview.
-print("Memoryview sum of C memoryview is %s" % sum3d(carr_view))
+print(f"Memoryview sum of C memoryview is {sum3d(carr_view)}")
diff --git a/docs/examples/userguide/memoryviews/quickstart.pyx b/docs/examples/userguide/memoryviews/quickstart.pyx
index 0ba012b1a1b..73a3e4cdb71 100644
--- a/docs/examples/userguide/memoryviews/quickstart.pyx
+++ b/docs/examples/userguide/memoryviews/quickstart.pyx
@@ -14,7 +14,7 @@ cyarr = cvarray(shape=(3, 3, 3), itemsize=sizeof(int), format="i")
cdef int [:, :, :] cyarr_view = cyarr
# Show the sum of all the arrays before altering it
-print("NumPy sum of the NumPy array before assignments: %s" % narr.sum())
+print(f"NumPy sum of the NumPy array before assignments: {narr.sum()}")
# We can copy the values from one memoryview into another using a single
# statement, by either indexing with ... or (NumPy-style) with a colon.
@@ -28,7 +28,7 @@ carr_view[0, 0, 0] = 100
cyarr_view[0, 0, 0] = 1000
# Assigning into the memoryview on the NumPy array alters the latter
-print("NumPy sum of NumPy array after assignments: %s" % narr.sum())
+print(f"NumPy sum of NumPy array after assignments: {narr.sum()}")
# A function using a memoryview does not usually need the GIL
cpdef int sum3d(int[:, :, :] arr) nogil:
@@ -52,8 +52,8 @@ cpdef int sum3d(int[:, :, :] arr) nogil:
# A function accepting a memoryview knows how to use a NumPy array,
# a C array, a Cython array...
-print("Memoryview sum of NumPy array is %s" % sum3d(narr))
-print("Memoryview sum of C array is %s" % sum3d(carr))
-print("Memoryview sum of Cython array is %s" % sum3d(cyarr))
+print(f"Memoryview sum of NumPy array is {sum3d(narr)}")
+print(f"Memoryview sum of C array is {sum3d(carr)}")
+print(f"Memoryview sum of Cython array is {sum3d(cyarr)}")
# ... and of course, a memoryview.
-print("Memoryview sum of C memoryview is %s" % sum3d(carr_view))
+print(f"Memoryview sum of C memoryview is {sum3d(carr_view)}")
diff --git a/docs/examples/userguide/numpy_tutorial/compute_fused_types.py b/docs/examples/userguide/numpy_tutorial/compute_fused_types.py
new file mode 100644
index 00000000000..4fcc6382561
--- /dev/null
+++ b/docs/examples/userguide/numpy_tutorial/compute_fused_types.py
@@ -0,0 +1,45 @@
+# cython: infer_types=True
+import numpy as np
+import cython
+
+my_type = cython.fused_type(cython.int, cython.double, cython.longlong)
+
+
+
+@cython.exceptval(check=False)
+@cython.cfunc
+def clip(a: my_type, min_value: my_type, max_value: my_type) -> my_type:
+ return min(max(a, min_value), max_value)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def compute(array_1: my_type[:, ::1], array_2: my_type[:, ::1], a: my_type, b: my_type, c: my_type):
+
+ x_max = array_1.shape[0]
+ y_max = array_1.shape[1]
+
+ assert tuple(array_1.shape) == tuple(array_2.shape)
+
+ if my_type is cython.int:
+ dtype = np.intc
+ elif my_type is cython.double:
+ dtype = np.double
+ elif my_type is cython.longlong:
+ dtype = np.longlong
+
+ result = np.zeros((x_max, y_max), dtype=dtype)
+ result_view: my_type[:, ::1] = result
+
+ tmp: my_type
+ x: cython.Py_ssize_t
+ y: cython.Py_ssize_t
+
+ for x in range(x_max):
+ for y in range(y_max):
+
+ tmp = clip(array_1[x, y], 2, 10)
+ tmp = tmp * a + array_2[x, y] * b
+ result_view[x, y] = tmp + c
+
+ return result
diff --git a/docs/examples/userguide/numpy_tutorial/compute_fused_types.pyx b/docs/examples/userguide/numpy_tutorial/compute_fused_types.pyx
index af5ef9071af..cdecdef630f 100644
--- a/docs/examples/userguide/numpy_tutorial/compute_fused_types.pyx
+++ b/docs/examples/userguide/numpy_tutorial/compute_fused_types.pyx
@@ -34,6 +34,7 @@ def compute(my_type[:, ::1] array_1, my_type[:, ::1] array_2, my_type a, my_type
cdef my_type tmp
cdef Py_ssize_t x, y
+
for x in range(x_max):
for y in range(y_max):
diff --git a/docs/examples/userguide/numpy_tutorial/compute_infer_types.py b/docs/examples/userguide/numpy_tutorial/compute_infer_types.py
new file mode 100644
index 00000000000..416e0b9d580
--- /dev/null
+++ b/docs/examples/userguide/numpy_tutorial/compute_infer_types.py
@@ -0,0 +1,36 @@
+# cython: infer_types=True
+import numpy as np
+import cython
+
+DTYPE = np.intc
+
+@cython.cfunc
+def clip(a: cython.int, min_value: cython.int, max_value: cython.int) -> cython.int:
+ return min(max(a, min_value), max_value)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def compute(array_1: cython.int[:, ::1], array_2: cython.int[:, ::1],
+ a: cython.int, b: cython.int, c: cython.int):
+
+ x_max = array_1.shape[0]
+ y_max = array_1.shape[1]
+
+ assert tuple(array_1.shape) == tuple(array_2.shape)
+
+ result = np.zeros((x_max, y_max), dtype=DTYPE)
+ result_view: cython.int[:, ::1] = result
+
+ tmp: cython.int
+ x: cython.Py_ssize_t
+ y: cython.Py_ssize_t
+
+ for x in range(x_max):
+ for y in range(y_max):
+
+ tmp = clip(array_1[x, y], 2, 10)
+ tmp = tmp * a + array_2[x, y] * b
+ result_view[x, y] = tmp + c
+
+ return result
diff --git a/docs/examples/userguide/numpy_tutorial/compute_infer_types.pyx b/docs/examples/userguide/numpy_tutorial/compute_infer_types.pyx
index 3882c289dd5..b17eb139edf 100644
--- a/docs/examples/userguide/numpy_tutorial/compute_infer_types.pyx
+++ b/docs/examples/userguide/numpy_tutorial/compute_infer_types.pyx
@@ -13,6 +13,7 @@ cdef int clip(int a, int min_value, int max_value):
@cython.wraparound(False)
def compute(int[:, ::1] array_1, int[:, ::1] array_2, int a, int b, int c):
+
x_max = array_1.shape[0]
y_max = array_1.shape[1]
@@ -24,6 +25,7 @@ def compute(int[:, ::1] array_1, int[:, ::1] array_2, int a, int b, int c):
cdef int tmp
cdef Py_ssize_t x, y
+
for x in range(x_max):
for y in range(y_max):
diff --git a/docs/examples/userguide/numpy_tutorial/compute_memview.py b/docs/examples/userguide/numpy_tutorial/compute_memview.py
new file mode 100644
index 00000000000..1706b661100
--- /dev/null
+++ b/docs/examples/userguide/numpy_tutorial/compute_memview.py
@@ -0,0 +1,37 @@
+import numpy as np
+import cython
+
+DTYPE = np.intc
+
+@cython.cfunc
+def clip(a: cython.int, min_value: cython.int, max_value: cython.int) -> cython.int:
+ return min(max(a, min_value), max_value)
+
+
+def compute(array_1: cython.int[:, :], array_2: cython.int[:, :],
+ a: cython.int, b: cython.int, c: cython.int):
+
+ x_max: cython.Py_ssize_t = array_1.shape[0]
+ y_max: cython.Py_ssize_t = array_1.shape[1]
+
+    # array_1.shape is now a C array, so it's not possible
+ # to compare it simply by using == without a for-loop.
+ # To be able to compare it to array_2.shape easily,
+ # we convert them both to Python tuples.
+ assert tuple(array_1.shape) == tuple(array_2.shape)
+
+ result = np.zeros((x_max, y_max), dtype=DTYPE)
+ result_view: cython.int[:, :] = result
+
+ tmp: cython.int
+ x: cython.Py_ssize_t
+ y: cython.Py_ssize_t
+
+ for x in range(x_max):
+ for y in range(y_max):
+
+ tmp = clip(array_1[x, y], 2, 10)
+ tmp = tmp * a + array_2[x, y] * b
+ result_view[x, y] = tmp + c
+
+ return result
diff --git a/docs/examples/userguide/numpy_tutorial/compute_memview.pyx b/docs/examples/userguide/numpy_tutorial/compute_memview.pyx
index 166cd6df3f8..084bea37336 100644
--- a/docs/examples/userguide/numpy_tutorial/compute_memview.pyx
+++ b/docs/examples/userguide/numpy_tutorial/compute_memview.pyx
@@ -1,5 +1,6 @@
import numpy as np
+
DTYPE = np.intc
@@ -9,6 +10,7 @@ cdef int clip(int a, int min_value, int max_value):
def compute(int[:, :] array_1, int[:, :] array_2, int a, int b, int c):
+
cdef Py_ssize_t x_max = array_1.shape[0]
cdef Py_ssize_t y_max = array_1.shape[1]
@@ -24,6 +26,7 @@ def compute(int[:, :] array_1, int[:, :] array_2, int a, int b, int c):
cdef int tmp
cdef Py_ssize_t x, y
+
for x in range(x_max):
for y in range(y_max):
diff --git a/docs/examples/userguide/numpy_tutorial/compute_prange.py b/docs/examples/userguide/numpy_tutorial/compute_prange.py
new file mode 100644
index 00000000000..68fc8a11f5b
--- /dev/null
+++ b/docs/examples/userguide/numpy_tutorial/compute_prange.py
@@ -0,0 +1,52 @@
+# tag: openmp
+
+# distutils: extra_compile_args=-fopenmp
+# distutils: extra_link_args=-fopenmp
+
+import numpy as np
+import cython
+from cython.parallel import prange
+
+my_type = cython.fused_type(cython.int, cython.double, cython.longlong)
+
+
+# We declare our plain c function nogil
+@cython.exceptval(check=False)
+@cython.nogil
+@cython.cfunc
+def clip(a: my_type, min_value: my_type, max_value: my_type) -> my_type:
+ return min(max(a, min_value), max_value)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def compute(array_1: my_type[:, ::1], array_2: my_type[:, ::1], a: my_type, b: my_type, c: my_type):
+
+ x_max: cython.Py_ssize_t = array_1.shape[0]
+ y_max: cython.Py_ssize_t = array_1.shape[1]
+
+ assert tuple(array_1.shape) == tuple(array_2.shape)
+
+ if my_type is cython.int:
+ dtype = np.intc
+ elif my_type is cython.double:
+ dtype = np.double
+ elif my_type is cython.longlong:
+ dtype = np.longlong
+
+ result = np.zeros((x_max, y_max), dtype=dtype)
+ result_view: my_type[:, ::1] = result
+
+ tmp: my_type
+ x: cython.Py_ssize_t
+ y: cython.Py_ssize_t
+
+ # We use prange here.
+ for x in prange(x_max, nogil=True):
+ for y in range(y_max):
+
+ tmp = clip(array_1[x, y], 2, 10)
+ tmp = tmp * a + array_2[x, y] * b
+ result_view[x, y] = tmp + c
+
+ return result
diff --git a/docs/examples/userguide/numpy_tutorial/compute_prange.pyx b/docs/examples/userguide/numpy_tutorial/compute_prange.pyx
index 562c7307093..45f6525930a 100644
--- a/docs/examples/userguide/numpy_tutorial/compute_prange.pyx
+++ b/docs/examples/userguide/numpy_tutorial/compute_prange.pyx
@@ -1,6 +1,4 @@
# tag: openmp
-# You can ignore the previous line.
-# It's for internal testing of the cython documentation.
# distutils: extra_compile_args=-fopenmp
# distutils: extra_link_args=-fopenmp
@@ -16,7 +14,7 @@ ctypedef fused my_type:
# We declare our plain c function nogil
-cdef my_type clip(my_type a, my_type min_value, my_type max_value) nogil:
+cdef my_type clip(my_type a, my_type min_value, my_type max_value) noexcept nogil:
return min(max(a, min_value), max_value)
@@ -42,6 +40,7 @@ def compute(my_type[:, ::1] array_1, my_type[:, ::1] array_2, my_type a, my_type
cdef my_type tmp
cdef Py_ssize_t x, y
+
# We use prange here.
for x in prange(x_max, nogil=True):
for y in range(y_max):
diff --git a/docs/examples/userguide/numpy_tutorial/compute_typed.py b/docs/examples/userguide/numpy_tutorial/compute_typed.py
new file mode 100644
index 00000000000..b6d90c564a9
--- /dev/null
+++ b/docs/examples/userguide/numpy_tutorial/compute_typed.py
@@ -0,0 +1,53 @@
+import numpy as np
+import cython
+# We now need to fix a datatype for our arrays. I've used the variable
+# DTYPE for this, which is assigned to the usual NumPy runtime
+# type info object.
+DTYPE = np.intc
+
+# @cython.cfunc means here that this function is a plain C function (so faster).
+# To get all the benefits, we type the arguments and the return value.
+@cython.exceptval(check=False)
+@cython.cfunc
+def clip(a: cython.int, min_value: cython.int, max_value: cython.int) -> cython.int:
+ return min(max(a, min_value), max_value)
+
+
+def compute(array_1, array_2, a: cython.int, b: cython.int, c: cython.int):
+
+ # Annotation is also used within functions to type variables. It
+ # can only be used at the top indentation level (there are non-trivial
+ # problems with allowing them in other places, though we'd love to see
+ # good and thought out proposals for it).
+ x_max: cython.Py_ssize_t = array_1.shape[0]
+ y_max: cython.Py_ssize_t = array_1.shape[1]
+
+ assert array_1.shape == array_2.shape
+ assert array_1.dtype == DTYPE
+ assert array_2.dtype == DTYPE
+
+ result = np.zeros((x_max, y_max), dtype=DTYPE)
+
+ # It is very important to type ALL your variables. You do not get any
+ # warnings if not, only much slower code (they are implicitly typed as
+ # Python objects).
+ # For the "tmp" variable, we want to use the same data type as is
+ # stored in the array, so we use int because it corresponds to np.intc.
+ # NB! An important side-effect of this is that if "tmp" overflows its
+ # datatype size, it will simply wrap around like in C, rather than raise
+ # an error like in Python.
+
+ tmp: cython.int
+
+ # cython.Py_ssize_t is the proper C type for Python array indices.
+ x: cython.Py_ssize_t
+ y: cython.Py_ssize_t
+
+ for x in range(x_max):
+ for y in range(y_max):
+
+ tmp = clip(array_1[x, y], 2, 10)
+ tmp = tmp * a + array_2[x, y] * b
+ result[x, y] = tmp + c
+
+ return result
diff --git a/docs/examples/userguide/numpy_tutorial/compute_typed.pyx b/docs/examples/userguide/numpy_tutorial/compute_typed.pyx
index cccc1aa3b2d..a78589d4ebe 100644
--- a/docs/examples/userguide/numpy_tutorial/compute_typed.pyx
+++ b/docs/examples/userguide/numpy_tutorial/compute_typed.pyx
@@ -11,6 +11,8 @@ cdef int clip(int a, int min_value, int max_value):
return min(max(a, min_value), max_value)
+
+
def compute(array_1, array_2, int a, int b, int c):
# The "cdef" keyword is also used within functions to type variables. It
@@ -40,6 +42,7 @@ def compute(array_1, array_2, int a, int b, int c):
# Py_ssize_t is the proper C type for Python array indices.
cdef Py_ssize_t x, y
+
for x in range(x_max):
for y in range(y_max):
diff --git a/docs/examples/userguide/numpy_tutorial/numpy_and_cython.ipynb b/docs/examples/userguide/numpy_tutorial/numpy_and_cython.ipynb
index e2fc5877343..5d79001a1ae 100644
--- a/docs/examples/userguide/numpy_tutorial/numpy_and_cython.ipynb
+++ b/docs/examples/userguide/numpy_tutorial/numpy_and_cython.ipynb
@@ -37,7 +37,6 @@
}
],
"source": [
- "from __future__ import print_function\n",
"%load_ext cython\n",
"import Cython\n",
"print(Cython.__version__)"
@@ -762,7 +761,7 @@
"\n",
"\n",
"# We declare our plain c function nogil\n",
- "cdef my_type clip(my_type a, my_type min_value, my_type max_value) nogil:\n",
+ "cdef my_type clip(my_type a, my_type min_value, my_type max_value) noexcept nogil:\n",
" return min(max(a, min_value), max_value)\n",
"\n",
"\n",
diff --git a/docs/examples/userguide/parallelism/condition_sum.py b/docs/examples/userguide/parallelism/condition_sum.py
new file mode 100644
index 00000000000..88d7d2cca1b
--- /dev/null
+++ b/docs/examples/userguide/parallelism/condition_sum.py
@@ -0,0 +1,15 @@
+import cython
+from cython.parallel import prange
+
+def psum(n: cython.int):
+
+ i: cython.int
+ sum: cython.int = 0
+
+ for i in prange(n, nogil=True, use_threads_if=n>1000):
+ sum += i
+
+ return sum
+
+psum(30) # Executed sequentially
+psum(10000) # Executed in parallel
diff --git a/docs/examples/userguide/parallelism/condition_sum.pyx b/docs/examples/userguide/parallelism/condition_sum.pyx
new file mode 100644
index 00000000000..c926d16f369
--- /dev/null
+++ b/docs/examples/userguide/parallelism/condition_sum.pyx
@@ -0,0 +1,14 @@
+from cython.parallel import prange
+
+def psum(int n):
+
+ cdef int i
+ cdef int sum = 0
+
+ for i in prange(n, nogil=True, use_threads_if=n>1000):
+ sum += i
+
+ return sum
+
+psum(30) # Executed sequentially
+psum(10000) # Executed in parallel
diff --git a/docs/src/tutorial/numpy.rst b/docs/src/tutorial/numpy.rst
index a3a7d01f450..e1eddaf8170 100644
--- a/docs/src/tutorial/numpy.rst
+++ b/docs/src/tutorial/numpy.rst
@@ -9,6 +9,11 @@ Working with NumPy
below, have less overhead, and can be passed around without requiring the GIL.
They should be preferred to the syntax presented in this page.
See :ref:`Cython for NumPy users `.
+
+.. NOTE:: There is currently no way to usefully specify NumPy arrays using
+ Python-style annotations and we do not currently plan to add one.
+ If you want to use annotation typing then we recommend using
+ typed memoryviews instead.
You can use NumPy from Cython exactly the same as in regular Python, but by
doing so you are losing potentially high speedups because Cython has support
diff --git a/docs/src/tutorial/pure.rst b/docs/src/tutorial/pure.rst
index 32a7fa0cae8..438741ac8d1 100644
--- a/docs/src/tutorial/pure.rst
+++ b/docs/src/tutorial/pure.rst
@@ -364,6 +364,11 @@ Note the use of ``cython.int`` rather than ``int`` - Cython does not translate
an ``int`` annotation to a C integer by default since the behaviour can be
quite different with respect to overflow and division.
+Annotations on global variables are currently ignored. This is because we expect
+annotation-typed code to be in majority written for Python, and global type annotations
+would turn the Python variable into an internal C variable, thus removing it from the
+module dict. To declare global variables as typed C variables, use ``@cython.declare()``.
+
Annotations can be combined with the ``@cython.exceptval()`` decorator for non-Python
return types:
diff --git a/docs/src/userguide/compute_typed_html.jpg b/docs/src/userguide/compute_typed_html.jpg
deleted file mode 100644
index a1e00657341..00000000000
Binary files a/docs/src/userguide/compute_typed_html.jpg and /dev/null differ
diff --git a/docs/src/userguide/compute_typed_py_html.png b/docs/src/userguide/compute_typed_py_html.png
new file mode 100644
index 00000000000..a75173111d5
Binary files /dev/null and b/docs/src/userguide/compute_typed_py_html.png differ
diff --git a/docs/src/userguide/compute_typed_pyx_html.png b/docs/src/userguide/compute_typed_pyx_html.png
new file mode 100644
index 00000000000..58ae6907f04
Binary files /dev/null and b/docs/src/userguide/compute_typed_pyx_html.png differ
diff --git a/docs/src/userguide/extension_types.rst b/docs/src/userguide/extension_types.rst
index 09720c36ca6..41c99b581ad 100644
--- a/docs/src/userguide/extension_types.rst
+++ b/docs/src/userguide/extension_types.rst
@@ -658,6 +658,8 @@ definition, for example,::
# attributes and methods
+.. _freelist:
+
Fast instantiation
===================
@@ -1034,8 +1036,6 @@ objects defined in the Python core or in a non-Cython extension module.
Here is an example which will let you get at the C-level members of the
built-in complex object::
- from __future__ import print_function
-
cdef extern from "complexobject.h":
struct Py_complex:
diff --git a/docs/src/userguide/external_C_code.rst b/docs/src/userguide/external_C_code.rst
index 6bd20dc9268..4abb53f4701 100644
--- a/docs/src/userguide/external_C_code.rst
+++ b/docs/src/userguide/external_C_code.rst
@@ -498,6 +498,8 @@ file consists of the full dotted name of the module, e.g. a module called
the resulting ``.so`` file like a dynamic library.
Beware that this is not portable, so it should be avoided.
+.. _CYTHON_EXTERN_C:
+
C++ public declarations
^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/src/userguide/language_basics.rst b/docs/src/userguide/language_basics.rst
index f45d6e12bae..a4b990146e9 100644
--- a/docs/src/userguide/language_basics.rst
+++ b/docs/src/userguide/language_basics.rst
@@ -117,18 +117,20 @@ the declaration in most cases:
f: cython.float = 2.5
g: cython.int[4] = [1, 2, 3, 4]
h: cython.p_float = cython.address(f)
+ c: cython.doublecomplex = 2 + 3j
.. group-tab:: Cython
.. code-block:: cython
- cdef int a_global_variable
+ cdef int a_global_variable = 42
def func():
cdef int i = 10, j, k
cdef float f = 2.5
cdef int[4] g = [1, 2, 3, 4]
cdef float *h = &f
+ cdef double complex c = 2 + 3j
.. note::
@@ -289,7 +291,64 @@ Types
The Cython language uses the normal C syntax for C types, including pointers. It provides
all the standard C types, namely ``char``, ``short``, ``int``, ``long``,
``long long`` as well as their ``unsigned`` versions,
-e.g. ``unsigned int`` (``cython.uint`` in Python code).
+e.g. ``unsigned int`` (``cython.uint`` in Python code):
+
+
+.. list-table:: Numeric Types
+ :widths: 25 25
+ :header-rows: 1
+
+ * - Cython type
+ - Pure Python type
+
+ * - ``bint``
+ - ``cython.bint``
+ * - ``char``
+ - ``cython.char``
+ * - ``signed char``
+ - ``cython.schar``
+ * - ``unsigned char``
+ - ``cython.uchar``
+ * - ``short``
+ - ``cython.short``
+ * - ``unsigned short``
+ - ``cython.ushort``
+ * - ``int``
+ - ``cython.int``
+ * - ``unsigned int``
+ - ``cython.uint``
+ * - ``long``
+ - ``cython.long``
+ * - ``unsigned long``
+ - ``cython.ulong``
+ * - ``long long``
+ - ``cython.longlong``
+ * - ``unsigned long long``
+ - ``cython.ulonglong``
+ * - ``float``
+ - ``cython.float``
+ * - ``double``
+ - ``cython.double``
+ * - ``long double``
+ - ``cython.longdouble``
+ * - ``float complex``
+ - ``cython.floatcomplex``
+ * - ``double complex``
+ - ``cython.doublecomplex``
+ * - ``long double complex``
+ - ``cython.longdoublecomplex``
+ * - ``size_t``
+ - ``cython.size_t``
+ * - ``Py_ssize_t``
+ - ``cython.Py_ssize_t``
+ * - ``Py_hash_t``
+ - ``cython.Py_hash_t``
+ * - ``Py_UCS4``
+ - ``cython.Py_UCS4``
+
+.. note::
+ Additional types are declared in the `stdint pxd file <https://github.com/cython/cython/blob/master/Cython/Includes/libc/stdint.pxd>`_.
+
The special ``bint`` type is used for C boolean values (``int`` with 0/non-0
values for False/True) and ``Py_ssize_t`` for (signed) sizes of Python
containers.
@@ -300,7 +359,6 @@ use a naming scheme with "p"s instead, separated from the type name with an unde
a pointer to a C int. Further pointer types can be constructed with the ``cython.pointer()`` function,
e.g. ``cython.pointer(cython.int)``.
-
Arrays use the normal C array syntax, e.g. ``int[10]``, and the size must be known
at compile time for stack allocated arrays. Cython doesn't support variable length arrays from C99.
Note that Cython uses array access for pointer dereferencing, as ``*x`` is not valid Python syntax,
@@ -1452,6 +1510,7 @@ if the corresponding definition file also defines that type.
and classes from each other without the Python overhead. To read more about
what how to do that, you can see :ref:`pxd_files`.
+.. _definition_file:
The definition file
-------------------
@@ -1480,6 +1539,7 @@ wants to access :keyword:`cdef` attributes and methods, or to inherit from
presence in a definition file does that. You only need a public
declaration if you want to make something available to external C code.
+.. _include_statement:
The include statement and include files
---------------------------------------
diff --git a/docs/src/userguide/migrating_to_cy30.rst b/docs/src/userguide/migrating_to_cy30.rst
index 77c768a6ecb..f427e84dc94 100644
--- a/docs/src/userguide/migrating_to_cy30.rst
+++ b/docs/src/userguide/migrating_to_cy30.rst
@@ -247,6 +247,11 @@ any Python object for ``x``), unless the language level is explicitly
set to 2. To mitigate the effect, Cython 3.0 still accepts both Python
``int`` and ``long`` values under Python 2.x.
+One potential issue you may encounter is that types like ``typing.List``
+are now understood in annotations (where previously they were ignored)
+and are interpreted to mean *exact* ``list``. This is stricter than
+the interpretation specified in PEP-484, which also allows subclasses.
+
To make it easier to handle cases where your interpretation of type
annotations differs from Cython's, Cython 3 now supports setting the
``annotation_typing`` :ref:`directive ` on a
diff --git a/docs/src/userguide/numpy_tutorial.rst b/docs/src/userguide/numpy_tutorial.rst
index d2828ebaba4..03db4cd1aab 100644
--- a/docs/src/userguide/numpy_tutorial.rst
+++ b/docs/src/userguide/numpy_tutorial.rst
@@ -6,6 +6,9 @@
Cython for NumPy users
**************************
+.. include::
+ ../two-syntax-variants-used
+
This tutorial is aimed at NumPy users who have no experience with Cython at
all. If you have some knowledge of Cython you may want to skip to the
''Efficient indexing'' section.
@@ -108,8 +111,13 @@ then execute :
This will install the newest Cython into SAGE.
+.. _numpy_compilation:
+
+Compilation
+===========
+
Manual compilation
-====================
+------------------
As it is always important to know what is going on, I'll describe the manual
method here. First Cython is run:
@@ -143,6 +151,60 @@ in your Cython code.
This creates :file:`yourmod.so` in the same directory, which is importable by
Python by using a normal ``import yourmod`` statement.
+
+Compilation using setuptools
+----------------------------
+
+Setuptools allows us to create a setup.py file to automate the compilation of both Cython files and generated C files::
+
+ from setuptools import Extension, setup
+ from Cython.Build import cythonize
+ import numpy
+
+ extensions = [
+ Extension("*", ["*.pyx"],
+ include_dirs=[numpy.get_include()]),
+ ]
+ setup(
+ name="My hello app",
+ ext_modules=cythonize(extensions),
+ )
+
+The path to the NumPy headers is passed to the C compiler via the ``include_dirs=[numpy.get_include()]`` parameter.
+
+.. note::
+
+ Using memoryviews or importing NumPy with ``import numpy`` does not mean that
+ you have to add the path to NumPy include files. You need to add this path only
+ if you use ``cimport numpy``.
+
+Despite this, you may still get warnings like the following from the compiler,
+because Cython is not disabling the usage of the old deprecated NumPy API::
+
+ .../include/numpy/npy_1_7_deprecated_api.h:15:2: warning: #warning "Using deprecated NumPy API, disable it by " "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" [-Wcpp]
+
+In Cython 3.0, you can get rid of this warning by defining the C macro
+``NPY_NO_DEPRECATED_API`` as ``NPY_1_7_API_VERSION``
+in your build, e.g.::
+
+ # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION
+
+or (see below)::
+
+ Extension(
+ ...,
+ define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
+ )
+
+With older Cython releases, setting this macro will fail the C compilation,
+because Cython generates code that uses this deprecated C-API. However, the
+warning has no negative effects even in recent NumPy versions.
+You can ignore it until you (or your library's users) switch to a newer NumPy
+version that removes this long deprecated API, in which case you also need to
+use Cython 3.0 or later. Thus, the earlier you switch to Cython 3.0, the
+better for your users.
+
+
The first Cython program
==========================
@@ -212,11 +274,22 @@ Adding types
=============
To add types we use custom Cython syntax, so we are now breaking Python source
-compatibility. Here's :file:`compute_typed.pyx`. *Read the comments!*
+compatibility. *Read the comments!*
+
+.. tabs::
+
+ .. group-tab:: Pure Python
-.. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_typed.pyx
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_typed.py
+ :caption: compute_typed.py
+ .. figure:: compute_typed_py_html.png
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_typed.pyx
+ :caption: compute_typed.pyx
+ .. figure:: compute_typed_pyx_html.png
-.. figure:: compute_typed_html.jpg
At this point, have a look at the generated C code for :file:`compute_cy.pyx` and
:file:`compute_typed.pyx`. Click on the lines to expand them and see corresponding C.
@@ -275,12 +348,27 @@ the NumPy array isn't contiguous in memory.
They can be indexed by C integers, thus allowing fast access to the
NumPy array data.
-Here is how to declare a memoryview of integers::
+Here is how to declare a memoryview of integers:
+
+.. tabs::
+
+ .. group-tab:: Pure Python
- cdef int [:] foo # 1D memoryview
- cdef int [:, :] foo # 2D memoryview
- cdef int [:, :, :] foo # 3D memoryview
- ... # You get the idea.
+ .. code-block:: python
+
+ foo: cython.int [:] # 1D memoryview
+ foo: cython.int [:, :] # 2D memoryview
+ foo: cython.int [:, :, :] # 3D memoryview
+ ... # You get the idea.
+
+ .. group-tab:: Cython
+
+ .. code-block:: cython
+
+ cdef int [:] foo # 1D memoryview
+ cdef int [:, :] foo # 2D memoryview
+ cdef int [:, :, :] foo # 3D memoryview
+ ... # You get the idea.
No data is copied from the NumPy array to the memoryview in our example.
As the name implies, it is only a "view" of the memory. So we can use the
@@ -289,9 +377,18 @@ array ``result`` that holds the data that we operated on.
Here is how to use them in our code:
-:file:`compute_memview.pyx`
-.. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_memview.pyx
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_memview.py
+ :caption: compute_memview.py
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_memview.pyx
+ :caption: compute_memview.pyx
Let's see how much faster accessing is now.
@@ -318,14 +415,32 @@ The array lookups are still slowed down by two factors:
explicitly coded so that it doesn't use negative indices, and it
(hopefully) always access within bounds.
-With decorators, we can deactivate those checks::
+With decorators, we can deactivate those checks:
- ...
- cimport cython
- @cython.boundscheck(False) # Deactivate bounds checking
- @cython.wraparound(False) # Deactivate negative indexing.
- def compute(int[:, :] array_1, int[:, :] array_2, int a, int b, int c):
- ...
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. code-block:: python
+
+ ...
+ import cython
+ @cython.boundscheck(False) # Deactivate bounds checking
+ @cython.wraparound(False) # Deactivate negative indexing.
+ def compute(array_1: cython.int[:, :], array_2: cython.int[:, :],
+ a: cython.int, b: cython.int, c: cython.int):
+ ...
+
+ .. group-tab:: Cython
+
+ .. code-block:: cython
+
+ ...
+ cimport cython
+ @cython.boundscheck(False) # Deactivate bounds checking
+ @cython.wraparound(False) # Deactivate negative indexing.
+ def compute(int[:, :] array_1, int[:, :] array_2, int a, int b, int c):
+ ...
Now bounds checking is not performed (and, as a side-effect, if you ''do''
happen to access out of bounds you will in the best case crash your program
@@ -366,14 +481,38 @@ memoryview as contiguous.
We give an example on an array that has 3 dimensions.
If you want to give Cython the information that the data is C-contiguous
-you have to declare the memoryview like this::
+you have to declare the memoryview like this:
+
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. code-block:: python
- cdef int [:,:,::1] a
+ a: cython.int[:,:,::1]
+
+ .. group-tab:: Cython
+
+ .. code-block:: cython
+
+ cdef int [:,:,::1] a
If you want to give Cython the information that the data is Fortran-contiguous
-you have to declare the memoryview like this::
+you have to declare the memoryview like this:
+
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. code-block:: python
+
+ a: cython.int[::1, :, :]
+
+ .. group-tab:: Cython
- cdef int [::1, :, :] a
+ .. code-block:: cython
+
+ cdef int [::1, :, :] a
If all this makes no sense to you, you can skip this part, declaring
arrays as contiguous constrains the usage of your functions as it rejects array slices as input.
@@ -409,7 +548,15 @@ our code. This is why, we must still declare manually the type of the
And actually, manually giving the type of the ``tmp`` variable will
be useful when using fused types.
-.. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_infer_types.pyx
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_infer_types.py
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_infer_types.pyx
We now do a speed test:
@@ -443,7 +590,15 @@ know what NumPy data type we should use for our output array.
In this case, our function now works for ints, doubles and floats.
-.. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_fused_types.pyx
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_fused_types.py
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_fused_types.pyx
We can check that the output type is the right one::
@@ -483,7 +638,25 @@ For MSVC (on Windows) you should use ``/openmp`` instead of ``-fopenmp``.
The GIL must be released (see :ref:`Releasing the GIL `), so this is why we
declare our :func:`clip` function ``nogil``.
-.. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_prange.pyx
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_prange.py
+ :lines: 3-
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/numpy_tutorial/compute_prange.pyx
+ :lines: 3-
+
+.. note::
+
+ Currently, Cython is checking whether there was a raised exception after every call of the function ``clip()``.
+ Checking a raised exception requires the GIL to be held which causes overhead inside a `nogil` loop.
+ The need to check here is a bug with functions returning a fused type (see :issue:`5586` for the details).
+ To avoid acquiring the GIL, the function is declared as ``noexcept`` or ``@cython.exceptval(check=False)``. See the :ref:`error_return_values` section for more details.
+
We can have substantial speed gains for minimal effort:
diff --git a/docs/src/userguide/parallelism.rst b/docs/src/userguide/parallelism.rst
index 7c342022cdb..015dcfbf4a4 100644
--- a/docs/src/userguide/parallelism.rst
+++ b/docs/src/userguide/parallelism.rst
@@ -20,7 +20,7 @@ It currently supports OpenMP, but later on more backends might be supported.
or parallel regions due to OpenMP restrictions.
-.. function:: prange([start,] stop[, step][, nogil=False][, schedule=None[, chunksize=None]][, num_threads=None])
+.. function:: prange([start,] stop[, step][, nogil=False][, use_threads_if=CONDITION][, schedule=None[, chunksize=None]][, num_threads=None])
This function can be used for parallel loops. OpenMP automatically
starts a thread pool and distributes the work according to the schedule
@@ -52,6 +52,12 @@ It currently supports OpenMP, but later on more backends might be supported.
This function can only be used with the GIL released.
If ``nogil`` is true, the loop will be wrapped in a nogil section.
+ :param use_threads_if: The loop is run in multiple threads only if ``CONDITION``
+ is evaluated as true. Otherwise the code is run sequentially. Running
+ the loop sequentially can be handy in the cases when the cost of spawning
+ threads is greater than the benefit of running the loop in parallel
+ (e.g. for small data sets).
+
:param schedule:
The ``schedule`` is passed to OpenMP and can be one of the following:
@@ -141,13 +147,25 @@ Example with a :term:`typed memoryview` (e.g. a NumPy array)
.. literalinclude:: ../../examples/userguide/parallelism/memoryview_sum.pyx
-.. function:: parallel(num_threads=None)
+Example with conditional parallelism:
+
+.. tabs::
+
+ .. group-tab:: Pure Python
+
+ .. literalinclude:: ../../examples/userguide/parallelism/condition_sum.py
+
+ .. group-tab:: Cython
+
+ .. literalinclude:: ../../examples/userguide/parallelism/condition_sum.pyx
+
+.. function:: parallel(num_threads=None, use_threads_if=CONDITION)
This directive can be used as part of a ``with`` statement to execute code
sequences in parallel. This is currently useful to setup thread-local
- buffers used by a prange. A contained prange will be a worksharing loop
+ buffers used by a ``prange``. A contained ``prange`` will be a worksharing loop
that is not parallel, so any variable assigned to in the parallel section
- is also private to the prange. Variables that are private in the parallel
+ is also private to the ``prange``. Variables that are private in the parallel
block are unavailable after the parallel block.
Example with thread-local buffers
diff --git a/docs/src/userguide/source_files_and_compilation.rst b/docs/src/userguide/source_files_and_compilation.rst
index 4ac8a8af6d5..18a473122a1 100644
--- a/docs/src/userguide/source_files_and_compilation.rst
+++ b/docs/src/userguide/source_files_and_compilation.rst
@@ -150,7 +150,11 @@ documentation`_. To compile the extension for use in the current directory use:
Configuring the C-Build
------------------------
-If you have include files in non-standard places you can pass an
+.. note::
+
+ More details on building Cython modules that use cimport numpy can be found in the :ref:`Numpy section ` of the user guide.
+
+If you have :ref:`Cython include files ` or :ref:`Cython definition files ` in non-standard places you can pass an
``include_path`` parameter to ``cythonize``::
from setuptools import setup
@@ -161,43 +165,6 @@ If you have include files in non-standard places you can pass an
ext_modules=cythonize("src/*.pyx", include_path=[...]),
)
-Often, Python packages that offer a C-level API provide a way to find
-the necessary include files, e.g. for NumPy::
-
- include_path = [numpy.get_include()]
-
-.. note::
-
- Using memoryviews or importing NumPy with ``import numpy`` does not mean that
- you have to add the path to NumPy include files. You need to add this path only
- if you use ``cimport numpy``.
-
-Despite this, you may still get warnings like the following from the compiler,
-because Cython is not disabling the usage of the old deprecated Numpy API::
-
- .../include/numpy/npy_1_7_deprecated_api.h:15:2: warning: #warning "Using deprecated NumPy API, disable it by " "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" [-Wcpp]
-
-In Cython 3.0, you can get rid of this warning by defining the C macro
-``NPY_NO_DEPRECATED_API`` as ``NPY_1_7_API_VERSION``
-in your build, e.g.::
-
- # distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION
-
-or (see below)::
-
- Extension(
- ...,
- define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
- )
-
-With older Cython releases, setting this macro will fail the C compilation,
-because Cython generates code that uses this deprecated C-API. However, the
-warning has no negative effects even in recent NumPy versions including 1.18.x.
-You can ignore it until you (or your library's users) switch to a newer NumPy
-version that removes this long deprecated API, in which case you also need to
-use Cython 3.0 or later. Thus, the earlier you switch to Cython 3.0, the
-better for your users.
-
If you need to specify compiler options, libraries to link with or other
linker options you will need to create ``Extension`` instances manually
(note that glob syntax can still be used to specify multiple extensions
@@ -222,9 +189,30 @@ in one line)::
ext_modules=cythonize(extensions),
)
+Some useful options to know about are
+
+* ``include_dirs`` - list of directories to search for C/C++ header files (in Unix form for portability),
+* ``libraries`` - list of library names (not filenames or paths) to link against,
+* ``library_dirs`` - list of directories to search for C/C++ libraries at link time.
+
Note that when using setuptools, you should import it before Cython, otherwise,
both might disagree about the class to use here.
+Often, Python packages that offer a C-level API provide a way to find
+the necessary C header files::
+
+ from setuptools import Extension, setup
+ from Cython.Build import cythonize
+
+ extensions = [
+ Extension("*", ["*.pyx"],
+ include_dirs=["/usr/local/include"]),
+ ]
+ setup(
+ name="My hello app",
+ ext_modules=cythonize(extensions),
+ )
+
If your options are static (for example you do not need to call a tool like
``pkg-config`` to determine them) you can also provide them directly in your
.pyx or .pxd source file using a special comment block at the start of the file::
@@ -258,9 +246,7 @@ as follows::
)
The :class:`Extension` class takes many options, and a fuller explanation can
-be found in the `setuptools documentation`_. Some useful options to know about
-are ``include_dirs``, ``libraries``, and ``library_dirs`` which specify where
-to find the ``.h`` and library files when linking to external libraries.
+be found in the `setuptools documentation`_.
.. _setuptools documentation: https://setuptools.readthedocs.io/
@@ -1123,3 +1109,136 @@ argument to ``cythonize``::
This will override the default directives as specified in the ``compiler_directives`` dictionary.
Note that explicit per-file or local directives as explained above take precedence over the
values passed to ``cythonize``.
+
+C macro defines
+===============
+
+Cython has a number of C macros that can be used to control compilation. Typically, these
+would be set using ``extra_compile_args`` in `setup.py` (for example
+``extra_compile_args=['-DCYTHON_USE_TYPE_SPECS=1']``), however they can also be set in
+other ways like using the ``CFLAGS`` environment variable.
+
+These macros are set automatically by Cython to sensible default values unless
+you choose to explicitly override them, so they are a tool that most users
+can happily ignore. Not all combinations of macros are compatible or tested, and
+some change the default value of other macros. They are listed below in rough order from
+most important to least important:
+
+``CYTHON_LIMITED_API``
+ Turns on Cython's experimental Limited API support, meaning that one compiled module
+ can be used by many Python interpreter versions (at the cost of some performance).
+ At this stage many features do not work in the Limited API. If you use this macro
+ you should also set the macro ``Py_LIMITED_API`` to be the version hex for the
+ minimum Python version you want to support (>=3.7). ``0x03070000`` will support
+ Python 3.7 upwards.
+
+``CYTHON_PEP489_MULTI_PHASE_INIT``
+ Uses multi-phase module initialization as described in PEP489. This improves
+ Python compatibility, especially when running the initial import of the code when it
+ makes attributes such as ``__file__`` available. It is therefore on by default
+ where supported.
+
+``CYTHON_USE_MODULE_STATE``
+ Stores module data on a struct associated with the module object rather than as
+ C global variables. The advantage is that it should be possible to import the
+ same module more than once (e.g. in different sub-interpreters). At the moment
+ this is experimental and not all data has been moved. It also requires that
+ ``CYTHON_PEP489_MULTI_PHASE_INIT`` is off - we plan to remove this limitation
+ in the future.
+
+``CYTHON_USE_TYPE_SPECS``
+ Defines ``cdef classes`` as `"heap types" <https://docs.python.org/3/c-api/typeobj.html#heap-types>`_
+ rather than "static types". Practically this does not change a lot from a user
+ point of view, but it is needed to implement Limited API support.
+
+``CYTHON_EXTERN_C``
+ Slightly different to the other macros, this controls how ``cdef public``
+ functions appear to C++ code. See :ref:`CYTHON_EXTERN_C` for full details.
+
+There is a further list of macros which turn off various optimizations or language
+features. Under normal circumstances Cython enables these automatically based on the
+version of Python you are compiling for so there is no need to use them
+to try to enable extra optimizations - all supported optimizations are enabled by
+default. These are mostly relevant if you're trying to get Cython working in a
+new and unsupported Python interpreter where you will typically want to set
+them to 0 to *disable* optimizations. They are listed below for completeness but
+hidden by default since most users will be uninterested in changing them.
+
+.. tabs::
+ .. tab:: Hide
+
+ .. tab:: Show
+
+ ``CYTHON_USE_TYPE_SLOTS``
+ If enabled, Cython will directly access members of the ``PyTypeObject``
+ struct.
+
+ ``CYTHON_USE_PYTYPE_LOOKUP``
+ Use the internal ``_PyType_Lookup()`` function for more efficient access
+ to properties of C classes.
+
+ ``CYTHON_USE_ASYNC_SLOTS``
+ Support the ``tp_as_async`` attribute on type objects.
+
+ ``CYTHON_USE_PYLONG_INTERNALS``/``CYTHON_USE_PYLIST_INTERNALS``/``CYTHON_USE_UNICODE_INTERNALS``
+ Enable optimizations based on direct access into the internals of Python
+ ``int``/``list``/``unicode`` objects respectively.
+
+ ``CYTHON_USE_UNICODE_WRITER``
+ Use a faster (but internal) mechanism for building unicode strings, for
+ example in f-strings.
+
+ ``CYTHON_AVOID_BORROWED_REFS``
+ Avoid using "borrowed references" and ensure that Cython always holds
+ a reference to objects it manipulates. Most useful for
+ non-reference-counted implementations of Python, like PyPy
+ (where it is enabled by default).
+
+ ``CYTHON_ASSUME_SAFE_MACROS``
+ Use some C-API macros that increase performance by skipping error checking,
+ which may not be safe on all Python implementations (e.g. PyPy).
+
+ ``CYTHON_ASSUME_SAFE_SIZE``
+ Prefer the ``Py*_GET_SIZE()`` C-API macros / inline-functions for builtin types
+ over their ``Py*_GetSize()`` counterparts if errors are not expected.
+
+ ``CYTHON_FAST_GIL``
+ On some Python versions this speeds up getting/releasing the GIL.
+
+ ``CYTHON_UNPACK_METHODS``
+ Try to speed up method calls at the cost of code-size. Linked to
+ the ``optimize.unpack_method_calls`` compiler directive - this macro
+ is used to selectively enable the compiler directive only on versions
+ of Python that support it.
+
+ ``CYTHON_METH_FASTCALL``/``CYTHON_FAST_PYCALL``
+ These are used internally to incrementally enable the vectorcall calling
+ mechanism on older Python versions (<3.8).
+
+ ``CYTHON_PEP487_INIT_SUBCLASS``
+ Enable `PEP-487 <https://peps.python.org/pep-0487/>`_ ``__init_subclass__`` behaviour.
+
+ ``CYTHON_USE_TP_FINALIZE``
+ Use the ``tp_finalize`` type-slot instead of ``tp_dealloc``,
+ as described in `PEP-442 <https://peps.python.org/pep-0442/>`_.
+
+ ``CYTHON_USE_DICT_VERSIONS``
+ Try to optimize attribute lookup by using versioned dictionaries
+ where supported.
+
+ ``CYTHON_USE_EXC_INFO_STACK``
+ Use an internal structure to track exception state,
+ used in CPython 3.7 and later.
+
+ ``CYTHON_UPDATE_DESCRIPTOR_DOC``
+ Attempt to provide docstrings also for special (double underscore) methods.
+
+ ``CYTHON_USE_FREELISTS``
+ Enable the use of freelists on extension types with
+ :ref:`the @cython.freelist decorator <freelist>`.
+
+ ``CYTHON_ATOMICS``
+ Enable the use of atomic reference counting (as opposed to locking then
+ reference counting) in Cython typed memoryviews.
+
+
diff --git a/docs/src/userguide/special_methods.rst b/docs/src/userguide/special_methods.rst
index 38b80e7cd4e..027a46cfa04 100644
--- a/docs/src/userguide/special_methods.rst
+++ b/docs/src/userguide/special_methods.rst
@@ -223,7 +223,7 @@ Depending on the application, one way or the other may be better:
These constants can be cimported from the ``cpython.object`` module.
-* If you use the `functools.total_ordering`_
+* If you use the `functools.total_ordering `_
decorator on an extension type/``cdef`` class, Cython replaces it with a low-level reimplementation
designed specifically for extension types. (On a normal Python classes, the ``functools``
decorator continues to work as before.) As a shortcut you can also use ``cython.total_ordering``, which
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000000..4561320c9dd
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,30 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+
+[tool.cibuildwheel]
+build-verbosity = 2
+skip = ["pp*", "cp36*"]
+# test-command = "make test"
+
+[tool.cibuildwheel.linux]
+archs = ["x86_64", "aarch64", "i686"]
+repair-wheel-command = "auditwheel repair --strip -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.linux.environment]
+CFLAGS = "-O3 -g0 -pipe -fPIC -march=core2"
+AR = "gcc-ar"
+NM = "gcc-nm"
+RANLIB = "gcc-ranlib"
+
+[[tool.cibuildwheel.overrides]]
+select = "*aarch64"
+environment = {CFLAGS = "-O3 -g0 -pipe -fPIC -march=armv8-a -mtune=cortex-a72", AR = "gcc-ar", NM = "gcc-nm", RANLIB = "gcc-ranlib" }
+
+[tool.cibuildwheel.windows]
+archs = ["AMD64", "x86"]
+
+[tool.cibuildwheel.macos]
+# https://cibuildwheel.readthedocs.io/en/stable/faq/#what-to-provide suggests to provide
+# x86_64 and one of universal2 or arm64 wheels. x86_64 is still required by older pips,
+# so additional arm64 wheels should suffice.
+archs = ["x86_64", "arm64"]
diff --git a/runtests.py b/runtests.py
index 3c0abc8bdd6..28a4d031636 100755
--- a/runtests.py
+++ b/runtests.py
@@ -1,7 +1,5 @@
#!/usr/bin/env python
-from __future__ import print_function
-
import atexit
import base64
import doctest
@@ -1901,6 +1899,7 @@ def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
+ from importlib import import_module
if include_debugger:
skipped_dirs = []
@@ -1927,9 +1926,7 @@ def package_matches(dirname):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
- module = __import__(modulename)
- for x in modulename.split('.')[1:]:
- module = getattr(module, x)
+ module = import_module(modulename)
suite.addTests([loader.loadTestsFromModule(module)])
@@ -1938,6 +1935,7 @@ def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
+
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
@@ -1947,7 +1945,10 @@ def file_matches(filename):
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
+
import doctest
+ from importlib import import_module
+
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
@@ -1966,9 +1967,7 @@ def file_matches(filename):
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
- module = __import__(modulename)
- for x in modulename.split('.')[1:]:
- module = getattr(module, x)
+ module = import_module(modulename)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
@@ -2022,30 +2021,20 @@ def tearDown(self):
break
os.chdir(self.old_dir)
- def _try_decode(self, content):
- if not isinstance(content, bytes):
- return content
- try:
- return content.decode()
- except UnicodeDecodeError:
- return content.decode('iso-8859-1')
-
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
- env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
- env['PYTHONPATH'] = new_path
- if not env.get("PYTHONIOENCODING"):
- env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
+ env = dict(os.environ, PYTHONPATH=new_path, PYTHONIOENCODING='utf8')
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
- with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
- 'etoe-build' if 'setup.py' in command else 'etoe-run'):
+ time_category = 'etoe-build' if (
+ 'setup.py' in command or 'cythonize.py' in command or 'cython.py' in command) else 'etoe-run'
+ with self.stats.time('%s(%d)' % (self.name, command_no), 'c', time_category):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
@@ -2055,21 +2044,21 @@ def runTest(self):
_out, _err = b'', b''
res = p
cmd.append(command)
- out.append(_out)
- err.append(_err)
+ out.append(_out.decode('utf-8'))
+ err.append(_err.decode('utf-8'))
if res == 0 and b'REFNANNY: ' in _out:
res = -1
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("[%d] %s\n%s\n%s\n\n" % (
- self.shard_num, c, self._try_decode(o), self._try_decode(e)))
+ self.shard_num, c, o, e))
sys.stderr.write("Final directory layout of '%s':\n%s\n\n" % (
self.name,
'\n'.join(os.path.join(dirpath, filename) for dirpath, dirs, files in os.walk(".") for filename in files),
))
self.assertEqual(0, res, "non-zero exit status, last output was:\n%r\n-- stdout:%s\n-- stderr:%s\n" % (
- ' '.join(command), self._try_decode(out[-1]), self._try_decode(err[-1])))
+ ' '.join(command), out[-1], err[-1]))
self.success = True
diff --git a/setup.py b/setup.py
index a2fe7929194..da679ce7458 100755
--- a/setup.py
+++ b/setup.py
@@ -247,7 +247,7 @@ def run_build():
name='Cython',
version=version,
url='https://cython.org/',
- author='Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',
+ author='Robert Bradshaw, Stefan Behnel, David Woods, Greg Ewing, et al.',
author_email='cython-devel@python.org',
description="The Cython compiler for writing C extensions in the Python language.",
long_description=textwrap.dedent("""\
@@ -264,12 +264,12 @@ def run_build():
This makes Cython the ideal language for writing glue code for external
C/C++ libraries, and for fast C modules that speed up the execution of
Python code.
-
- The newest Cython release can always be downloaded from https://cython.org/.
+
+ The newest Cython release can always be downloaded from https://cython.org/.
Unpack the tarball or zip file, enter the directory, and then run::
-
+
pip install .
-
+
Note that for one-time builds, e.g. for CI/testing, on platforms that are not
covered by one of the wheel packages provided on PyPI *and* the pure Python wheel
that we provide is not used, it is substantially faster than a full source build
diff --git a/tests/errors/e_cython_parallel.pyx b/tests/errors/e_cython_parallel.pyx
index f727d045534..9b37758b09d 100644
--- a/tests/errors/e_cython_parallel.pyx
+++ b/tests/errors/e_cython_parallel.pyx
@@ -149,6 +149,26 @@ with nogil, cython.parallel.parallel():
with cython.parallel.parallel():
pass
+cdef bint gil_function():
+ return True
+
+for i in prange(10, nogil=True, use_threads_if=gil_function()):
+ pass
+
+with nogil, parallel.parallel(use_threads_if=gil_function()):
+ pass
+
+def bar():
+
+ python_var = object()
+
+ cdef int i
+
+ for i in prange(10, nogil=True, use_threads_if=python_var):
+ pass
+
+ with nogil, parallel.parallel(use_threads_if=python_var):
+ pass
_ERRORS = u"""
3:8: cython.parallel.parallel is not a module
@@ -184,4 +204,8 @@ _ERRORS = u"""
139:62: Chunksize not valid for the schedule runtime
145:70: Calling gil-requiring function not allowed without gil
149:33: Nested parallel with blocks are disallowed
+155:59: Calling gil-requiring function not allowed without gil
+158:57: Calling gil-requiring function not allowed without gil
+167:51: use_threads_if may not be a Python object as we don't have the GIL
+170:49: use_threads_if may not be a Python object as we don't have the GIL
"""
diff --git a/tests/errors/e_excvalfunctype.pyx b/tests/errors/e_excvalfunctype.pyx
index fb5b4c7ce67..374edc53422 100644
--- a/tests/errors/e_excvalfunctype.pyx
+++ b/tests/errors/e_excvalfunctype.pyx
@@ -11,6 +11,6 @@ spam = grail # type mismatch
_ERRORS = u"""
-9:8: Cannot assign type 'spamfunc' to 'grailfunc'. Exception values are incompatible. Suggest adding 'noexcept' to type 'int (int, char *) except 42'.
-10:7: Cannot assign type 'grailfunc' to 'spamfunc'. Exception values are incompatible.
+9:8: Cannot assign type 'spamfunc' (alias of 'int (*)(int, char *) except 42') to 'grailfunc' (alias of 'int (*)(int, char *) noexcept'). Exception values are incompatible. Suggest adding 'noexcept' to type 'int (int, char *) except 42'.
+10:7: Cannot assign type 'grailfunc' (alias of 'int (*)(int, char *) noexcept') to 'spamfunc' (alias of 'int (*)(int, char *) except 42'). Exception values are incompatible.
"""
diff --git a/tests/memoryview/memoryview.pyx b/tests/memoryview/memoryview.pyx
index 536a0c77e42..41411da018e 100644
--- a/tests/memoryview/memoryview.pyx
+++ b/tests/memoryview/memoryview.pyx
@@ -1313,5 +1313,6 @@ def test_untyped_index(i):
return mview_arr[i] # should generate a performance hint
_PERFORMANCE_HINTS = """
+243:9: Use boundscheck(False) for faster access
1313:21: Index should be typed for more efficient access
"""
diff --git a/tests/run/annotation_typing.pyx b/tests/run/annotation_typing.pyx
index fbdd62b0498..e6f5caac1ce 100644
--- a/tests/run/annotation_typing.pyx
+++ b/tests/run/annotation_typing.pyx
@@ -391,6 +391,24 @@ def int_alias(a: cython.int, b: cy_i):
print(cython.typeof(b))
+def test_inexact_types(d: dict):
+ """
+ >>> test_inexact_types({}) # good
+
+ Check that our custom pep484 warning is in either the error message
+ or the exception notes
+ >>> from collections import OrderedDict
+ >>> try:
+ ... test_inexact_types(OrderedDict())
+ ... except TypeError as e:
+ ... assert ("Cython is deliberately stricter than PEP-484" in e.args[0] or
+ ... any("Cython is deliberately stricter than PEP-484" in note for note in getattr(e, "__notes__", []))), e
+ ... else:
+ ... assert False
+ """
+ pass
+
+
_WARNINGS = """
15:32: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
15:47: Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.
diff --git a/tests/run/async_def.pyx b/tests/run/async_def.pyx
index bf5f653245c..262560ee912 100644
--- a/tests/run/async_def.pyx
+++ b/tests/run/async_def.pyx
@@ -8,15 +8,16 @@ Cython specific tests in addition to "test_coroutines_pep492.pyx"
"""
-def run_async(coro):
- #assert coro.__class__ is types.GeneratorType
- assert coro.__class__.__name__ in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
+def run_async(coro, assert_type=True, send_value=None):
+ if assert_type:
+ #assert coro.__class__ is types.GeneratorType
+ assert coro.__class__.__name__ in ('coroutine', '_GeneratorWrapper'), coro.__class__.__name__
buffer = []
result = None
while True:
try:
- buffer.append(coro.send(None))
+ buffer.append(coro.send(send_value))
except StopIteration as ex:
result = ex.value
break
@@ -58,3 +59,46 @@ async def outer_with_nested(called):
called.append('return inner')
return inner
+
+# used in "await_in_genexpr_iterator"
+async def h(arg):
+ return [arg, arg+1]
+
+async def await_in_genexpr_iterator():
+ """
+ >>> _, x = run_async(await_in_genexpr_iterator())
+ >>> x
+ [4, 6]
+ """
+ lst = list # obfuscate from any optimizations cython might try
+ return lst(x*2 for x in await h(2))
+
+def yield_in_genexpr_iterator():
+ """
+ Same test as await_in_genexpr_iterator but with yield.
+ (Possibly in the wrong place, but grouped with related tests)
+
+ >>> g = yield_in_genexpr_iterator()
+ >>> g.send(None)
+ >>> _, x = run_async(g, assert_type=False, send_value=[2, 3])
+ >>> x
+ [4, 6]
+ """
+ lst = list # obfuscate from any optimizations cython might try
+ return lst(x*2 for x in (yield))
+
+def h_yield_from(arg):
+ yield
+ return [arg, arg+1]
+
+def yield_from_in_genexpr_iterator():
+ """
+ Same test as await_in_genexpr_iterator but with "yield from".
+ (Possibly in the wrong place, but grouped with related tests)
+
+ >>> _, x = run_async(yield_from_in_genexpr_iterator(), assert_type=False)
+ >>> x
+ [4, 6]
+ """
+ lst = list # obfuscate from any optimizations cython might try
+ return lst(x*2 for x in (yield from h_yield_from(2)))
diff --git a/tests/run/bytesmethods.pyx b/tests/run/bytesmethods.pyx
index 5973a7334d4..ac4e9b719d0 100644
--- a/tests/run/bytesmethods.pyx
+++ b/tests/run/bytesmethods.pyx
@@ -259,7 +259,7 @@ def bytes_join(bytes s, *args):
babab
"""
result = s.join(args)
- assert cython.typeof(result) == 'Python object', cython.typeof(result)
+ assert cython.typeof(result) == 'bytes object', cython.typeof(result)
return result
@@ -275,7 +275,7 @@ def literal_join(*args):
b|b|b|b
"""
result = b'|'.join(args)
- assert cython.typeof(result) == 'Python object', cython.typeof(result)
+ assert cython.typeof(result) == 'bytes object', cython.typeof(result)
return result
def fromhex(bytes b):
diff --git a/tests/run/call_py_cy.pyx b/tests/run/call_py_cy.pyx
index 0bacd2cf593..ec6ff35ef14 100644
--- a/tests/run/call_py_cy.pyx
+++ b/tests/run/call_py_cy.pyx
@@ -4,8 +4,11 @@
#######
# Test that Cython and Python functions can call each other in various signature combinations.
+# and check that the right calls use vectorcall (PyMethodCallNode)
#######
+cimport cython
+
py_call_noargs = eval("lambda: 'noargs'")
py_call_onearg = eval("lambda arg: arg")
py_call_twoargs = eval("lambda arg, arg2: (arg, arg2)")
@@ -15,6 +18,7 @@ py_call_starstarargs = eval("lambda **kw: sorted(kw.items())")
py_call_args_and_starstarargs = eval("lambda *args, **kw: (args, sorted(kw.items()))")
+#@cython.test_fail_if_path_exists("//PyMethodCallNode")
def cy_call_noargs():
"""
>>> cy_call_noargs()
@@ -23,6 +27,7 @@ def cy_call_noargs():
return py_call_noargs()
+@cython.test_assert_path_exists("//PyMethodCallNode")
def cy_call_onearg(f):
"""
>>> cy_call_onearg(py_call_onearg)
@@ -43,6 +48,7 @@ def cy_call_onearg(f):
return f('onearg')
+@cython.test_assert_path_exists("//PyMethodCallNode")
def cy_call_twoargs(f, arg):
"""
>>> cy_call_twoargs(py_call_twoargs, 132)
@@ -61,6 +67,25 @@ def cy_call_twoargs(f, arg):
return f(arg, 'twoargs')
+@cython.test_assert_path_exists("//PyMethodCallNode")
+def cy_call_arg_and_kwarg(f, arg):
+ """
+ >>> cy_call_arg_and_kwarg(py_call_twoargs, 123)
+ (123, 'twoargs')
+
+
+ >>> class Class(object):
+ ... def method1(self, arg, arg2): return arg, arg2
+ ... def method2(self, arg): return arg
+ >>> cy_call_arg_and_kwarg(Class().method1, 123)
+ (123, 'twoargs')
+ >>> cy_call_twoargs(Class.method2, Class())
+ 'twoargs'
+ """
+ return f(arg, arg2='twoargs')
+
+
+@cython.test_assert_path_exists("//PyMethodCallNode")
def cy_call_two_kwargs(f, arg):
"""
>>> cy_call_two_kwargs(py_call_twoargs, arg=132)
@@ -79,6 +104,7 @@ def cy_call_two_kwargs(f, arg):
return f(arg2='two-kwargs', arg=arg)
+@cython.test_fail_if_path_exists("//PyMethodCallNode")
def cy_call_starargs(*args):
"""
>>> cy_call_starargs()
@@ -93,6 +119,7 @@ def cy_call_starargs(*args):
return py_call_starargs(*args)
+@cython.test_fail_if_path_exists("//PyMethodCallNode")
def cy_call_pos_and_starargs(f, *args):
"""
>>> cy_call_pos_and_starargs(py_call_onearg)
@@ -127,6 +154,10 @@ def cy_call_pos_and_starargs(f, *args):
return f(args[0] if args else 'no-arg', *args[1:])
+# Choice of whether to use PyMethodCallNode here is pretty arbitrary -
+# vectorcall_dict or PyObject_Call are likely to be fairly similar cost.
+# The test is for the current behaviour but it isn't a big issue if it changes
+@cython.test_fail_if_path_exists("//PyMethodCallNode")
def cy_call_starstarargs(**kw):
"""
>>> kw = {}
@@ -142,6 +173,10 @@ def cy_call_starstarargs(**kw):
return py_call_starstarargs(**kw)
+# Choice of whether to use PyMethodCallNode here is pretty arbitrary -
+# vectorcall_dict or PyObject_Call are likely to be fairly similar cost.
+# The test is for the current behaviour but it isn't a big issue if it changes
+@cython.test_fail_if_path_exists("//PyMethodCallNode")
def cy_call_kw_and_starstarargs(f=None, arg1=None, **kw):
"""
>>> kw = {}
@@ -202,6 +237,7 @@ def cy_call_kw_and_starstarargs(f=None, arg1=None, **kw):
return (f or py_call_starstarargs)(arg=arg1, **kw)
+@cython.test_assert_path_exists("//PyMethodCallNode")
def cy_call_pos_and_starstarargs(f=None, arg1=None, **kw):
"""
>>> cy_call_pos_and_starstarargs(arg=123)
diff --git a/tests/run/cpdef_enums_import.srctree b/tests/run/cpdef_enums_import.srctree
index 928a2d0b156..7dd6e2fbe82 100644
--- a/tests/run/cpdef_enums_import.srctree
+++ b/tests/run/cpdef_enums_import.srctree
@@ -8,7 +8,7 @@ from Cython.Build.Dependencies import cythonize
from distutils.core import setup
setup(
- ext_modules = cythonize(["enums.pyx", "no_enums.pyx"]),
+ ext_modules = cythonize(["enums.pyx", "enums_same_name.pyx", "no_enums.pyx"]),
)
######## enums.pyx ########
@@ -28,6 +28,14 @@ cpdef enum NamedEnumType:
cpdef foo()
+######## enums_same_name.pyx ############
+
+######## enums_same_name.pxd ############
+
+# Note - same name as enums.pxd but shouldn't conflict
+cpdef enum NamedEnumType:
+ Value = 1
+
######## enums_without_pyx.pxd #####
cpdef enum EnumTypeNotInPyx:
@@ -37,10 +45,16 @@ cpdef enum EnumTypeNotInPyx:
from enums cimport *
from enums_without_pyx cimport *
+cimport enums_same_name
def get_named_enum_value():
return NamedEnumType.NamedEnumValue
+def get_from_enums_same_name():
+ # This should not generate conflicting "to py" functions with the other
+ # identically named enum from a different pxd file.
+ return enums_same_name.NamedEnumType.Value
+
def get_named_without_pyx():
# This'll generate a warning but return a c int
return EnumTypeNotInPyx.AnotherEnumValue
@@ -49,6 +63,7 @@ def get_named_without_pyx():
# We can import enums with a star import.
from enums import *
+import enums_same_name
print(dir())
assert 'BAR' in dir() and 'FOO' in dir()
@@ -64,3 +79,5 @@ assert no_enums.get_named_enum_value() == NamedEnumType.NamedEnumValue
# In this case the enum isn't accessible from Python (by design)
# but the conversion to Python goes through a reasonable fallback
assert no_enums.get_named_without_pyx() == 500
+
+assert no_enums.get_from_enums_same_name() == enums_same_name.NamedEnumType.Value
diff --git a/tests/run/cpdef_extern_func.pyx b/tests/run/cpdef_extern_func.pyx
index e1ba3d09617..4b708e03e40 100644
--- a/tests/run/cpdef_extern_func.pyx
+++ b/tests/run/cpdef_extern_func.pyx
@@ -1,6 +1,5 @@
# cython: c_string_type=str
# cython: c_string_encoding=ascii
-# distutils: extra_compile_args=-fpermissive
__doc__ = """
>>> sqrt(1)
@@ -9,9 +8,16 @@ __doc__ = """
2.0
>>> pxd_sqrt(9)
3.0
+
>>> log(10) # doctest: +ELLIPSIS
Traceback (most recent call last):
NameError: ...name 'log' is not defined
+
+>>> my_strchr('abcabc', ord('c'))
+'cabc'
+>>> my_strchr(needle=ord('c'), haystack='abcabc')
+'cabc'
+
>>> strchr('abcabc', ord('c'))
'cabc'
>>> strchr(needle=ord('c'), haystack='abcabc')
@@ -24,5 +30,14 @@ cdef extern from "math.h":
cdef double log(double) # not wrapped
cdef extern from "string.h":
- # signature must be exact in C++, disagrees with C
- cpdef const char* strchr(const char *haystack, int needle);
+ """
+ /* The return type of strchr differs between C and C++.
+ This test is not interested in that, so create a wrapper function
+ with a known return type.
+ */
+ static const char* my_strchr(const char *haystack, int needle) {
+ return strchr(haystack, needle);
+ }
+ """
+ cpdef const char* my_strchr(const char *haystack, int needle)
+ cpdef const char* strchr "my_strchr" (const char *haystack, int needle)
diff --git a/tests/run/cython_includes.pyx b/tests/run/cython_includes.pyx
index af91f6f9e80..efdca6e9bb9 100644
--- a/tests/run/cython_includes.pyx
+++ b/tests/run/cython_includes.pyx
@@ -12,7 +12,6 @@ cimport cpython.bytearray
cimport cpython.bytes
cimport cpython.cellobject
cimport cpython.ceval
-cimport cpython.cobject
cimport cpython.codecs
cimport cpython.complex
cimport cpython.contextvars
@@ -26,7 +25,6 @@ cimport cpython.function
cimport cpython.genobject
cimport cpython.getargs
cimport cpython.instance
-cimport cpython.int
cimport cpython.iterator
cimport cpython.iterobject
cimport cpython.list
@@ -40,7 +38,6 @@ cimport cpython.method
cimport cpython.module
cimport cpython.number
cimport cpython.object
-cimport cpython.oldbuffer
cimport cpython.pycapsule
cimport cpython.pylifecycle
cimport cpython.pystate
diff --git a/tests/run/dict_pop.pyx b/tests/run/dict_pop.pyx
index 1efd5b82221..a8a09ed9888 100644
--- a/tests/run/dict_pop.pyx
+++ b/tests/run/dict_pop.pyx
@@ -3,6 +3,11 @@
cimport cython
+class FailHash:
+ def __hash__(self):
+ raise TypeError()
+
+
@cython.test_assert_path_exists("//PythonCapiCallNode")
@cython.test_fail_if_path_exists("//AttributeNode")
def dict_pop(dict d, key):
@@ -10,6 +15,11 @@ def dict_pop(dict d, key):
>>> d = { 1: 10, 2: 20 }
>>> dict_pop(d, 1)
(10, {2: 20})
+ >>> dict_pop(d, FailHash())
+ Traceback (most recent call last):
+ TypeError
+ >>> d
+ {2: 20}
>>> dict_pop(d, 3)
Traceback (most recent call last):
KeyError: 3
@@ -26,6 +36,11 @@ def dict_pop_default(dict d, key, default):
>>> d = { 1: 10, 2: 20 }
>>> dict_pop_default(d, 1, "default")
(10, {2: 20})
+ >>> dict_pop_default(d, FailHash(), 30)
+ Traceback (most recent call last):
+ TypeError
+ >>> d
+ {2: 20}
>>> dict_pop_default(d, 3, None)
(None, {2: 20})
>>> dict_pop_default(d, 3, "default")
@@ -36,6 +51,26 @@ def dict_pop_default(dict d, key, default):
return d.pop(key, default), d
+@cython.test_assert_path_exists("//PythonCapiCallNode")
+@cython.test_fail_if_path_exists("//AttributeNode")
+def dict_pop_ignored(dict d, key):
+ """
+ >>> d = {1: 2, 'a': 'b'}
+ >>> dict_pop_ignored(d, 'a')
+ >>> d
+ {1: 2}
+ >>> dict_pop_ignored(d, FailHash())
+ Traceback (most recent call last):
+ TypeError
+ >>> d
+ {1: 2}
+ >>> dict_pop_ignored(d, 123)
+ >>> d
+ {1: 2}
+ """
+ d.pop(key, None)
+
+
cdef class MyType:
cdef public int i
def __init__(self, i):
diff --git a/tests/run/fused_cpdef.pxd b/tests/run/fused_cpdef.pxd
new file mode 100644
index 00000000000..e4f4a600dd6
--- /dev/null
+++ b/tests/run/fused_cpdef.pxd
@@ -0,0 +1,4 @@
+cimport cython
+
+cdef class C:
+ cpdef object has_default_struct(self, cython.floating x, a=?)
diff --git a/tests/run/fused_cpdef.pyx b/tests/run/fused_cpdef.pyx
index 3979570b75c..4f569c24c3e 100644
--- a/tests/run/fused_cpdef.pyx
+++ b/tests/run/fused_cpdef.pyx
@@ -205,3 +205,16 @@ def test_defaults():
>>> mutable_default(3,[])
[3]
"""
+
+cdef class C:
+ cpdef object has_default_struct(self, cython.floating x, a=None):
+ return x, a
+
+# https://github.com/cython/cython/issues/5588
+# On some Python versions this was causing a compiler crash
+def test_call_has_default_struct(C c, double x):
+ """
+ >>> test_call_has_default_struct(C(), 5.)
+ (5.0, None)
+ """
+ return c.has_default_struct(x)
diff --git a/tests/run/fused_types.pyx b/tests/run/fused_types.pyx
index 72305d3a6da..31d5cf1d594 100644
--- a/tests/run/fused_types.pyx
+++ b/tests/run/fused_types.pyx
@@ -24,6 +24,13 @@ fused_type3 = cython.fused_type(int, double)
fused_composite = cython.fused_type(fused_type2, fused_type3)
just_float = cython.fused_type(float)
+ctypedef int inttypedef
+ctypedef double doubletypedef
+fused_with_typedef = cython.fused_type(inttypedef, doubletypedef)
+
+ctypedef float const_inttypedef # misleading name
+fused_misleading_name = cython.fused_type(const_inttypedef, char)
+
def test_pure():
"""
@@ -554,6 +561,26 @@ def convert_to_ptr(cython.floating x):
elif cython.floating is double:
return handle_double(&x)
+def constfused_with_typedef(const fused_with_typedef[:] x):
+ """
+ >>> constfused_with_typedef(get_array(8, 'd'))
+ 5.0
+ >>> constfused_with_typedef(get_intc_array())
+ 5
+ """
+ return x[5]
+
+def constfused_typedef_name_clashes(const fused_with_typedef[:] x, fused_misleading_name[:] y):
+ """
+ This'll deliberately end up with two typedefs that generate the same name in dispatch code
+ (and thus one needs to end up numbered to make it work).
+ It's mainly a compile test and the runtime part is fairly token.
+
+ >>> constfused_typedef_name_clashes(get_intc_array(), get_array(4, 'f'))
+ (5, 5.0)
+ """
+ return x[5], y[5]
+
cdef double get_double():
return 1.0
cdef float get_float():
diff --git a/tests/run/hasattr.pyx b/tests/run/hasattr.pyx
index 00d005d6cf4..700bfb74bcc 100644
--- a/tests/run/hasattr.pyx
+++ b/tests/run/hasattr.pyx
@@ -31,10 +31,15 @@ def wrap_hasattr(obj, name):
False
>>> Foo().baz #doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
ZeroDivisionError: ...
- >>> wrap_hasattr(Foo(), "baz")
+ >>> import sys
+ >>> if sys.version_info < (3,13): wrap_hasattr(Foo(), "baz") # doctest: +ELLIPSIS
+ ... else: print(False)
False
+ >>> if sys.version_info >= (3,13): wrap_hasattr(Foo(), "baz") # doctest: +ELLIPSIS
+ ... else: raise ZeroDivisionError
+ Traceback (most recent call last):
+ ZeroDivisionError...
>>> hasattr(Foo(), None) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
diff --git a/tests/run/line_trace.pyx b/tests/run/line_trace.pyx
index ea84fa15182..4bc5ab56e1f 100644
--- a/tests/run/line_trace.pyx
+++ b/tests/run/line_trace.pyx
@@ -4,6 +4,8 @@
# tag: trace
import sys
+import gc
+from contextlib import contextmanager
from cpython.ref cimport PyObject, Py_INCREF, Py_XDECREF
@@ -180,6 +182,17 @@ def py_return(retval=123): return retval
""", plain_python_functions)
+@contextmanager
+def gc_off():
+ was_enabled = gc.isenabled()
+ gc.disable()
+ try:
+ yield
+ finally:
+ if was_enabled:
+ gc.enable()
+
+
def run_trace(func, *args, bint with_sys=False):
"""
>>> py_add = plain_python_functions['py_add']
@@ -229,17 +242,18 @@ def run_trace(func, *args, bint with_sys=False):
"""
trace = []
trace_func = _create_trace_func(trace)
- if with_sys:
- sys.settrace(trace_func)
- else:
- PyEval_SetTrace(trace_trampoline, trace_func)
- try:
- func(*args)
- finally:
+ with gc_off():
if with_sys:
- sys.settrace(None)
+ sys.settrace(trace_func)
else:
- PyEval_SetTrace(NULL, NULL)
+ PyEval_SetTrace(trace_trampoline, trace_func)
+ try:
+ func(*args)
+ finally:
+ if with_sys:
+ sys.settrace(None)
+ else:
+ PyEval_SetTrace(NULL, NULL)
return trace
@@ -278,24 +292,25 @@ def run_trace_with_exception(func, bint with_sys=False, bint fail=False):
"""
trace = ['cy_try_except' if fail else 'NO ERROR']
trace_func = _create__failing_line_trace_func(trace) if fail else _create_trace_func(trace)
- if with_sys:
- sys.settrace(trace_func)
- else:
- PyEval_SetTrace(trace_trampoline, trace_func)
- try:
- try:
- retval = cy_try_except(func)
- except ValueError as exc:
- print("%s(%r)" % (type(exc).__name__, str(exc)))
- except AttributeError as exc:
- print("%s(%r)" % (type(exc).__name__, str(exc)))
- else:
- print('OK: %r' % retval)
- finally:
+ with gc_off():
if with_sys:
- sys.settrace(None)
+ sys.settrace(trace_func)
else:
- PyEval_SetTrace(NULL, NULL)
+ PyEval_SetTrace(trace_trampoline, trace_func)
+ try:
+ try:
+ retval = cy_try_except(func)
+ except ValueError as exc:
+ print("%s(%r)" % (type(exc).__name__, str(exc)))
+ except AttributeError as exc:
+ print("%s(%r)" % (type(exc).__name__, str(exc)))
+ else:
+ print('OK: %r' % retval)
+ finally:
+ if with_sys:
+ sys.settrace(None)
+ else:
+ PyEval_SetTrace(NULL, NULL)
return trace[1:]
@@ -312,11 +327,12 @@ def fail_on_call_trace(func, *args):
"""
trace = []
trace_func = _create_failing_call_trace_func(trace)
- PyEval_SetTrace(trace_trampoline, trace_func)
- try:
- func(*args)
- finally:
- PyEval_SetTrace(NULL, NULL)
+ with gc_off():
+ PyEval_SetTrace(trace_trampoline, trace_func)
+ try:
+ func(*args)
+ finally:
+ PyEval_SetTrace(NULL, NULL)
assert not trace
@@ -376,20 +392,21 @@ def fail_on_line_trace(fail_func, add_func, nogil_add_func):
trace = ['NO ERROR']
exception = None
trace_func = _create__failing_line_trace_func(trace)
- PyEval_SetTrace(trace_trampoline, trace_func)
- try:
- x += 1
- add_func(1, 2)
- x += 1
- if fail_func:
- trace[0] = fail_func # trigger error on first line
- x += 1
- nogil_add_func(3, 4)
- x += 1
- except Exception as exc:
- exception = str(exc)
- finally:
- PyEval_SetTrace(NULL, NULL)
+ with gc_off():
+ PyEval_SetTrace(trace_trampoline, trace_func)
+ try:
+ x += 1
+ add_func(1, 2)
+ x += 1
+ if fail_func:
+ trace[0] = fail_func # trigger error on first line
+ x += 1
+ nogil_add_func(3, 4)
+ x += 1
+ except Exception as exc:
+ exception = str(exc)
+ finally:
+ PyEval_SetTrace(NULL, NULL)
if exception:
print(exception)
else:
@@ -417,15 +434,16 @@ def disable_trace(func, *args, bint with_sys=False):
"""
trace = []
trace_func = _create_disable_tracing(trace)
- if with_sys:
- sys.settrace(trace_func)
- else:
- PyEval_SetTrace(trace_trampoline, trace_func)
- try:
- func(*args)
- finally:
+ with gc_off():
if with_sys:
- sys.settrace(None)
+ sys.settrace(trace_func)
else:
- PyEval_SetTrace(NULL, NULL)
+ PyEval_SetTrace(trace_trampoline, trace_func)
+ try:
+ func(*args)
+ finally:
+ if with_sys:
+ sys.settrace(None)
+ else:
+ PyEval_SetTrace(NULL, NULL)
return trace
diff --git a/tests/run/nogil.pyx b/tests/run/nogil.pyx
index 0c1bfb0ceef..feed30ed324 100644
--- a/tests/run/nogil.pyx
+++ b/tests/run/nogil.pyx
@@ -200,19 +200,19 @@ def test_performance_hint_nogil():
# Note that we're only able to check the first line of the performance hint
_PERFORMANCE_HINTS = """
-20:9: Exception check will always require the GIL to be acquired.
+20:9: Exception check after calling 'f' will always require the GIL to be acquired.
24:5: Exception check on 'f' will always require the GIL to be acquired.
34:5: Exception check on 'release_gil_in_nogil' will always require the GIL to be acquired.
39:6: Exception check on 'release_gil_in_nogil2' will always require the GIL to be acquired.
-49:28: Exception check will always require the GIL to be acquired.
-51:29: Exception check will always require the GIL to be acquired.
+49:28: Exception check after calling 'release_gil_in_nogil' will always require the GIL to be acquired.
+51:29: Exception check after calling 'release_gil_in_nogil2' will always require the GIL to be acquired.
55:5: Exception check on 'get_gil_in_nogil' will always require the GIL to be acquired.
59:6: Exception check on 'get_gil_in_nogil2' will always require the GIL to be acquired.
-68:24: Exception check will always require the GIL to be acquired.
-70:25: Exception check will always require the GIL to be acquired.
+68:24: Exception check after calling 'get_gil_in_nogil' will always require the GIL to be acquired.
+70:25: Exception check after calling 'get_gil_in_nogil2' will always require the GIL to be acquired.
133:5: Exception check on 'copy_array_exception' will always require the GIL to be acquired.
-184:28: Exception check will always require the GIL to be acquired.
+184:28: Exception check after calling 'copy_array_exception' will always require the GIL to be acquired.
187:5: Exception check on 'voidexceptnogil_in_pxd' will always require the GIL to be acquired.
-195:30: Exception check will always require the GIL to be acquired.
-198:36: Exception check will always require the GIL to be acquired.
+195:30: Exception check after calling 'voidexceptnogil_in_pxd' will always require the GIL to be acquired.
+198:36: Exception check after calling 'voidexceptnogil_in_other_pxd' will always require the GIL to be acquired.
"""
diff --git a/tests/run/parallel.pyx b/tests/run/parallel.pyx
index 44a3a38f2dc..acdf12fe014 100644
--- a/tests/run/parallel.pyx
+++ b/tests/run/parallel.pyx
@@ -36,14 +36,31 @@ cdef int get_num_threads() noexcept with gil:
print "get_num_threads called"
return 3
-def test_num_threads():
+cdef bint check_size(int size) nogil:
+ return size > 5
+
+def test_num_threads(int size):
"""
- >>> test_num_threads()
+ >>> test_num_threads(6)
1
get_num_threads called
3
get_num_threads called
3
+ get_num_threads called
+ 3
+ get_num_threads called
+ 3
+ >>> test_num_threads(4)
+ 1
+ get_num_threads called
+ 1
+ get_num_threads called
+ 1
+ get_num_threads called
+ 1
+ get_num_threads called
+ 1
"""
cdef int dyn = openmp.omp_get_dynamic()
cdef int num_threads
@@ -56,14 +73,27 @@ def test_num_threads():
print num_threads
- with nogil, cython.parallel.parallel(num_threads=get_num_threads()):
+ with nogil, cython.parallel.parallel(num_threads=get_num_threads(), use_threads_if=size > 5):
+ p[0] = openmp.omp_get_num_threads()
+
+ print num_threads
+
+ # Checks that temporary variables are released properly
+ with nogil, cython.parallel.parallel(num_threads=get_num_threads(), use_threads_if=check_size(size)):
p[0] = openmp.omp_get_num_threads()
print num_threads
cdef int i
+ # Checks that temporary variables are released properly
+ for i in prange(1, nogil=True, num_threads=get_num_threads(), use_threads_if=check_size(size)):
+ p[0] = openmp.omp_get_num_threads()
+ break
+
+ print num_threads
+
num_threads = 0xbad
- for i in prange(1, nogil=True, num_threads=get_num_threads()):
+ for i in prange(1, nogil=True, num_threads=get_num_threads(), use_threads_if=size > 5):
p[0] = openmp.omp_get_num_threads()
break
diff --git a/tests/run/pure_cdef_class_dataclass.py b/tests/run/pure_cdef_class_dataclass.py
index f8136958e1c..54f5ff957d7 100644
--- a/tests/run/pure_cdef_class_dataclass.py
+++ b/tests/run/pure_cdef_class_dataclass.py
@@ -76,3 +76,14 @@ def __post_init__(self):
# and not initializing it will mess up repr
assert not hasattr(self, "neither")
self.neither = None
+
+
+@cython.dataclasses.dataclass
+class NonInitDefaultArgument:
+ """
+ >>> NonInitDefaultArgument(1.0, "hello")
+ NonInitDefaultArgument(x=1.0, y=10, z='hello')
+ """
+ x: float
+ y: int = cython.dataclasses.field(default=10, init=False)
+ z: str # This is allowed despite following a default argument, because the default argument isn't in init
diff --git a/tests/run/r_docstrings.pyx b/tests/run/r_docstrings.pyx
index 4ee3f87351e..514946b53a4 100644
--- a/tests/run/r_docstrings.pyx
+++ b/tests/run/r_docstrings.pyx
@@ -42,8 +42,8 @@ Compare with standard Python:
... '''
... This is a function docstring.
... '''
- >>> Pyf.__doc__
- '\\n This is a function docstring.\\n '
+ >>> Pyf.__doc__.strip() # .strip() is needed because Py3.13 removes the indentation.
+ 'This is a function docstring.'
>>> class PyC(object):
... '''
@@ -58,21 +58,21 @@ Compare with standard Python:
>>> class PyCSS(PyCS):
... docstring_copy_CSS = __doc__
- >>> PyC.__doc__
- '\\n This is a class docstring.\\n '
- >>> PyC.docstring_copy_C
- '\\n This is a class docstring.\\n '
- >>> PyCS.docstring_copy_C
- '\\n This is a class docstring.\\n '
- >>> PyCSS.docstring_copy_C
- '\\n This is a class docstring.\\n '
-
- >>> PyCS.__doc__
- '\\n This is a subclass docstring.\\n '
- >>> PyCS.docstring_copy_CS
- '\\n This is a subclass docstring.\\n '
- >>> PyCSS.docstring_copy_CS
- '\\n This is a subclass docstring.\\n '
+ >>> PyC.__doc__.strip()
+ 'This is a class docstring.'
+ >>> PyC.docstring_copy_C.strip()
+ 'This is a class docstring.'
+ >>> PyCS.docstring_copy_C.strip()
+ 'This is a class docstring.'
+ >>> PyCSS.docstring_copy_C.strip()
+ 'This is a class docstring.'
+
+ >>> PyCS.__doc__.strip()
+ 'This is a subclass docstring.'
+ >>> PyCS.docstring_copy_CS.strip()
+ 'This is a subclass docstring.'
+ >>> PyCSS.docstring_copy_CS.strip()
+ 'This is a subclass docstring.'
>>> PyCSS.__doc__
>>> PyCSS.docstring_copy_CSS
diff --git a/tests/run/special_methods_T561_py3.pyx b/tests/run/special_methods_T561_py3.pyx
index 56ea83ceca3..95edd6260b2 100644
--- a/tests/run/special_methods_T561_py3.pyx
+++ b/tests/run/special_methods_T561_py3.pyx
@@ -1,5 +1,5 @@
# ticket: t561
-# tag: py3
+# tag: py3, warnings
# This file tests the behavior of special methods under Python 3
# after #561. (Only methods whose behavior differs between Python 2 and 3
# are tested here; see special_methods_T561.pyx for the rest of the tests.)
@@ -8,32 +8,9 @@ __doc__ = u"""
>>> vs0 = VerySpecial(0)
VS __init__ 0
- >>> # Python 3 does not use __cmp__, so any provided __cmp__ method is
- >>> # discarded under Python 3.
- >>> vs0_cmp = vs0.__cmp__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__cmp__'...
-
- >>> # Python 3 does not use __div__ or __idiv__, so these methods are
- >>> # discarded under Python 3.
- >>> vs0_div = vs0.__div__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__div__'...
- >>> vs0_rdiv = vs0.__rdiv__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__rdiv__'...
- >>> vs0_idiv = vs0.__idiv__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__idiv__'...
-
- >>> # Python 3 does not use __oct__ or __hex__, so these methods are
- >>> # discarded under Python 3.
- >>> vs0_oct = vs0.__oct__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__oct__'...
- >>> vs0_hex = vs0.__hex__ # doctest: +ELLIPSIS
- Traceback (most recent call last):
- AttributeError: 'special_methods_T561_py3.VerySpecial' object has no attribute '__hex__'...
+ >>> # Python 3 does not use __cmp__, __div__, __idiv__, __oct__ or __hex__;
+ >>> # These methods have no special behaviour and aren't tested beyond that
+ >>> # they don't break compilation.
>>> # Python 3 does not use __long__; if you define __long__ but not
>>> # __int__, the __long__ definition will be used for __int__.
@@ -79,3 +56,8 @@ cdef class VerySpecial:
cdef class Long:
def __long__(self):
print "Long __long__"
+
+_WARNINGS = """
+38:4: __nonzero__ was removed in Python 3; use __bool__ instead
+57:4: __long__ was removed in Python 3; use __int__ instead
+"""
diff --git a/tests/run/strmethods.pyx b/tests/run/strmethods.pyx
index 58d7a7801ef..77730929cf8 100644
--- a/tests/run/strmethods.pyx
+++ b/tests/run/strmethods.pyx
@@ -1,5 +1,15 @@
+# mode: run
+
+# cython: language_level=3
+
cimport cython
+# Also used by the language_level=2 tests in "strmethods_ll2.pyx"
+assert cython.typeof(1 / 2) in ('long', 'double')
+IS_LANGUAGE_LEVEL_3 = cython.typeof(1 / 2) == 'double'
+str_type = "unicode object" if IS_LANGUAGE_LEVEL_3 else "str object"
+
+
@cython.test_assert_path_exists(
"//PythonCapiCallNode")
def str_startswith(str s, sub, start=None, stop=None):
@@ -75,33 +85,31 @@ def str_as_name(str):
return str.endswith("x")
-@cython.test_assert_path_exists(
- "//SimpleCallNode",
- "//SimpleCallNode//NoneCheckNode",
- "//SimpleCallNode//AttributeNode[@is_py_attr = false]")
+#@cython.test_fail_if_path_exists(
+# "//SimpleCallNode",
+# "//SimpleCallNode//NoneCheckNode",
+# "//SimpleCallNode//AttributeNode[@is_py_attr = false]")
def str_join(str s, args):
"""
>>> print(str_join('a', list('bbb')))
babab
"""
result = s.join(args)
- assert cython.typeof(result) == 'basestring object', cython.typeof(result)
+ assert cython.typeof(result) == str_type, (cython.typeof(result), str_type)
return result
-@cython.test_fail_if_path_exists(
- "//SimpleCallNode//NoneCheckNode",
-)
-@cython.test_assert_path_exists(
- "//SimpleCallNode",
- "//SimpleCallNode//AttributeNode[@is_py_attr = false]")
+#@cython.test_fail_if_path_exists(
+# "//SimpleCallNode",
+# "//SimpleCallNode//NoneCheckNode",
+# "//SimpleCallNode//AttributeNode[@is_py_attr = false]")
def literal_join(args):
"""
>>> print(literal_join(list('abcdefg')))
a|b|c|d|e|f|g
"""
result = '|'.join(args)
- assert cython.typeof(result) == 'basestring object', cython.typeof(result)
+ assert cython.typeof(result) == str_type, (cython.typeof(result), str_type)
return result
@@ -125,7 +133,7 @@ def mod_format(str s, values):
>>> mod_format(None, RMod())
123
"""
- assert cython.typeof(s % values) == 'basestring object', cython.typeof(s % values)
+ assert cython.typeof(s % values) == "Python object", cython.typeof(s % values)
return s % values
@@ -138,7 +146,7 @@ def mod_format_literal(values):
>>> mod_format_literal(['sa']) == "abc['sa']def" or mod_format(format1, ['sa'])
True
"""
- assert cython.typeof('abc%sdef' % values) == 'basestring object', cython.typeof('abc%sdef' % values)
+ assert cython.typeof('abc%sdef' % values) == str_type, (cython.typeof('abc%sdef' % values), str_type)
return 'abc%sdef' % values
@@ -150,5 +158,5 @@ def mod_format_tuple(*values):
Traceback (most recent call last):
TypeError: not enough arguments for format string
"""
- assert cython.typeof('abc%sdef' % values) == 'basestring object', cython.typeof('abc%sdef' % values)
+ assert cython.typeof('abc%sdef' % values) == str_type, (cython.typeof('abc%sdef' % values), str_type)
return 'abc%sdef' % values
diff --git a/tests/run/strmethods_ll2.pyx b/tests/run/strmethods_ll2.pyx
new file mode 100644
index 00000000000..63faae059a5
--- /dev/null
+++ b/tests/run/strmethods_ll2.pyx
@@ -0,0 +1,9 @@
+# mode: run
+
+# cython: language_level=2
+
+"""
+Same tests as 'strmethods.pyx', but using 'language_level=2'.
+"""
+
+include "strmethods.pyx"
diff --git a/tests/run/type_inference.pyx b/tests/run/type_inference.pyx
index feb18817e6f..595ef1297be 100644
--- a/tests/run/type_inference.pyx
+++ b/tests/run/type_inference.pyx
@@ -270,6 +270,35 @@ def builtin_type_methods():
append(1)
assert l == [1], str(l)
+ u = u'abc def'
+ split = u.split()
+ assert typeof(split) == 'list object', typeof(split)
+
+ str_result1 = u.upper()
+ assert typeof(str_result1) == 'unicode object', typeof(str_result1)
+ str_result2 = u.upper().lower()
+ assert typeof(str_result2) == 'unicode object', typeof(str_result2)
+ str_result3 = u.upper().lower().strip()
+ assert typeof(str_result3) == 'unicode object', typeof(str_result3)
+ str_result4 = u.upper().lower().strip().lstrip()
+ assert typeof(str_result4) == 'unicode object', typeof(str_result4)
+ str_result5 = u.upper().lower().strip().lstrip().rstrip()
+ assert typeof(str_result5) == 'unicode object', typeof(str_result5)
+ str_result6 = u.upper().lower().strip().lstrip().rstrip().center(20)
+ assert typeof(str_result6) == 'unicode object', typeof(str_result6)
+ str_result7 = u.upper().lower().strip().lstrip().rstrip().center(20).format()
+ assert typeof(str_result7) == 'unicode object', typeof(str_result7)
+ str_result8 = u.upper().lower().strip().lstrip().rstrip().center(20).format().expandtabs(4)
+ assert typeof(str_result8) == 'unicode object', typeof(str_result8)
+ str_result9 = u.upper().lower().strip().lstrip().rstrip().center(20).format().expandtabs(4).swapcase()
+ assert typeof(str_result9) == 'unicode object', typeof(str_result9)
+
+ predicate1 = u.isupper()
+ assert typeof(predicate1) == 'bint', typeof(predicate1)
+ predicate2 = u.istitle()
+ assert typeof(predicate2) == 'bint', typeof(predicate2)
+
+
cdef int cfunc(int x):
return x+1
@@ -544,9 +573,8 @@ def safe_only():
div_res = pyint_val / 7
assert typeof(div_res) == ("double" if IS_LANGUAGE_LEVEL_3 else "Python object"), typeof(div_res)
- # we special-case inference to type str
s = "abc"
- assert typeof(s) == ("unicode object" if IS_LANGUAGE_LEVEL_3 else "Python object"), (typeof(s), str_type)
+ assert typeof(s) == str_type, (typeof(s), str_type)
cdef str t = "def"
assert typeof(t) == str_type, (typeof(t), str_type)
diff --git a/tests/run/unicode_identifiers.pyx b/tests/run/unicode_identifiers.pyx
index c33f9cb6ecf..dd5d23e39f0 100644
--- a/tests/run/unicode_identifiers.pyx
+++ b/tests/run/unicode_identifiers.pyx
@@ -233,3 +233,31 @@ cdef class NormalizeAttrCdef:
self.fi = 5
def get(self):
return self.fi
+
+
+ctypedef long äntägär
+
+def use_typedef(x: äntägär):
+ """
+ >>> use_typedef(5)
+ 10
+ """
+ cdef äntägär i = x
+ return i + x
+
+
+ctypedef fused nümbärs:
+ float
+ äntägär
+
+
+def use_fused_typedef(x: nümbärs):
+ """
+ >>> use_fused_typedef(4)
+ 8
+ >>> use_fused_typedef(4.5)
+ 9.0
+ """
+ cdef nümbärs i = x
+ assert cython.typeof(i) in ('float', 'äntägär'), cython.typeof(i)
+ return i + x
diff --git a/tests/run/unicode_imports.srctree b/tests/run/unicode_imports.srctree
index 41306fe3ab7..f7048d6d4e3 100644
--- a/tests/run/unicode_imports.srctree
+++ b/tests/run/unicode_imports.srctree
@@ -22,7 +22,7 @@ from __future__ import unicode_literals
from Cython.Build import cythonize
-files = ["mymoð.pyx", "from_cy.pyx"]
+files = ["mymoð.pyx", "from_cy.pyx", "测试.py" ]
modules = cythonize(files)
@@ -46,6 +46,11 @@ cdef struct S:
int x
cdef public api void cdef_func() # just to test generation of headers
+
+############ 测试.py #############
+
+def g():
+ return 1
############ from_py.py #########
@@ -53,12 +58,15 @@ cdef public api void cdef_func() # just to test generation of headers
import mymoð
from mymoð import f
+import 测试
__doc__ = """
>>> mymoð.f()
True
>>> f()
True
+>>> 测试.g()
+1
"""
######### from_cy.pyx ##########
@@ -68,6 +76,7 @@ True
import mymoð
from mymoð import f
+import 测试
cimport pxd_moð
from pxd_moð cimport S
@@ -79,6 +88,14 @@ def test_imported():
True
"""
return mymoð.f() and f() # True and True
+
+
+def test_imported2():
+ """
+ >>> test_imported2()
+ 1
+ """
+ return 测试.g()
def test_cimported():