Virtual environments teil20 and teil20a

This commit is contained in:
2023-09-01 15:57:01 +02:00
parent 69da4cda15
commit c9aee44812
8197 changed files with 1603063 additions and 0 deletions

View File

@@ -0,0 +1,133 @@
# This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.add_dll_directory(extra_dll_dir)
blas_armpl_info={}
blas_mkl_info={}
blas_ssl2_info={}
blis_info={}
openblas_info={}
accelerate_info={}
atlas_3_10_blas_threads_info={}
atlas_3_10_blas_info={}
atlas_blas_threads_info={}
atlas_blas_info={'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('NO_ATLAS_INFO', -1)], 'libraries': ['f77blas', 'cblas', 'atlas', 'f77blas', 'cblas'], 'library_dirs': ['/usr/lib/arm-linux-gnueabihf']}
blas_opt_info={'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('NO_ATLAS_INFO', -1)], 'libraries': ['f77blas', 'cblas', 'atlas', 'f77blas', 'cblas'], 'library_dirs': ['/usr/lib/arm-linux-gnueabihf']}
lapack_armpl_info={}
lapack_mkl_info={}
lapack_ssl2_info={}
openblas_lapack_info={}
openblas_clapack_info={}
flame_info={}
atlas_3_10_threads_info={}
atlas_3_10_info={}
atlas_threads_info={}
atlas_info={'language': 'f77', 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas', 'f77blas', 'cblas'], 'library_dirs': ['/usr/lib/arm-linux-gnueabihf'], 'define_macros': [('NO_ATLAS_INFO', -1)]}
lapack_opt_info={'language': 'f77', 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas', 'f77blas', 'cblas'], 'library_dirs': ['/usr/lib/arm-linux-gnueabihf'], 'define_macros': [('NO_ATLAS_INFO', -1)]}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
"""
Show libraries in the system on which NumPy was built.
Print information about various resources (libraries, library
directories, include directories, etc.) in the system on which
NumPy was built.
See Also
--------
get_include : Returns the directory containing NumPy C
header files.
Notes
-----
1. Classes specifying the information to be printed are defined
in the `numpy.distutils.system_info` module.
Information may include:
* ``language``: language used to write the libraries (mostly
C or f77)
* ``libraries``: names of libraries found in the system
* ``library_dirs``: directories containing the libraries
* ``include_dirs``: directories containing library header files
* ``src_dirs``: directories containing library source files
* ``define_macros``: preprocessor macros used by
``distutils.setup``
* ``baseline``: minimum CPU features required
* ``found``: dispatched features supported in the system
* ``not found``: dispatched features that are not supported
in the system
2. NumPy BLAS/LAPACK Installation Notes
Installing a numpy wheel (``pip install numpy`` or force it
via ``pip install numpy --only-binary :numpy: numpy``) includes
an OpenBLAS implementation of the BLAS and LAPACK linear algebra
APIs. In this case, ``library_dirs`` reports the original build
time configuration as compiled with gcc/gfortran; at run time
the OpenBLAS library is in
``site-packages/numpy.libs/`` (linux), or
``site-packages/numpy/.dylibs/`` (macOS), or
``site-packages/numpy/.libs/`` (windows).
Installing numpy from source
(``pip install numpy --no-binary numpy``) searches for BLAS and
LAPACK dynamic link libraries at build time as influenced by
environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
or the optional file ``~/.numpy-site.cfg``.
NumPy remembers those locations and expects to load the same
libraries at run-time.
In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
library) is in the default build-time search order after
'openblas'.
Examples
--------
>>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
define_macros = [('HAVE_CBLAS', None)]
libraries = ['openblas', 'openblas']
library_dirs = ['/usr/local/lib']
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
print("Supported SIMD extensions in this NumPy install:")
print(" baseline = %s" % (','.join(__cpu_baseline__)))
print(" found = %s" % (','.join(features_found)))
print(" not found = %s" % (','.join(features_not_found)))

View File

@@ -0,0 +1,64 @@
"""
An enhanced distutils, providing support for Fortran compilers, for BLAS,
LAPACK and other common libraries for numerical computing, and more.
Public submodules are::
misc_util
system_info
cpu_info
log
exec_command
For details, please see the *Packaging* and *NumPy Distutils User Guide*
sections of the NumPy Reference Guide.
For configuring the preference for and location of libraries like BLAS and
LAPACK, and for setting include paths and similar build options, please see
``site.cfg.example`` in the root of the NumPy repository or sdist.
"""
import warnings
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
from .npy_pkg_config import *
warnings.warn("\n\n"
" `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
" of the deprecation of `distutils` itself. It will be removed for\n"
" Python >= 3.12. For older Python versions it will remain present.\n"
" It is recommended to use `setuptools < 60.0` for those Python versions.\n"
" For more details, see:\n"
" https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
DeprecationWarning, stacklevel=2
)
del warnings
# If numpy is installed, add distutils.test()
try:
from . import __config__
# Normally numpy is installed if the above import works, but an interrupted
# in-place build could also have left a __config__.py. In that case the
# next import may still fail, so keep it inside the try block.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
except ImportError:
pass
def customized_fcompiler(plat=None, compiler=None):
from numpy.distutils.fcompiler import new_fcompiler
c = new_fcompiler(plat=plat, compiler=compiler)
c.customize()
return c
def customized_ccompiler(plat=None, compiler=None, verbose=1):
c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
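A minimal sketch of using the customized_ccompiler() helper defined above (importing it triggers the deprecation warning shown earlier; the compiler_so attribute is only guaranteed on POSIX-style compilers, hence the guarded access):

from numpy.distutils import customized_ccompiler

# Returns a distutils CCompiler with numpy's customizations already applied.
cc = customized_ccompiler()

# On POSIX builds this exposes the command line that will be used, e.g.
# ['gcc', '-O2', '-fPIC', ...]; MSVC-style compilers may not define it yet.
print(getattr(cc, 'compiler_so', None))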

View File

@@ -0,0 +1,4 @@
from typing import Any
# TODO: remove when the full numpy namespace is defined
def __getattr__(name: str) -> Any: ...

View File

@@ -0,0 +1,91 @@
"""
Helper functions for interacting with the shell, and consuming shell-style
parameters provided in config files.
"""
import os
import shlex
import subprocess
try:
from shlex import quote
except ImportError:
from pipes import quote
__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
class CommandLineParser:
"""
An object that knows how to split and join command-line arguments.
It must be true that ``argv == split(join(argv))`` for all ``argv``.
The reverse needn't be true - `join(split(cmd))` may result in the addition
or removal of unnecessary escaping.
"""
@staticmethod
def join(argv):
""" Join a list of arguments into a command line string """
raise NotImplementedError
@staticmethod
def split(cmd):
""" Split a command line string into a list of arguments """
raise NotImplementedError
class WindowsParser:
"""
The parsing behavior used by `subprocess.call("string")` on Windows, which
matches the Microsoft C/C++ runtime.
Note that this is _not_ the behavior of cmd.
"""
@staticmethod
def join(argv):
# note that list2cmdline is specific to the windows syntax
return subprocess.list2cmdline(argv)
@staticmethod
def split(cmd):
import ctypes # guarded import for systems without ctypes
try:
ctypes.windll
except AttributeError:
raise NotImplementedError
# Windows has special parsing rules for the executable (no quotes),
# that we do not care about - insert a dummy element
if not cmd:
return []
cmd = 'dummy ' + cmd
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
nargs = ctypes.c_int()
lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
args = [lpargs[i] for i in range(nargs.value)]
assert not ctypes.windll.kernel32.LocalFree(lpargs)
# strip the element we inserted
assert args[0] == "dummy"
return args[1:]
class PosixParser:
"""
The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
"""
@staticmethod
def join(argv):
return ' '.join(quote(arg) for arg in argv)
@staticmethod
def split(cmd):
return shlex.split(cmd, posix=True)
if os.name == 'nt':
NativeParser = WindowsParser
elif os.name == 'posix':
NativeParser = PosixParser
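A small usage sketch of the round-trip invariant documented in CommandLineParser (the numpy.distutils._shell_utils module path is assumed from this diff):

from numpy.distutils._shell_utils import NativeParser

argv = ['gcc', '-I', '/opt/include dir', '-O2']
cmd = NativeParser.join(argv)        # quote for the current platform's rules
assert NativeParser.split(cmd) == argv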

View File

@@ -0,0 +1,26 @@
from distutils.unixccompiler import UnixCCompiler
class ArmCCompiler(UnixCCompiler):
"""
Arm compiler.
"""
compiler_type = 'arm'
cc_exe = 'armclang'
cxx_exe = 'armclang++'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
cc_compiler = self.cc_exe
cxx_compiler = self.cxx_exe
self.set_executables(compiler=cc_compiler +
' -O3 -fPIC',
compiler_so=cc_compiler +
' -O3 -fPIC',
compiler_cxx=cxx_compiler +
' -O3 -fPIC',
linker_exe=cc_compiler +
' -lamath',
linker_so=cc_compiler +
' -lamath -shared')
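For context, a hedged sketch of how this class gets selected: it is registered under the 'arm' key of compiler_class in the ccompiler.py hunk further down in this commit, so a build can request it explicitly (this assumes armclang/armclang++ are installed and on PATH):

from numpy.distutils.ccompiler import new_compiler

# 'arm' maps to ('armccompiler', 'ArmCCompiler', "Arm C Compiler").
cc = new_compiler(compiler='arm')
print(cc.cc_exe, cc.cxx_exe)   # armclang armclang++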

View File

@@ -0,0 +1,826 @@
import os
import re
import sys
import platform
import shlex
import time
import subprocess
from copy import copy
from pathlib import Path
from distutils import ccompiler
from distutils.ccompiler import (
compiler_class, gen_lib_options, get_default_compiler, new_compiler,
CCompiler
)
from distutils.errors import (
DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
CompileError, UnknownFileError
)
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import (
filepath_from_subprocess_output, forward_bytes_to_stdout
)
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
get_num_build_jobs, \
_commandline_dep_string, \
sanitize_cxx_flags
# globals for parallel build management
import threading
_job_semaphore = None
_global_lock = threading.Lock()
_processing_files = set()
def _needs_build(obj, cc_args, extra_postargs, pp_opts):
"""
Check if an object needs to be rebuilt based on its dependencies
Parameters
----------
obj : str
object file
Returns
-------
bool
"""
# defined in unixccompiler.py
dep_file = obj + '.d'
if not os.path.exists(dep_file):
return True
# dep_file is a makefile containing 'object: dependencies'
# formatted like posix shell (spaces escaped, \ line continuations)
# the last line contains the compiler commandline arguments as some
# projects may compile an extension multiple times with different
# arguments
with open(dep_file) as f:
lines = f.readlines()
cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
last_cmdline = lines[-1]
if last_cmdline != cmdline:
return True
contents = ''.join(lines[:-1])
deps = [x for x in shlex.split(contents, posix=True)
if x != "\n" and not x.endswith(":")]
try:
t_obj = os.stat(obj).st_mtime
# check if any of the dependencies is newer than the object
# the dependencies includes the source used to create the object
for f in deps:
if os.stat(f).st_mtime > t_obj:
return True
except OSError:
# no object counts as newer (shouldn't happen if dep_file exists)
return True
return False
def replace_method(klass, method_name, func):
# Py3k does not have unbound method anymore, MethodType does not work
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
######################################################################
## Method that subclasses may redefine. But don't call this method,
## it is private to the CCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..
def CCompiler_find_executables(self):
"""
Does nothing here, but is called by the get_version method and can be
overridden by subclasses. In particular it is redefined in the `FCompiler`
class where more documentation can be found.
"""
pass
replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None, env=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
env : a dictionary for environment variables, optional
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
env = env if env is not None else dict(os.environ)
if display is None:
display = cmd
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
try:
if self.verbose:
subprocess.check_output(cmd, env=env)
else:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
except OSError as e:
# OSError doesn't have the same hooks for the exception
# output, but exec_command() historically would use an
# empty string for EnvironmentError (base class for
# OSError)
# o = b''
# still that would make the end-user lost in translation!
o = f"\n\n{e}\n\n\n"
try:
o = o.encode(sys.stdout.encoding)
except AttributeError:
o = o.encode('utf8')
# status previously used by exec_command() for parent
# of OSError
s = 127
else:
# use a convenience return here so that any kind of
# caught exception will execute the default code after the
# try / except block, which handles various exceptions
return None
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
if self.verbose:
forward_bytes_to_stdout(o)
if re.search(b'Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
else:
msg = ''
raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
(cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""
Return the name of the object files for the given source files.
Parameters
----------
source_filenames : list of str
The list of paths to source files. Paths can be either relative or
absolute, this is handled transparently.
strip_dir : bool, optional
Whether to strip the directory from the returned paths. If True,
the file name prepended by `output_dir` is returned. Default is False.
output_dir : str, optional
If given, this path is prepended to the returned paths to the
object files.
Returns
-------
obj_names : list of str
The list of paths to the object files corresponding to the source
files in `source_filenames`.
"""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normpath(src_name))
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if base.startswith('..'):
# Resolve starting relative path components, middle ones
# (if any) have been handled by os.path.normpath above.
i = base.rfind('..')+2
d = base[:i]
d = os.path.basename(os.path.abspath(d))
base = d + base[i:]
if ext not in self.src_extensions:
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_name = os.path.join(output_dir, base + self.obj_extension)
obj_names.append(obj_name)
return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
def CCompiler_compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""
Compile one or more source files.
Please refer to the Python distutils API reference for more details.
Parameters
----------
sources : list of str
A list of filenames
output_dir : str, optional
Path to the output directory.
macros : list of tuples
A list of macro definitions.
include_dirs : list of str, optional
The directories to add to the default include file search path for
this compilation only.
debug : bool, optional
Whether or not to output debug symbols in or alongside the object
file(s).
extra_preargs, extra_postargs : ?
Extra pre- and post-arguments.
depends : list of str, optional
A list of file names that all targets depend on.
Returns
-------
objects : list of str
A list of object file names, one per source file `sources`.
Raises
------
CompileError
If compilation fails.
"""
global _job_semaphore
jobs = get_num_build_jobs()
# setup semaphore to not exceed number of compile jobs when parallelized at
# extension level (python >= 3.5)
with _global_lock:
if _job_semaphore is None:
_job_semaphore = threading.Semaphore(jobs)
if not sources:
return []
from numpy.distutils.fcompiler import (FCompiler,
FORTRAN_COMMON_FIXED_EXTENSIONS,
has_f90_header)
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
fcomp = getattr(self, 'compiler_'+fc)
if fcomp is None:
continue
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
display = '\n'.join(display)
else:
ccomp = self.compiler_so
display = "C compiler: %s\n" % (' '.join(ccomp),)
log.info(display)
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
display = "compile options: '%s'" % (' '.join(cc_args))
if extra_postargs:
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
def single_compile(args):
obj, (src, ext) = args
if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
return
# check if we are currently already processing the same object
# happens when using the same source in multiple extensions
while True:
# need explicit lock as there is no atomic check and add with GIL
with _global_lock:
# file not being worked on, start working
if obj not in _processing_files:
_processing_files.add(obj)
break
# wait for the processing to end
time.sleep(0.1)
try:
# retrieve slot from our #job semaphore and build
with _job_semaphore:
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
finally:
# register being done processing
with _global_lock:
_processing_files.remove(obj)
if isinstance(self, FCompiler):
objects_to_build = list(build.keys())
f77_objects, other_objects = [], []
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
if self.compiler_type=='absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \
and not has_f90_header(src):
f77_objects.append((obj, (src, ext)))
else:
other_objects.append((obj, (src, ext)))
# f77 objects can be built in parallel
build_items = f77_objects
# build f90 modules serial, module files are generated during
# compilation and may be used by files later in the list so the
# ordering is important
for o in other_objects:
single_compile(o)
else:
build_items = build.items()
if len(build) > 1 and jobs > 1:
# build parallel
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(jobs) as pool:
res = pool.map(single_compile, build_items)
list(res) # access result to raise errors
else:
# build serial
for o in build_items:
single_compile(o)
# Return *all* object filenames, not just the ones we just built.
return objects
replace_method(CCompiler, 'compile', CCompiler_compile)
def CCompiler_customize_cmd(self, cmd, ignore=()):
"""
Customize compiler using distutils command.
Parameters
----------
cmd : class instance
An instance inheriting from `distutils.cmd.Command`.
ignore : sequence of str, optional
List of `CCompiler` commands (without ``'set_'``) that should not be
altered. Strings that are checked for are:
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
'rpath', 'link_objects')``.
Returns
-------
None
"""
log.info('customize %s using %s' % (self.__class__.__name__,
cmd.__class__.__name__))
if (
hasattr(self, 'compiler') and
'clang' in self.compiler[0] and
not (platform.machine() == 'arm64' and sys.platform == 'darwin')
):
# clang defaults to a non-strict floating-point error model.
# However, '-ftrapping-math' is not currently supported (2023-04-08)
# for macosx_arm64.
# Since NumPy and most Python libs give warnings for these, override:
self.compiler.append('-ftrapping-math')
self.compiler_so.append('-ftrapping-math')
def allow(attr):
return getattr(cmd, attr, None) is not None and attr not in ignore
if allow('include_dirs'):
self.set_include_dirs(cmd.include_dirs)
if allow('define'):
for (name, value) in cmd.define:
self.define_macro(name, value)
if allow('undef'):
for macro in cmd.undef:
self.undefine_macro(macro)
if allow('libraries'):
self.set_libraries(self.libraries + cmd.libraries)
if allow('library_dirs'):
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
if allow('rpath'):
self.set_runtime_library_dirs(cmd.rpath)
if allow('link_objects'):
self.set_link_objects(cmd.link_objects)
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = list(compiler.executables.keys())
for key in ['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch',
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler, key):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
fmt = '%-' + repr(mx+1) + 's = %s'
lines = [fmt % prop for prop in props]
return '\n'.join(lines)
def CCompiler_show_customization(self):
"""
Print the compiler customizations to stdout.
Parameters
----------
None
Returns
-------
None
Notes
-----
Printing is only done if the distutils log threshold is < 2.
"""
try:
self.get_version()
except Exception:
pass
if log._global_log.threshold<2:
print('*'*80)
print(self.__class__)
print(_compiler_to_string(self))
print('*'*80)
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
"""
Do any platform-specific customization of a compiler instance.
This method calls `distutils.sysconfig.customize_compiler` for
platform-specific customization, as well as optionally remove a flag
to suppress spurious warnings in case C++ code is being compiled.
Parameters
----------
dist : object
This parameter is not used for anything.
need_cxx : bool, optional
Whether or not C++ has to be compiled. If so (True), the
``"-Wstrict-prototypes"`` option is removed to prevent spurious
warnings. Default is False.
Returns
-------
None
Notes
-----
All the default options used by distutils can be extracted with::
from distutils import sysconfig
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
"""
# See FCompiler.customize for suggested usage.
log.info('customize %s' % (self.__class__.__name__))
customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation.
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
if not self.compiler_cxx:
if self.compiler[0].startswith('gcc'):
a, b = 'gcc', 'g++'
else:
a, b = 'cc', 'c++'
self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+ self.compiler[1:]
else:
if hasattr(self, 'compiler'):
log.warn("#### %s #######" % (self.compiler,))
if not hasattr(self, 'compiler_cxx'):
log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
# check if compiler supports gcc style automatic dependencies
# run on every extension so skip for known good compilers
if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
'g++' in self.compiler[0] or
'clang' in self.compiler[0]):
self._auto_depends = True
elif os.name == 'posix':
import tempfile
import shutil
tmpdir = tempfile.mkdtemp()
try:
fn = os.path.join(tmpdir, "file.c")
with open(fn, "w") as f:
f.write("int a;\n")
self.compile([fn], output_dir=tmpdir,
extra_preargs=['-MMD', '-MF', fn + '.d'])
self._auto_depends = True
except CompileError:
self._auto_depends = False
finally:
shutil.rmtree(tmpdir)
return
replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
"""
Simple matching of version numbers, for use in CCompiler and FCompiler.
Parameters
----------
pat : str, optional
A regular expression matching version numbers.
Default is ``r'[-.\\d]+'``.
ignore : str, optional
A regular expression matching patterns to skip.
Default is ``''``, in which case nothing is skipped.
start : str, optional
A regular expression matching the start of where to start looking
for version numbers.
Default is ``''``, in which case searching is started at the
beginning of the version string given to `matcher`.
Returns
-------
matcher : callable
A function that is appropriate to use as the ``.version_match``
attribute of a `CCompiler` class. `matcher` takes a single parameter,
a version string.
"""
def matcher(self, version_string):
# version string may appear in the second line, so getting rid
# of new lines:
version_string = version_string.replace('\n', ' ')
pos = 0
if start:
m = re.match(start, version_string)
if not m:
return None
pos = m.end()
while True:
m = re.search(pat, version_string[pos:])
if not m:
return None
if ignore and re.match(ignore, m.group(0)):
pos = m.end()
continue
break
return m.group(0)
return matcher
def CCompiler_get_version(self, force=False, ok_status=[0]):
"""
Return compiler version, or None if compiler is not available.
Parameters
----------
force : bool, optional
If True, force a new determination of the version, even if the
compiler already has a version attribute. Default is False.
ok_status : list of int, optional
The list of status values returned by the version look-up process
for which a version string is returned. If the status value is not
in `ok_status`, None is returned. Default is ``[0]``.
Returns
-------
version : str or None
Version string, in the format of `distutils.version.LooseVersion`.
"""
if not force and hasattr(self, 'version'):
return self.version
self.find_executables()
try:
version_cmd = self.version_cmd
except AttributeError:
return None
if not version_cmd or not version_cmd[0]:
return None
try:
matcher = self.version_match
except AttributeError:
try:
pat = self.version_pattern
except AttributeError:
return None
def matcher(version_string):
m = re.match(pat, version_string)
if not m:
return None
version = m.group('version')
return version
try:
output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
output = exc.output
status = exc.returncode
except OSError:
# match the historical returns for a parent
# exception class caught by exec_command()
status = 127
output = b''
else:
# output isn't actually a filepath but we do this
# for now to match previous distutils behavior
output = filepath_from_subprocess_output(output)
status = 0
version = None
if status in ok_status:
version = matcher(output)
if version:
version = LooseVersion(version)
self.version = version
return version
replace_method(CCompiler, 'get_version', CCompiler_get_version)
def CCompiler_cxx_compiler(self):
"""
Return the C++ compiler.
Parameters
----------
None
Returns
-------
cxx : class instance
The C++ compiler, as a `CCompiler` instance.
"""
if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
return self
cxx = copy(self)
cxx.compiler_cxx = cxx.compiler_cxx
cxx.compiler_so = [cxx.compiler_cxx[0]] + \
sanitize_cxx_flags(cxx.compiler_so[1:])
if (sys.platform.startswith(('aix', 'os400')) and
'ld_so_aix' in cxx.linker_so[0]):
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ cxx.linker_so[2:]
if sys.platform.startswith('os400'):
#This is required by IBM i 7.4 and previous for PRId64 in printf() calls.
cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
#This works around a bug in gcc 10.3, which fails to handle the TLS init.
cxx.compiler_so.append('-fno-extern-tls-init')
cxx.linker_so.append('-fno-extern-tls-init')
else:
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
"Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
"Intel C Compiler for 32-bit applications on Windows")
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
"Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
"Arm C Compiler")
compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler',
"Fujitsu C Compiler")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
('linux.*', 'intelem'),
('linux.*', 'pathcc'),
('nt', 'intelw'),
('nt', 'intelemw'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"\
"(for MSC built Python)")
if mingw32():
# On windows platforms, we want to default to mingw32 (gcc)
# because msvc can't build blitz stuff.
log.info('Setting mingw32 as default compiler for nt.')
ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ ccompiler._default_compilers
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
verbose=None,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
if verbose is None:
verbose = log.get_threshold() <= log.INFO
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
module_name = "numpy.distutils." + module_name
try:
__import__ (module_name)
except ImportError as e:
msg = str(e)
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
except ImportError as e:
msg = str(e)
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
module = sys.modules[module_name]
klass = vars(module)[class_name]
except KeyError:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
compiler.verbose = verbose
log.debug('new_compiler returns %s' % (klass))
return compiler
ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
# the version of this function provided by CPython allows the following
# to return lists, which are unpacked automatically:
# - compiler.runtime_library_dir_option
# our version extends the behavior to:
# - compiler.library_dir_option
# - compiler.library_option
# - compiler.find_library_file
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
for i in r:
if is_sequence(i):
lib_opts.extend(list(i))
else:
lib_opts.append(i)
return lib_opts
ccompiler.gen_lib_options = gen_lib_options
# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
_m = sys.modules.get('distutils.' + _cc + 'compiler')
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
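A hedged example of the simple_version_match() helper defined above, illustrating the matcher it returns (the sample version string is made up):

from numpy.distutils.ccompiler import simple_version_match

# Build a matcher that skips ahead to 'GCC' before looking for a version number.
match = simple_version_match(start=r'GCC')

# The matcher is meant to be bound as a .version_match method, so it takes a
# (self, version_string) pair; passing None for self is fine for a quick check.
print(match(None, 'GCC 12.2.0 (Debian 12.2.0-14)'))   # -> '12.2.0'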

File diff suppressed because it is too large

View File

@@ -0,0 +1,27 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
float *src = (float*)argv[argc-1];
float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
/* MAXMIN */
int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
/* ROUNDING */
ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
#ifdef __aarch64__
{
double *src2 = (double*)argv[argc-1];
float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
/* MAXMIN */
ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
/* ROUNDING */
ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
}
#endif
return ret;
}

View File

@@ -0,0 +1,16 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
unsigned char *src = (unsigned char*)argv[argc-1];
uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
uint32x4_t va = vdupq_n_u32(3);
int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
#ifdef __aarch64__
ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
#endif
return ret;
}

View File

@@ -0,0 +1,19 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
float16_t *src = (float16_t*)argv[argc-1];
float *src2 = (float*)argv[argc-2];
float16x8_t vhp = vdupq_n_f16(src[0]);
float16x4_t vlhp = vdup_n_f16(src[1]);
float32x4_t vf = vdupq_n_f32(src2[0]);
float32x2_t vlf = vdup_n_f32(src2[1]);
int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
return ret;
}

View File

@@ -0,0 +1,15 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
float16_t *src = (float16_t*)argv[argc-1];
float16x8_t vhp = vdupq_n_f16(src[0]);
float16x4_t vlhp = vdup_n_f16(src[1]);
int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
return ret;
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __AVX__
#error "HOST/ARCH doesn't support AVX"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __AVX2__
#error "HOST/ARCH doesn't support AVX2"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}

View File

@@ -0,0 +1,22 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __AVX512VNNI__
#error "HOST/ARCH doesn't support CascadeLake AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
/* VNNI */
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,24 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
#error "HOST/ARCH doesn't support CannonLake AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
/* IFMA */
a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
/* VBMI */
a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,26 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
#error "HOST/ARCH doesn't support IceLake AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
/* VBMI2 */
a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
/* BITALG */
a = _mm512_popcnt_epi8(a);
/* VPOPCNTDQ */
a = _mm512_popcnt_epi64(a);
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,25 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX512ER__) || !defined(__AVX512PF__)
#error "HOST/ARCH doesn't support Knights Landing AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
int base[128];
__m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
/* ER */
__m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
/* PF */
_mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
return base[0];
}

View File

@@ -0,0 +1,30 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
#error "HOST/ARCH doesn't support Knights Mill AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
__m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
/* 4FMAPS */
b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
/* 4VNNIW */
a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
/* VPOPCNTDQ */
a = _mm512_popcnt_epi64(a);
a = _mm512_add_epi32(a, _mm512_castps_si512(b));
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,26 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
#error "HOST/ARCH doesn't support SkyLake AVX512 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
/* VL */
__m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
/* DQ */
__m512i b = _mm512_broadcast_i32x8(a);
/* BW */
b = _mm512_abs_epi16(b);
return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
}

View File

@@ -0,0 +1,26 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__AVX512FP16__)
#error "HOST/ARCH doesn't support Sapphire Rapids AVX512FP16 features"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
/* clang has a bug regarding our spr code, see gh-23730. */
#if __clang__
#error
#endif
__m512h a = _mm512_loadu_ph((void*)argv[argc-1]);
__m512h temp = _mm512_fmadd_ph(a, a, a);
_mm512_storeu_ph((void*)(argv[argc-1]), temp);
return 0;
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __AVX512CD__
#error "HOST/ARCH doesn't support AVX512CD"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __AVX512F__
#error "HOST/ARCH doesn't support AVX512F"
#endif
#endif
#include <immintrin.h>
int main(int argc, char **argv)
{
__m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}

View File

@@ -0,0 +1,22 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __F16C__
#error "HOST/ARCH doesn't support F16C"
#endif
#endif
#include <emmintrin.h>
#include <immintrin.h>
int main(int argc, char **argv)
{
__m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
__m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
}

View File

@@ -0,0 +1,22 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__FMA__) && !defined(__AVX2__)
#error "HOST/ARCH doesn't support FMA3"
#endif
#endif
#include <xmmintrin.h>
#include <immintrin.h>
int main(int argc, char **argv)
{
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
a = _mm256_fmadd_ps(a, a, a);
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
}

View File

@@ -0,0 +1,13 @@
#include <immintrin.h>
#ifdef _MSC_VER
#include <ammintrin.h>
#else
#include <x86intrin.h>
#endif
int main(int argc, char **argv)
{
__m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
a = _mm256_macc_ps(a, a, a);
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
}

View File

@@ -0,0 +1,19 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
// passing from untraced pointers to avoid optimizing out any constants
// so we can test against the linker.
float *src = (float*)argv[argc-1];
float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
#ifdef __aarch64__
double *src2 = (double*)argv[argc-2];
float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
#endif
return ret;
}

View File

@@ -0,0 +1,11 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
short *src = (short*)argv[argc-1];
float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
return (int)vgetq_lane_f32(v_z4, 0);
}

View File

@@ -0,0 +1,21 @@
#ifdef _MSC_VER
#include <Intrin.h>
#endif
#include <arm_neon.h>
int main(int argc, char **argv)
{
float *src = (float*)argv[argc-1];
float32x4_t v1 = vdupq_n_f32(src[0]);
float32x4_t v2 = vdupq_n_f32(src[1]);
float32x4_t v3 = vdupq_n_f32(src[2]);
int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
#ifdef __aarch64__
double *src2 = (double*)argv[argc-2];
float64x2_t vd1 = vdupq_n_f64(src2[0]);
float64x2_t vd2 = vdupq_n_f64(src2[1]);
float64x2_t vd3 = vdupq_n_f64(src2[2]);
ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
#endif
return ret;
}

View File

@@ -0,0 +1,32 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
 * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#if !defined(__SSE4_2__) && !defined(__POPCNT__)
#error "HOST/ARCH doesn't support POPCNT"
#endif
#endif
#ifdef _MSC_VER
#include <nmmintrin.h>
#else
#include <popcntintrin.h>
#endif
int main(int argc, char **argv)
{
// To make sure popcnt instructions are generated
// and are tested against the assembler
unsigned long long a = *((unsigned long long*)argv[argc-1]);
unsigned int b = *((unsigned int*)argv[argc-2]);
#if defined(_M_X64) || defined(__x86_64__)
a = _mm_popcnt_u64(a);
#endif
b = _mm_popcnt_u32(b);
return (int)a + b;
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSE__
#error "HOST/ARCH doesn't support SSE"
#endif
#endif
#include <xmmintrin.h>
int main(void)
{
__m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
return (int)_mm_cvtss_f32(a);
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSE2__
#error "HOST/ARCH doesn't support SSE2"
#endif
#endif
#include <emmintrin.h>
int main(void)
{
__m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
return _mm_cvtsi128_si32(a);
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSE3__
#error "HOST/ARCH doesn't support SSE3"
#endif
#endif
#include <pmmintrin.h>
int main(void)
{
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
return (int)_mm_cvtss_f32(a);
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSE4_1__
#error "HOST/ARCH doesn't support SSE41"
#endif
#endif
#include <smmintrin.h>
int main(void)
{
__m128 a = _mm_floor_ps(_mm_setzero_ps());
return (int)_mm_cvtss_f32(a);
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSE4_2__
#error "HOST/ARCH doesn't support SSE42"
#endif
#endif
#include <smmintrin.h>
int main(void)
{
__m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
return (int)_mm_cvtss_f32(a);
}

View File

@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
/*
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
* whether or not the build options for those features are specified.
* Therefore, we must test #definitions of CPU features when option native/host
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
* the test will be broken and leads to enable all possible features.
*/
#ifndef __SSSE3__
#error "HOST/ARCH doesn't support SSSE3"
#endif
#endif
#include <tmmintrin.h>
int main(void)
{
__m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
return (int)_mm_cvtsi128_si32(a);
}

View File

@@ -0,0 +1,21 @@
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
#define vsx_ld vec_vsx_ld
#define vsx_st vec_vsx_st
#else
#define vsx_ld vec_xl
#define vsx_st vec_xst
#endif
int main(void)
{
unsigned int zout[4];
unsigned int z4[] = {0, 0, 0, 0};
__vector unsigned int v_z4 = vsx_ld(0, z4);
vsx_st(v_z4, 0, zout);
return zout[0];
}

View File

@@ -0,0 +1,13 @@
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
typedef __vector unsigned long long v_uint64x2;
int main(void)
{
v_uint64x2 z2 = (v_uint64x2){0, 0};
z2 = (v_uint64x2)vec_cmpeq(z2, z2);
return (int)vec_extract(z2, 0);
}

View File

@@ -0,0 +1,13 @@
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
typedef __vector unsigned int v_uint32x4;
int main(void)
{
v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
z4 = vec_absd(z4, z4);
return (int)vec_extract(z4, 0);
}

View File

@@ -0,0 +1,14 @@
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
typedef __vector unsigned int v_uint32x4;
int main(void)
{
v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
v_uint32x4 v3 = vec_mod(v1, v2);
return (int)vec_extractm(v3);
}

View File

@@ -0,0 +1,16 @@
#if (__VEC__ < 10301) || (__ARCH__ < 11)
#error VX not supported
#endif
#include <vecintrin.h>
int main(int argc, char **argv)
{
__vector double x = vec_abs(vec_xl(argc, (double*)argv));
__vector double y = vec_load_len((double*)argv, (unsigned int)argc);
x = vec_round(vec_ceil(x) + vec_floor(y));
__vector bool long long m = vec_cmpge(x, y);
__vector long long i = vec_signed(vec_sel(x, y, m));
return (int)vec_extract(i, 0);
}

View File

@@ -0,0 +1,25 @@
#if (__VEC__ < 10302) || (__ARCH__ < 12)
#error VXE not supported
#endif
#include <vecintrin.h>
int main(int argc, char **argv)
{
__vector float x = vec_nabs(vec_xl(argc, (float*)argv));
__vector float y = vec_load_len((float*)argv, (unsigned int)argc);
x = vec_round(vec_ceil(x) + vec_floor(y));
__vector bool int m = vec_cmpge(x, y);
x = vec_sel(x, y, m);
// need to test for the existence of the intrinsic "vflls", since vec_doublee
// maps to the wrong intrinsic "vfll" on affected GCC versions.
// see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
#if defined(__GNUC__) && !defined(__clang__)
__vector long long i = vec_signed(__builtin_s390_vflls(x));
#else
__vector long long i = vec_signed(vec_doublee(x));
#endif
return (int)vec_extract(i, 0);
}

View File

@@ -0,0 +1,21 @@
#if (__VEC__ < 10303) || (__ARCH__ < 13)
#error VXE2 not supported
#endif
#include <vecintrin.h>
int main(int argc, char **argv)
{
int val;
__vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
__vector signed short search = { 'g', 'h', 'g', 'o' };
__vector unsigned char len = { 0 };
__vector unsigned char res = vec_search_string_cc(large, search, len, &val);
__vector float x = vec_xl(argc, (float*)argv);
__vector int i = vec_signed(x);
i = vec_srdb(vec_sldb(i, i, 2), i, 3);
val += (int)vec_extract(res, 1);
val += vec_extract(i, 0);
return val;
}

View File

@@ -0,0 +1,12 @@
#include <immintrin.h>
#ifdef _MSC_VER
#include <ammintrin.h>
#else
#include <x86intrin.h>
#endif
int main(void)
{
__m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
return _mm_cvtsi128_si32(a);
}

View File

@@ -0,0 +1,18 @@
#include <immintrin.h>
/**
* Test BW mask operations due to:
* - MSVC has supported it since vs2019 see,
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
* - Clang >= v8.0
* - GCC >= v7.1
*/
int main(void)
{
__mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
m64 = _kor_mask64(m64, m64);
m64 = _kxor_mask64(m64, m64);
m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
m64 = _mm512_kunpackd(m64, m64);
m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
return (int)_cvtmask64_u64(m64);
}

View File

@@ -0,0 +1,16 @@
#include <immintrin.h>
/**
* Test DQ mask operations due to:
* - MSVC has supported it since vs2019 see,
* https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
* - Clang >= v8.0
* - GCC >= v7.1
*/
int main(void)
{
__mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
m8 = _kor_mask8(m8, m8);
m8 = _kxor_mask8(m8, m8);
m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
return (int)_cvtmask8_u32(m8);
}

View File

@@ -0,0 +1,41 @@
#include <immintrin.h>
/**
* The following intrinsics don't have direct native support but compilers
* tend to emulate them.
* They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
*/
int main(void)
{
__m512 one_ps = _mm512_set1_ps(1.0f);
__m512d one_pd = _mm512_set1_pd(1.0);
__m512i one_i64 = _mm512_set1_epi64(1);
// add
float sum_ps = _mm512_reduce_add_ps(one_ps);
double sum_pd = _mm512_reduce_add_pd(one_pd);
int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
sum_int += (int)_mm512_reduce_add_epi32(one_i64);
// mul
sum_ps += _mm512_reduce_mul_ps(one_ps);
sum_pd += _mm512_reduce_mul_pd(one_pd);
sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
// min
sum_ps += _mm512_reduce_min_ps(one_ps);
sum_pd += _mm512_reduce_min_pd(one_pd);
sum_int += (int)_mm512_reduce_min_epi32(one_i64);
sum_int += (int)_mm512_reduce_min_epu32(one_i64);
sum_int += (int)_mm512_reduce_min_epi64(one_i64);
// max
sum_ps += _mm512_reduce_max_ps(one_ps);
sum_pd += _mm512_reduce_max_pd(one_pd);
sum_int += (int)_mm512_reduce_max_epi32(one_i64);
sum_int += (int)_mm512_reduce_max_epu32(one_i64);
sum_int += (int)_mm512_reduce_max_epi64(one_i64);
// and
sum_int += (int)_mm512_reduce_and_epi32(one_i64);
sum_int += (int)_mm512_reduce_and_epi64(one_i64);
// or
sum_int += (int)_mm512_reduce_or_epi32(one_i64);
sum_int += (int)_mm512_reduce_or_epi64(one_i64);
return (int)sum_ps + (int)sum_pd + sum_int;
}
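As a sanity check on what these emulated reductions fold together: a __m512 holds 16 floats, a __m512d holds 8 doubles and a __m512i holds 8 64-bit lanes, so with every lane set to one the add-reductions are just lane counts. A plain-Python restatement of that arithmetic:

lanes_ps = [1.0] * 16    # __m512  : 16 x float
lanes_pd = [1.0] * 8     # __m512d : 8 x double
lanes_i64 = [1] * 8      # __m512i : 8 x int64
assert sum(lanes_ps) == 16.0    # what _mm512_reduce_add_ps(one_ps) yields
assert sum(lanes_pd) == 8.0     # what _mm512_reduce_add_pd(one_pd) yields
assert sum(lanes_i64) == 8      # what _mm512_reduce_add_epi64(one_i64) yields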

View File

@@ -0,0 +1,21 @@
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
typedef __vector float fv4sf_t;
typedef __vector unsigned char vec_t;
int main(void)
{
__vector_quad acc0;
float a[4] = {0,1,2,3};
float b[4] = {0,1,2,3};
vec_t *va = (vec_t *) a;
vec_t *vb = (vec_t *) b;
__builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
fv4sf_t result[4];
__builtin_mma_disassemble_acc((void *)result, &acc0);
fv4sf_t c0 = result[0];
return (int)((float*)&c0)[0];
}

View File

@@ -0,0 +1,36 @@
/**
* Testing ASM VSX register number fixer '%x<n>'
*
* Old versions of Clang don't support %x<n> in the inline asm template,
* which fixes the register number when using any of the register constraints wa, wd, wf.
*
* xref:
* - https://bugs.llvm.org/show_bug.cgi?id=31837
* - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
*/
#ifndef __VSX__
#error "VSX is not supported"
#endif
#include <altivec.h>
#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
#define vsx_ld vec_vsx_ld
#define vsx_st vec_vsx_st
#else
#define vsx_ld vec_xl
#define vsx_st vec_xst
#endif
int main(void)
{
float z4[] = {0, 0, 0, 0};
signed int zout[] = {0, 0, 0, 0};
__vector float vz4 = vsx_ld(0, z4);
__vector signed int asm_ret = vsx_ld(0, zout);
__asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
vsx_st(asm_ret, 0, zout);
return zout[0];
}

View File

@@ -0,0 +1 @@
int test_flags;

View File

@@ -0,0 +1,41 @@
"""distutils.command
Package containing implementation of all the standard Distutils
commands.
"""
def test_na_writable_attributes_deletion():
a = np.NA(2)
attr = ['payload', 'dtype']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
distutils_all = [ #'build_py',
'clean',
'install_clib',
'install_scripts',
'bdist',
'bdist_dumb',
'bdist_wininst',
]
__import__('distutils.command', globals(), locals(), distutils_all)
__all__ = ['build',
'config_compiler',
'config',
'build_src',
'build_py',
'build_ext',
'build_clib',
'build_scripts',
'install',
'install_data',
'install_headers',
'install_lib',
'bdist_rpm',
'sdist',
] + distutils_all

View File

@@ -0,0 +1,148 @@
"""This module implements additional tests ala autoconf which can be useful.
"""
import textwrap
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
"""Return the inline identifier (may be empty)."""
cmd._check_compiler()
body = textwrap.dedent("""
#ifndef __cplusplus
static %(inline)s int static_func (void)
{
return 0;
}
%(inline)s int nostatic_func (void)
{
return 0;
}
#endif""")
for kw in ['inline', '__inline__', '__inline']:
st = cmd.try_compile(body % {'inline': kw}, None, None)
if st:
return kw
return ''
def check_restrict(cmd):
"""Return the restrict identifier (may be empty)."""
cmd._check_compiler()
body = textwrap.dedent("""
static int static_func (char * %(restrict)s a)
{
return 0;
}
""")
for kw in ['restrict', '__restrict__', '__restrict']:
st = cmd.try_compile(body % {'restrict': kw}, None, None)
if st:
return kw
return ''
def check_compiler_gcc(cmd):
"""Check if the compiler is GCC."""
cmd._check_compiler()
body = textwrap.dedent("""
int
main()
{
#if (! defined __GNUC__)
#error gcc required
#endif
return 0;
}
""")
return cmd.try_compile(body, None, None)
def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
"""
Check that the gcc version is at least the specified version."""
cmd._check_compiler()
version = '.'.join([str(major), str(minor), str(patchlevel)])
body = textwrap.dedent("""
int
main()
{
#if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
(__GNUC_MINOR__ < %(minor)d) || \\
(__GNUC_PATCHLEVEL__ < %(patchlevel)d)
#error gcc >= %(version)s required
#endif
return 0;
}
""")
kw = {'version': version, 'major': major, 'minor': minor,
'patchlevel': patchlevel}
return cmd.try_compile(body % kw, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
body = textwrap.dedent("""
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s %s(void* unused)
{
return 0;
}
int
main()
{
return 0;
}
""") % (attribute, name)
return cmd.try_compile(body, None, None) != 0
def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
include):
"""Return True if the given function attribute is supported with
intrinsics."""
cmd._check_compiler()
body = textwrap.dedent("""
#include<%s>
int %s %s(void)
{
%s;
return 0;
}
int
main()
{
return 0;
}
""") % (include, attribute, name, code)
return cmd.try_compile(body, None, None) != 0
def check_gcc_variable_attribute(cmd, attribute):
"""Return True if the given variable attribute is supported."""
cmd._check_compiler()
body = textwrap.dedent("""
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s foo;
int
main()
{
return 0;
}
""") % (attribute, )
return cmd.try_compile(body, None, None) != 0
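Each helper above expects a numpy.distutils config-style command object providing _check_compiler() and try_compile(body, headers, include_dirs). A hedged usage sketch, assuming the file is importable as numpy.distutils.command.autodist and that an already-finalized config command instance is available:

from numpy.distutils.command.autodist import check_inline, check_restrict

def report_compiler_spellings(config_cmd):
    # config_cmd is assumed to be a finalized numpy.distutils 'config'
    # command; it supplies _check_compiler() and try_compile().
    inline_kw = check_inline(config_cmd)      # '', 'inline', '__inline__' or '__inline'
    restrict_kw = check_restrict(config_cmd)  # '', 'restrict', '__restrict__' or '__restrict'
    print("inline keyword:   %r" % inline_kw)
    print("restrict keyword: %r" % restrict_kw)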

View File

@@ -0,0 +1,22 @@
import os
import sys
if 'setuptools' in sys.modules:
from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
else:
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
class bdist_rpm(old_bdist_rpm):
def _make_spec_file(self):
spec_file = old_bdist_rpm._make_spec_file(self)
# Replace hardcoded setup.py script name
# with the real setup script name.
setup_py = os.path.basename(sys.argv[0])
if setup_py == 'setup.py':
return spec_file
new_spec_file = []
for line in spec_file:
line = line.replace('setup.py', setup_py)
new_spec_file.append(line)
return new_spec_file
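A toy illustration of the substitution performed by _make_spec_file, assuming the build was launched from a script named setup_numpy.py (a made-up name) instead of setup.py; the spec lines are illustrative, not the real generated spec:

spec_file = [
    "%build",
    "env CFLAGS=\"$CFLAGS\" python setup.py build",
    "python setup.py install --root=$RPM_BUILD_ROOT",
]
setup_py = "setup_numpy.py"    # stand-in for os.path.basename(sys.argv[0])
new_spec_file = [line.replace("setup.py", setup_py) for line in spec_file]
# every 'setup.py' occurrence now reads 'setup_numpy.py'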

View File

@@ -0,0 +1,62 @@
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
sub_commands = [('config_cc', lambda *args: True),
('config_fc', lambda *args: True),
('build_src', old_build.has_ext_modules),
] + old_build.sub_commands
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('warn-error', None,
"turn all warnings into errors (-Werror)"),
('cpu-baseline=', None,
"specify a list of enabled baseline CPU optimizations"),
('cpu-dispatch=', None,
"specify a list of dispatched CPU optimizations"),
('disable-optimization', None,
"disable CPU optimized code(dispatch,simd,fast...)"),
('simd-test=', None,
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
]
help_options = old_build.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
self.warn_error = False
self.cpu_baseline = "min"
self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
self.disable_optimization = False
"""
the '_simd' module is very large. Adding more dispatched features
will increase binary size and compile time. By default we minimize
the targeted features to those most commonly used by the NumPy SIMD interface (NPYV).
NOTE: any specified features will be ignored if they're:
- part of the baseline (--cpu-baseline)
- not part of dispatch-able features (--cpu-dispatch)
- not supported by the compiler or platform
"""
self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \
"AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2"
def finalize_options(self):
build_scripts = self.build_scripts
old_build.finalize_options(self)
plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
def run(self):
old_build.run(self)
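A small sketch of the scripts directory name finalize_options derives when --build-scripts is not given, assuming the default build_base of 'build'; the printed value depends on the host platform and Python version:

import os
import sys
from distutils.util import get_platform

plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
build_scripts = os.path.join("build", "scripts" + plat_specifier)
print(build_scripts)    # e.g. build/scripts.linux-armv7l-3.9 (illustrative)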

View File

@@ -0,0 +1,469 @@
""" Modified version of build_clib that handles fortran source files.
"""
import os
from glob import glob
import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
DistutilsFileError
from numpy.distutils import log
from distutils.dep_util import newer_group
from numpy.distutils.misc_util import (
filter_sources, get_lib_source_files, get_numpy_include_dirs,
has_cxx_sources, has_f_sources, is_sequence
)
from numpy.distutils.ccompiler_opt import new_ccompiler_opt
# Fix Python distutils bug sf #1718574:
_l = old_build_clib.user_options
for _i in range(len(_l)):
if _l[_i][0] in ['build-clib', 'build-temp']:
_l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
#
class build_clib(old_build_clib):
description = "build C/C++/F libraries used by Python extensions"
user_options = old_build_clib.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
('warn-error', None,
"turn all warnings into errors (-Werror)"),
('cpu-baseline=', None,
"specify a list of enabled baseline CPU optimizations"),
('cpu-dispatch=', None,
"specify a list of dispatched CPU optimizations"),
('disable-optimization', None,
"disable CPU optimized code(dispatch,simd,fast...)"),
]
boolean_options = old_build_clib.boolean_options + \
['inplace', 'warn-error', 'disable-optimization']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
self.warn_error = None
self.cpu_baseline = None
self.cpu_dispatch = None
self.disable_optimization = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError as e:
raise ValueError("--parallel/-j argument must be an integer") from e
old_build_clib.finalize_options(self)
self.set_undefined_options('build',
('parallel', 'parallel'),
('warn_error', 'warn_error'),
('cpu_baseline', 'cpu_baseline'),
('cpu_dispatch', 'cpu_dispatch'),
('disable_optimization', 'disable_optimization')
)
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
if has_f_sources(build_info.get('sources', [])):
return True
return False
def have_cxx_sources(self):
for (lib_name, build_info) in self.libraries:
if has_cxx_sources(build_info.get('sources', [])):
return True
return False
def run(self):
if not self.libraries:
return
# Make sure that library sources are complete.
languages = []
# Make sure that extension sources are complete.
self.run_command('build_src')
for (lib_name, build_info) in self.libraries:
l = build_info.get('language', None)
if l and l not in languages:
languages.append(l)
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
if self.warn_error:
self.compiler.compiler.append('-Werror')
self.compiler.compiler_so.append('-Werror')
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
self.libraries = libraries
self.compiler.show_customization()
if not self.disable_optimization:
dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
opt_cache_path = os.path.abspath(
os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
)
if hasattr(self, "compiler_opt"):
# By default `CCompilerOpt` updates the cache at process exit,
# which may lead to duplicate building
# (see build_extension()/force_rebuild) if run() is called
# multiple times within the same OS process/thread without
# giving the previous instances of `CCompilerOpt` the chance
# to update the cache.
self.compiler_opt.cache_flush()
self.compiler_opt = new_ccompiler_opt(
compiler=self.compiler, dispatch_hpath=dispatch_hpath,
cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
cache_path=opt_cache_path
)
def report(copt):
log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
log.info(copt.report(full=True))
import atexit
atexit.register(report, self.compiler_opt)
if self.have_f_sources():
from numpy.distutils.fcompiler import new_fcompiler
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90='f90' in languages,
c_compiler=self.compiler)
if self._f_compiler is not None:
self._f_compiler.customize(self.distribution)
libraries = self.libraries
self.libraries = None
self._f_compiler.customize_cmd(self)
self.libraries = libraries
self._f_compiler.show_customization()
else:
self._f_compiler = None
self.build_libraries(self.libraries)
if self.inplace:
for l in self.distribution.installed_libraries:
libname = self.compiler.library_filename(l.name)
source = os.path.join(self.build_clib, libname)
target = os.path.join(l.target_dir, libname)
self.mkpath(l.target_dir)
shutil.copy(source, target)
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for lib in self.libraries:
filenames.extend(get_lib_source_files(lib))
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
self.build_a_library(build_info, lib_name, libraries)
def assemble_flags(self, in_flags):
""" Assemble flags from flag list
Parameters
----------
in_flags : None or sequence
None corresponds to empty list. Sequence elements can be strings
or callables that return lists of strings. Callable takes `self` as
single parameter.
Returns
-------
out_flags : list
"""
if in_flags is None:
return []
out_flags = []
for in_flag in in_flags:
if callable(in_flag):
out_flags += in_flag(self)
else:
out_flags.append(in_flag)
return out_flags
def build_a_library(self, build_info, lib_name, libraries):
# default compilers
compiler = self.compiler
fcompiler = self._f_compiler
sources = build_info.get('sources')
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name)
sources = list(sources)
c_sources, cxx_sources, f_sources, fmodule_sources \
= filter_sources(sources)
requiref90 = not not fmodule_sources or \
build_info.get('language', 'c') == 'f90'
# save source type information so that build_ext can use it.
source_languages = []
if c_sources:
source_languages.append('c')
if cxx_sources:
source_languages.append('c++')
if requiref90:
source_languages.append('f90')
elif f_sources:
source_languages.append('f77')
build_info['source_languages'] = source_languages
lib_file = compiler.library_filename(lib_name,
output_dir=self.build_clib)
depends = sources + build_info.get('depends', [])
force_rebuild = self.force
if not self.disable_optimization and not self.compiler_opt.is_cached():
log.debug("Detected changes on compiler optimizations")
force_rebuild = True
if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
log.debug("skipping '%s' library (up-to-date)", lib_name)
return
else:
log.info("building '%s' library", lib_name)
config_fc = build_info.get('config_fc', {})
if fcompiler is not None and config_fc:
log.info('using additional config_fc from setup script '
'for fortran compiler: %s'
% (config_fc,))
from numpy.distutils.fcompiler import new_fcompiler
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=requiref90,
c_compiler=self.compiler)
if fcompiler is not None:
dist = self.distribution
base_config_fc = dist.get_option_dict('config_fc').copy()
base_config_fc.update(config_fc)
fcompiler.customize(base_config_fc)
# check availability of Fortran compilers
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("library %s has Fortran sources"
" but no Fortran compiler found" % (lib_name))
if fcompiler is not None:
fcompiler.extra_f77_compile_args = build_info.get(
'extra_f77_compile_args') or []
fcompiler.extra_f90_compile_args = build_info.get(
'extra_f90_compile_args') or []
macros = build_info.get('macros')
if macros is None:
macros = []
include_dirs = build_info.get('include_dirs')
if include_dirs is None:
include_dirs = []
# Flags can be strings, or callables that return a list of strings.
extra_postargs = self.assemble_flags(
build_info.get('extra_compiler_args'))
extra_cflags = self.assemble_flags(
build_info.get('extra_cflags'))
extra_cxxflags = self.assemble_flags(
build_info.get('extra_cxxflags'))
include_dirs.extend(get_numpy_include_dirs())
# where compiled F90 module files are:
module_dirs = build_info.get('module_dirs') or []
module_build_dir = os.path.dirname(lib_file)
if requiref90:
self.mkpath(module_build_dir)
if compiler.compiler_type == 'msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
extra_cflags += extra_cxxflags
# filtering C dispatch-table sources when optimization is not disabled,
# otherwise treated as normal sources.
copt_c_sources = []
copt_cxx_sources = []
copt_baseline_flags = []
copt_macros = []
if not self.disable_optimization:
bsrc_dir = self.get_finalized_command("build_src").build_src
dispatch_hpath = os.path.join("numpy", "distutils", "include")
dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
include_dirs.append(dispatch_hpath)
copt_build_src = None if self.inplace else bsrc_dir
for _srcs, _dst, _ext in (
((c_sources,), copt_c_sources, ('.dispatch.c',)),
((c_sources, cxx_sources), copt_cxx_sources,
('.dispatch.cpp', '.dispatch.cxx'))
):
for _src in _srcs:
_dst += [
_src.pop(_src.index(s))
for s in _src[:] if s.endswith(_ext)
]
copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
else:
copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
objects = []
if copt_cxx_sources:
log.info("compiling C++ dispatch-able sources")
objects += self.compiler_opt.try_dispatch(
copt_c_sources,
output_dir=self.build_temp,
src_dir=copt_build_src,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs + extra_cxxflags,
ccompiler=cxx_compiler
)
if copt_c_sources:
log.info("compiling C dispatch-able sources")
objects += self.compiler_opt.try_dispatch(
copt_c_sources,
output_dir=self.build_temp,
src_dir=copt_build_src,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs + extra_cflags)
if c_sources:
log.info("compiling C sources")
objects += compiler.compile(
c_sources,
output_dir=self.build_temp,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=(extra_postargs +
copt_baseline_flags +
extra_cflags))
if cxx_sources:
log.info("compiling C++ sources")
cxx_compiler = compiler.cxx_compiler()
cxx_objects = cxx_compiler.compile(
cxx_sources,
output_dir=self.build_temp,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=(extra_postargs +
copt_baseline_flags +
extra_cxxflags))
objects.extend(cxx_objects)
if f_sources or fmodule_sources:
extra_postargs = []
f_objects = []
if requiref90:
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
if requiref90 and self._f_compiler.module_dir_switch is None:
# move new compiled F90 module files to module_build_dir
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r'
% (f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
else:
f_objects = []
if f_objects and not fcompiler.can_ccompiler_link(compiler):
# Default linker cannot link Fortran object files, and results
# need to be wrapped later. Instead of creating a real static
# library, just keep track of the object files.
listfn = os.path.join(self.build_clib,
lib_name + '.fobjects')
with open(listfn, 'w') as f:
f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
listfn = os.path.join(self.build_clib,
lib_name + '.cobjects')
with open(listfn, 'w') as f:
f.write("\n".join(os.path.abspath(obj) for obj in objects))
# create empty "library" file for dependency tracking
lib_fname = os.path.join(self.build_clib,
lib_name + compiler.static_lib_extension)
with open(lib_fname, 'wb') as f:
pass
else:
# assume that default linker is suitable for
# linking Fortran object files
objects.extend(f_objects)
compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# fix library dependencies
clib_libraries = build_info.get('libraries', [])
for lname, binfo in libraries:
if lname in clib_libraries:
clib_libraries.extend(binfo.get('libraries', []))
if clib_libraries:
build_info['libraries'] = clib_libraries
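The assemble_flags helper above accepts a mix of plain strings and callables that take the command object and return extra flag lists. A standalone restatement of that contract, for illustration only (the command argument is unused by the lambda here):

def assemble_flags(cmd, in_flags):
    # Mirrors build_clib.assemble_flags: strings pass through,
    # callables are invoked with the command object.
    if in_flags is None:
        return []
    out_flags = []
    for in_flag in in_flags:
        if callable(in_flag):
            out_flags += in_flag(cmd)
        else:
            out_flags.append(in_flag)
    return out_flags

extra = assemble_flags(None, ["-O3", lambda cmd: ["-fno-strict-aliasing"]])
print(extra)    # ['-O3', '-fno-strict-aliasing']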

View File

@@ -0,0 +1,741 @@
""" Modified version of build_ext that handles fortran source files.
"""
import os
import subprocess
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.system_info import combine_paths
from numpy.distutils.misc_util import (
filter_sources, get_ext_source_files, get_numpy_include_dirs,
has_cxx_sources, has_f_sources, is_sequence
)
from numpy.distutils.command.config_compiler import show_fortran_compilers
from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
class build_ext (old_build_ext):
description = "build C/C++/F extensions (compile/link to build directory)"
user_options = old_build_ext.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
('warn-error', None,
"turn all warnings into errors (-Werror)"),
('cpu-baseline=', None,
"specify a list of enabled baseline CPU optimizations"),
('cpu-dispatch=', None,
"specify a list of dispatched CPU optimizations"),
('disable-optimization', None,
"disable CPU optimized code(dispatch,simd,fast...)"),
('simd-test=', None,
"specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
self.warn_error = None
self.cpu_baseline = None
self.cpu_dispatch = None
self.disable_optimization = None
self.simd_test = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError as e:
raise ValueError("--parallel/-j argument must be an integer") from e
# Ensure that self.include_dirs and self.distribution.include_dirs
# refer to the same list object. finalize_options will modify
# self.include_dirs, but self.distribution.include_dirs is used
# during the actual build.
# self.include_dirs is None unless paths are specified with
# --include-dirs.
# The include paths will be passed to the compiler in the order:
# numpy paths, --include-dirs paths, Python include path.
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
incl_dirs = self.include_dirs or []
if self.distribution.include_dirs is None:
self.distribution.include_dirs = []
self.include_dirs = self.distribution.include_dirs
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
self.set_undefined_options('build',
('parallel', 'parallel'),
('warn_error', 'warn_error'),
('cpu_baseline', 'cpu_baseline'),
('cpu_dispatch', 'cpu_dispatch'),
('disable_optimization', 'disable_optimization'),
('simd_test', 'simd_test')
)
CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
def run(self):
if not self.extensions:
return
# Make sure that extension sources are complete.
self.run_command('build_src')
if self.distribution.has_c_libraries():
if self.inplace:
if self.distribution.have_run.get('build_clib'):
log.warn('build_clib already run, it is too late to '
'ensure in-place build of build_clib')
build_clib = self.distribution.get_command_obj(
'build_clib')
else:
build_clib = self.distribution.get_command_obj(
'build_clib')
build_clib.inplace = 1
build_clib.ensure_finalized()
build_clib.run()
self.distribution.have_run['build_clib'] = 1
else:
self.run_command('build_clib')
build_clib = self.get_finalized_command('build_clib')
self.library_dirs.append(build_clib.build_clib)
else:
build_clib = None
# Not including C libraries to the list of
# extension libraries automatically to prevent
# bogus linking commands. Extensions must
# explicitly specify the C libraries that they use.
from distutils.ccompiler import new_compiler
from numpy.distutils.fcompiler import new_fcompiler
compiler_type = self.compiler
# Initialize C compiler:
self.compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
if self.warn_error:
self.compiler.compiler.append('-Werror')
self.compiler.compiler_so.append('-Werror')
self.compiler.show_customization()
if not self.disable_optimization:
dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
opt_cache_path = os.path.abspath(
os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
)
if hasattr(self, "compiler_opt"):
# By default `CCompilerOpt` updates the cache at process exit,
# which may lead to duplicate building
# (see build_extension()/force_rebuild) if run() is called
# multiple times within the same OS process/thread without
# giving the previous instances of `CCompilerOpt` the chance
# to update the cache.
self.compiler_opt.cache_flush()
self.compiler_opt = new_ccompiler_opt(
compiler=self.compiler, dispatch_hpath=dispatch_hpath,
cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
cache_path=opt_cache_path
)
def report(copt):
log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
log.info(copt.report(full=True))
import atexit
atexit.register(report, self.compiler_opt)
# Setup directory for storing generated extra DLL files on Windows
self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
if not os.path.isdir(self.extra_dll_dir):
os.makedirs(self.extra_dll_dir)
# Create mapping of libraries built by build_clib:
clibs = {}
if build_clib is not None:
for libname, build_info in build_clib.libraries or []:
if libname in clibs and clibs[libname] != build_info:
log.warn('library %r defined more than once,'
' overwriting build_info\n%s... \nwith\n%s...'
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
clibs[libname] = build_info
# .. and distribution libraries:
for libname, build_info in self.distribution.libraries or []:
if libname in clibs:
# build_clib libraries take precedence over distribution ones
continue
clibs[libname] = build_info
# Determine if C++/Fortran 77/Fortran 90 compilers are needed.
# Update extension libraries, library_dirs, and macros.
all_languages = set()
for ext in self.extensions:
ext_languages = set()
c_libs = []
c_lib_dirs = []
macros = []
for libname in ext.libraries:
if libname in clibs:
binfo = clibs[libname]
c_libs += binfo.get('libraries', [])
c_lib_dirs += binfo.get('library_dirs', [])
for m in binfo.get('macros', []):
if m not in macros:
macros.append(m)
for l in clibs.get(libname, {}).get('source_languages', []):
ext_languages.add(l)
if c_libs:
new_c_libs = ext.libraries + c_libs
log.info('updating extension %r libraries from %r to %r'
% (ext.name, ext.libraries, new_c_libs))
ext.libraries = new_c_libs
ext.library_dirs = ext.library_dirs + c_lib_dirs
if macros:
log.info('extending extension %r defined_macros with %r'
% (ext.name, macros))
ext.define_macros = ext.define_macros + macros
# determine extension languages
if has_f_sources(ext.sources):
ext_languages.add('f77')
if has_cxx_sources(ext.sources):
ext_languages.add('c++')
l = ext.language or self.compiler.detect_language(ext.sources)
if l:
ext_languages.add(l)
# reset language attribute for choosing proper linker
#
# When we build extensions with multiple languages, we have to
# choose a linker. The rules here are:
# 1. if there is Fortran code, always prefer the Fortran linker,
# 2. otherwise prefer C++ over C,
# 3. Users can force a particular linker by using
# `language='c'`, 'c++', 'f90' or 'f77'
# in their config.add_extension() calls.
if 'c++' in ext_languages:
ext_language = 'c++'
else:
ext_language = 'c' # default
has_fortran = False
if 'f90' in ext_languages:
ext_language = 'f90'
has_fortran = True
elif 'f77' in ext_languages:
ext_language = 'f77'
has_fortran = True
if not ext.language or has_fortran:
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
ext.language = ext_language
# global language
all_languages.update(ext_languages)
need_f90_compiler = 'f90' in all_languages
need_f77_compiler = 'f77' in all_languages
need_cxx_compiler = 'c++' in all_languages
# Initialize C++ compiler:
if need_cxx_compiler:
self._cxx_compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
compiler = self._cxx_compiler
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
compiler.customize_cmd(self)
compiler.show_customization()
self._cxx_compiler = compiler.cxx_compiler()
else:
self._cxx_compiler = None
# Initialize Fortran 77 compiler:
if need_f77_compiler:
ctype = self.fcompiler
self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=False,
c_compiler=self.compiler)
fcompiler = self._f77_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f77_compiler=%s is not available.' %
(ctype))
self._f77_compiler = None
else:
self._f77_compiler = None
# Initialize Fortran 90 compiler:
if need_f90_compiler:
ctype = self.fcompiler
self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=True,
c_compiler=self.compiler)
fcompiler = self._f90_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f90_compiler=%s is not available.' %
(ctype))
self._f90_compiler = None
else:
self._f90_compiler = None
# Build extensions
self.build_extensions()
# Copy over any extra DLL files
# FIXME: In the case where there is more than one package,
# we blindly assume that every package needs all of the libraries,
# resulting in a larger wheel than is required. This should be fixed,
# but it's so rare that I won't bother to handle it.
pkg_roots = {
self.get_ext_fullname(ext.name).split('.')[0]
for ext in self.extensions
}
for pkg_root in pkg_roots:
shared_lib_dir = os.path.join(pkg_root, '.libs')
if not self.inplace:
shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
for fn in os.listdir(self.extra_dll_dir):
if not os.path.isdir(shared_lib_dir):
os.makedirs(shared_lib_dir)
if not fn.lower().endswith('.dll'):
continue
runtime_lib = os.path.join(self.extra_dll_dir, fn)
copy_file(runtime_lib, shared_lib_dir)
def swig_sources(self, sources, extensions=None):
# Do nothing. Swig sources have been handled in build_src command.
return sources
def build_extension(self, ext):
sources = ext.sources
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name)
sources = list(sources)
if not sources:
return
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
force_rebuild = self.force
if not self.disable_optimization and not self.compiler_opt.is_cached():
log.debug("Detected changes on compiler optimizations")
force_rebuild = True
if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
c_sources, cxx_sources, f_sources, fmodule_sources = \
filter_sources(ext.sources)
if self.compiler.compiler_type == 'msvc':
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
extra_cflags += extra_cxxflags
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
# Set Fortran/C++ compilers for compilation and linking.
if ext.language == 'f90':
fcompiler = self._f90_compiler
elif ext.language == 'f77':
fcompiler = self._f77_compiler
else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
ext, 'extra_f77_compile_args') else []
fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
ext, 'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
if cxx_sources and cxx_compiler is None:
raise DistutilsError("extension %r has C++ sources"
"but no C++ compiler found" % (ext.name))
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("extension %r has Fortran sources "
"but no Fortran compiler found" % (ext.name))
if ext.language in ['f77', 'f90'] and fcompiler is None:
self.warn("extension %r has Fortran libraries "
"but no Fortran linker found, using default linker" % (ext.name))
if ext.language == 'c++' and cxx_compiler is None:
self.warn("extension %r has C++ libraries "
"but no C++ linker found, using default linker" % (ext.name))
kws = {'depends': ext.depends}
output_dir = self.build_temp
include_dirs = ext.include_dirs + get_numpy_include_dirs()
# filtering C dispatch-table sources when optimization is not disabled,
# otherwise treated as normal sources.
copt_c_sources = []
copt_cxx_sources = []
copt_baseline_flags = []
copt_macros = []
if not self.disable_optimization:
bsrc_dir = self.get_finalized_command("build_src").build_src
dispatch_hpath = os.path.join("numpy", "distutils", "include")
dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
include_dirs.append(dispatch_hpath)
copt_build_src = None if self.inplace else bsrc_dir
for _srcs, _dst, _ext in (
((c_sources,), copt_c_sources, ('.dispatch.c',)),
((c_sources, cxx_sources), copt_cxx_sources,
('.dispatch.cpp', '.dispatch.cxx'))
):
for _src in _srcs:
_dst += [
_src.pop(_src.index(s))
for s in _src[:] if s.endswith(_ext)
]
copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
else:
copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
c_objects = []
if copt_cxx_sources:
log.info("compiling C++ dispatch-able sources")
c_objects += self.compiler_opt.try_dispatch(
copt_cxx_sources,
output_dir=output_dir,
src_dir=copt_build_src,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args + extra_cxxflags,
ccompiler=cxx_compiler,
**kws
)
if copt_c_sources:
log.info("compiling C dispatch-able sources")
c_objects += self.compiler_opt.try_dispatch(
copt_c_sources,
output_dir=output_dir,
src_dir=copt_build_src,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args + extra_cflags,
**kws)
if c_sources:
log.info("compiling C sources")
c_objects += self.compiler.compile(
c_sources,
output_dir=output_dir,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=(extra_args + copt_baseline_flags +
extra_cflags),
**kws)
if cxx_sources:
log.info("compiling C++ sources")
c_objects += cxx_compiler.compile(
cxx_sources,
output_dir=output_dir,
macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=(extra_args + copt_baseline_flags +
extra_cxxflags),
**kws)
extra_postargs = []
f_objects = []
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
module_dirs = ext.module_dirs[:]
module_build_dir = os.path.join(
self.build_temp, os.path.dirname(
self.get_ext_filename(fullname)))
self.mkpath(module_build_dir)
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
if fcompiler.module_dir_switch is None:
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r' %
(f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
unlinkable_fobjects = f_objects
objects = c_objects
else:
unlinkable_fobjects = []
objects = c_objects + f_objects
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
libraries = self.get_libraries(ext)[:]
library_dirs = ext.library_dirs[:]
linker = self.compiler.link_shared_object
# Always use system linker when using MSVC compiler.
if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
# expand libraries with fcompiler libraries as we are
# not using fcompiler linker
self._libs_with_msvc_and_fortran(
fcompiler, libraries, library_dirs)
if ext.runtime_library_dirs:
# gcc adds RPATH to the link. On windows, copy the dll into
# self.extra_dll_dir instead.
for d in ext.runtime_library_dirs:
for f in glob(d + '/*.dll'):
copy_file(f, self.extra_dll_dir)
ext.runtime_library_dirs = []
elif ext.language in ['f77', 'f90'] and fcompiler is not None:
linker = fcompiler.link_shared_object
if ext.language == 'c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
if fcompiler is not None:
objects, libraries = self._process_unlinkable_fobjects(
objects, libraries,
fcompiler, library_dirs,
unlinkable_fobjects)
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
build_src = self.get_finalized_command("build_src").build_src
build_clib = self.get_finalized_command("build_clib").build_clib
objects = self.compiler.compile([os.path.join(build_src,
"gfortran_vs2003_hack.c")],
output_dir=self.build_temp)
self.compiler.create_static_lib(
objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
def _process_unlinkable_fobjects(self, objects, libraries,
fcompiler, library_dirs,
unlinkable_fobjects):
libraries = list(libraries)
objects = list(objects)
unlinkable_fobjects = list(unlinkable_fobjects)
# Expand possible fake static libraries to objects;
# make sure to iterate over a copy of the list as
# "fake" libraries will be removed as they are
# encountered
for lib in libraries[:]:
for libdir in library_dirs:
fake_lib = os.path.join(libdir, lib + '.fobjects')
if os.path.isfile(fake_lib):
# Replace fake static library
libraries.remove(lib)
with open(fake_lib) as f:
unlinkable_fobjects.extend(f.read().splitlines())
# Expand C objects
c_lib = os.path.join(libdir, lib + '.cobjects')
with open(c_lib) as f:
objects.extend(f.read().splitlines())
# Wrap unlinkable objects to a linkable one
if unlinkable_fobjects:
fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
wrapped = fcompiler.wrap_unlinkable_objects(
fobjects, output_dir=self.build_temp,
extra_dll_dir=self.extra_dll_dir)
objects.extend(wrapped)
return objects, libraries
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
c_library_dirs):
if fcompiler is None:
return
for libname in c_libraries:
if libname.startswith('msvc'):
continue
fileexists = False
for libdir in c_library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists:
continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in c_library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(self.build_temp, libname + '.lib')
copy_file(libfile, libfile2)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
fileexists = True
break
if fileexists:
continue
log.warn('could not find library %r in directories %s'
% (libname, c_library_dirs))
# Always use system linker when using MSVC compiler.
f_lib_dirs = []
for dir in fcompiler.library_dirs:
# correct path when compiling in Cygwin but with normal Win
# Python
if dir.startswith('/usr/lib'):
try:
dir = subprocess.check_output(['cygpath', '-w', dir])
except (OSError, subprocess.CalledProcessError):
pass
else:
dir = filepath_from_subprocess_output(dir)
f_lib_dirs.append(dir)
c_library_dirs.extend(f_lib_dirs)
# make g77-compiled static libs available to MSVC
for lib in fcompiler.libraries:
if not lib.startswith('msvc'):
c_libraries.append(lib)
p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
if p:
dst_name = os.path.join(self.build_temp, lib + '.lib')
if not os.path.isfile(dst_name):
copy_file(p[0], dst_name)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
if not ext.sources:
continue
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
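The linker-selection rules commented inside run() above (Fortran beats C++, C++ beats C, and an explicit ext.language is kept unless Fortran sources are present) can be distilled into a tiny pure function. This is only a restatement for illustration, not code used by build_ext:

def pick_linker_language(ext_languages):
    # Return the language whose linker build_ext would prefer.
    if 'f90' in ext_languages:
        return 'f90'
    if 'f77' in ext_languages:
        return 'f77'
    if 'c++' in ext_languages:
        return 'c++'
    return 'c'

assert pick_linker_language({'c'}) == 'c'
assert pick_linker_language({'c', 'c++'}) == 'c++'
assert pick_linker_language({'c++', 'f77'}) == 'f77'
assert pick_linker_language({'c', 'c++', 'f90'}) == 'f90'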

Some files were not shown because too many files have changed in this diff.