Plenty of changes to the test system

rocketry
Richard Meadows 2015-03-13 19:07:24 +00:00
rodzic 2fd86529c7
commit 647e6bb3ca
86 zmienionych plików z 10720 dodań i 558 usunięć

2
.gitignore vendored
Wyświetl plik

@ -1,4 +1,6 @@
datasheets
firmware/test/ctypesgen/.svn
firmware/test/main.py*
*.s#*
*.b#*

@ -1 +1 @@
Subproject commit b401f510d0365f1f65669c919bcdf48196a11459
Subproject commit 82d98592fa104795f94b93f14341fd2aae29a887

Wyświetl plik

@ -68,6 +68,7 @@ TR := tr
TOOLCHAIN := arm-none-eabi
AS := $(TOOLCHAIN)-as
CC := $(TOOLCHAIN)-gcc
DB := $(TOOLCHAIN)-gdb
CXX := $(TOOLCHAIN)-g++
OBJCOPY := $(TOOLCHAIN)-objcopy
OBJDUMP := $(TOOLCHAIN)-objdump
@ -109,12 +110,12 @@ endif
#
#
SYSTEM ?= chip/system_samd20.c chip/startup_samd20.c
INCLUDE_PATH += chip/ chip/cmsis/ samd20/ samd20/component/
INCLUDE_PATH += chip/ chip/cmsis/ samd20/ samd20/component/ test/tc/
# Verification suite code
#
#
SYSTEM += tools/verification/verification_tc.c
SYSTEM += test/main.c
# Linker Scripts
#
@ -151,7 +152,7 @@ TAGFILES = $(SOURCES) $(shell $(CAT) $(OBJECTS:.o=.d) \
# Default target
#
#
all: $(TARGET).elf etags verification
all: $(TARGET).elf etags
# Rule for generating object and dependency files from source files.
#
@ -242,18 +243,22 @@ etags: $(TAGFILES)
emacs:
@emacs23 $(TAGFILES) Makefile config.mk README.md
# Makes the verification tool
# Test
#
verification: tools/verification/verification_tc.py
TESTCASES := $(shell $(FIND) test/tc -name '*.[h]')
# Ctypesgen for verification tool
#
%.py: %.c
.PHONY: test
test: test/main.py
@echo "Running tests..."
>/dev/null $(DB) -q -x test/tests.py
# Ctypesgen for test
test/main.py: test/main.c $(TESTCASES)
@echo "Generating Python Wrappers...."
@echo
tools/verification/ctypesgen/ctypesgen.py -o $@ \
test/ctypesgen/ctypesgen.py -o $@ \
--cpp="$(CC) -E -DCTYPESGEN $(CPPFLAGS) $(CFLAGS) \
$(addprefix -I,$(INCLUDE_PATH))" $<
$(addprefix -I,$(INCLUDE_PATH))" $^
# Removes everything in the output directory
#
@ -264,3 +269,4 @@ clean:
$(RM) $(OUTPUT_PATH)*
$(RM) gdbscript
$(RM) TAGS
$(RM) test/main.py*

Wyświetl plik

@ -0,0 +1,26 @@
Copyright (c) 2007-2008, Ctypesgen Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the <ORGANIZATION> nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Wyświetl plik

@ -0,0 +1,26 @@
ctypesgen
---------
(c) Ctypesgen developers 2007-2011
http://code.google.com/p/ctypesgen/
ctypesgen is a pure-python ctypes wrapper generator. It can also
output JSON, which can be used with Mork, which generates bindings for
Lua, using the alien module (which binds libffi to Lua).
ctypesgen is distributed under the New (2-clause) BSD License:
http://www.opensource.org/licenses/bsd-license.php
libffi is a portable Foreign Function Interface library:
http://sources.redhat.com/libffi/
Mork, the friendly alien, can be found at:
https://github.com/rrthomas/mork
Usage
-----
To get JSON output, use --output-language=json. When outputting JSON,
you will probably want to use --all-headers --builtin-symbols
--no-stddef-types --no-gnu-types --no-python-types too.

Wyświetl plik

@ -0,0 +1,170 @@
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
def find_names_in_modules(modules):
    """Return the set of attribute names exposed by the named modules.

    `modules` is a list of module-name strings. Modules that fail to
    import are skipped silently (best-effort, matching the original
    intent of ignoring unavailable link-modules).
    """
    names = set()
    for module in modules:
        try:
            mod = __import__(module)
        except Exception:
            # Best-effort: an unimportable module contributes no names.
            pass
        else:
            # Bug fix: the original did `names.union(dir(module))`, which
            # (a) discarded the result -- set.union() returns a new set --
            # and (b) ran dir() on the module-name string rather than the
            # imported module object. The function always returned set().
            names.update(dir(mod))
    return names
import optparse, sys
def option_callback_W(option, opt, value, parser):
    """optparse callback for -Wl,<opt>: validate the linker option and
    push it back onto the argument list for normal parsing."""
    # Options preceded by a "-Wl," are simply treated as though the "-Wl,"
    # is not there? I don't understand the purpose of this code...
    well_formed = len(value) >= 4 and value[0:3] == 'l,-'
    if not well_formed:
        raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s"
                                      % (opt, value))
    linker_opt = value[2:]
    if linker_opt not in ('-L', '-R', '--rpath'):
        raise optparse.BadOptionError("-Wl option must be -L, -R"
                                      " or --rpath, not " + value[2:])
    # Push the linker option onto the list for further parsing.
    parser.rargs.insert(0, value)
def option_callback_libdir(option, opt, value, parser):
    """optparse callback: record LIBDIR in both linker search-path sets.

    There are two sets of linker search paths: compile-time and run-time.
    Paths given with -L, -R, or --rpath belong to both.
    """
    for path_list in (parser.values.compile_libdirs,
                      parser.values.runtime_libdirs):
        path_list.append(value)
import ctypesgencore
import ctypesgencore.messages as msgs
if __name__=="__main__":
    # Command-line driver: build the optparse option set, then run the
    # three ctypesgen stages (parse -> process -> print).
    usage = 'usage: %prog [options] /path/to/header.h ...'
    op = optparse.OptionParser(usage=usage)

    # Parameters
    op.add_option('-o', '--output', dest='output', metavar='FILE',
                  help='write wrapper to FILE [default stdout]')
    op.add_option('-l', '--library', dest='libraries', action='append',
                  default=[], metavar='LIBRARY', help='link to LIBRARY')
    op.add_option('', '--include', dest='other_headers', action='append',
                  default=[], metavar='HEADER',
                  help='include system header HEADER (e.g. stdio.h or stdlib.h)')
    op.add_option('-m', '--module', '--link-module', action='append',
                  dest='modules', metavar='MODULE', default=[],
                  help='use symbols from Python module MODULE')
    op.add_option('-I', '--includedir', dest='include_search_paths',
                  action='append', default=[], metavar='INCLUDEDIR',
                  help='add INCLUDEDIR as a directory to search for headers')
    op.add_option('-W', action="callback", callback=option_callback_W,
                  metavar="l,OPTION", type="str",
                  help="where OPTION is -L, -R, or --rpath")
    op.add_option("-L", "-R", "--rpath", "--libdir", action="callback",
                  callback=option_callback_libdir, metavar="LIBDIR", type="str",
                  help="Add LIBDIR to the search path (both compile-time and run-time)")
    op.add_option('', "--compile-libdir", action="append",
                  dest="compile_libdirs", metavar="LIBDIR", default=[],
                  help="Add LIBDIR to the compile-time library search path.")
    op.add_option('', "--runtime-libdir", action="append",
                  dest="runtime_libdirs", metavar="LIBDIR", default=[],
                  help="Add LIBDIR to the run-time library search path.")

    # Parser options
    op.add_option('', '--cpp', dest='cpp', default='gcc -E',
                  help='The command to invoke the c preprocessor, including any ' \
                  'necessary options (default: gcc -E)')
    op.add_option('', '--save-preprocessed-headers', metavar='FILENAME',
                  dest='save_preprocessed_headers', default=None,
                  help='Save the preprocessed headers to the specified FILENAME')

    # Processor options
    op.add_option('-a', '--all-headers', action='store_true',
                  dest='all_headers', default=False,
                  help='include symbols from all headers, including system headers')
    op.add_option('', '--builtin-symbols', action='store_true',
                  dest='builtin_symbols', default=False,
                  help='include symbols automatically generated by the preprocessor')
    op.add_option('', '--no-macros', action='store_false', dest='include_macros',
                  default=True, help="Don't output macros.")
    op.add_option('-i', '--include-symbols', dest='include_symbols',
                  default=None, help='regular expression for symbols to always include')
    op.add_option('-x', '--exclude-symbols', dest='exclude_symbols',
                  default=None, help='regular expression for symbols to exclude')
    op.add_option('', '--no-stddef-types', action='store_true',
                  dest='no_stddef_types', default=False,
                  help='Do not support extra C types from stddef.h')
    op.add_option('', '--no-gnu-types', action='store_true',
                  dest='no_gnu_types', default=False,
                  help='Do not support extra GNU C types')
    op.add_option('', '--no-python-types', action='store_true',
                  dest='no_python_types', default=False,
                  help='Do not support extra C types built in to Python')

    # Printer options
    op.add_option('', '--header-template', dest='header_template', default=None,
                  metavar='TEMPLATE',
                  help='Use TEMPLATE as the header template in the output file.')
    op.add_option('', '--strip-build-path', dest='strip_build_path',
                  default=None, metavar='BUILD_PATH',
                  help='Strip build path from header paths in the wrapper file.')
    op.add_option('', '--insert-file', dest='inserted_files', default=[],
                  action='append', metavar='FILENAME',
                  help='Add the contents of FILENAME to the end of the wrapper file.')
    op.add_option('', '--output-language', dest='output_language', metavar='LANGUAGE',
                  default='python',
                  help="Choose output language (`json' or `python' [default])")

    # Error options
    op.add_option('', "--all-errors", action="store_true", default=False,
                  dest="show_all_errors", help="Display all warnings and errors even " \
                  "if they would not affect output.")
    op.add_option('', "--show-long-errors", action="store_true", default=False,
                  dest="show_long_errors", help="Display long error messages " \
                  "instead of abbreviating error messages.")
    op.add_option('', "--no-macro-warnings", action="store_false", default=True,
                  dest="show_macro_warnings", help="Do not print macro warnings.")

    # Fill in any options the library declares defaults for, then parse.
    op.set_defaults(**ctypesgencore.options.default_values)
    (options, args) = op.parse_args(list(sys.argv[1:]))
    options.headers = args

    # Figure out what names will be defined by imported Python modules
    options.other_known_names = find_names_in_modules(options.modules)

    # Required parameters
    if len(args) < 1:
        msgs.error_message('No header files specified', cls='usage')
        sys.exit(1)

    if len(options.libraries) == 0:
        msgs.warning_message('No libraries specified', cls='usage')

    # Check output language and select the matching printer class.
    printer = None
    if options.output_language == "python":
        printer = ctypesgencore.printer_python.WrapperPrinter
    elif options.output_language == "json":
        printer = ctypesgencore.printer_json.WrapperPrinter
    else:
        msgs.error_message("No such output language `" + options.output_language + "'", cls='usage')
        sys.exit(1)

    # Step 1: Parse
    descriptions=ctypesgencore.parser.parse(options.headers,options)

    # Step 2: Process
    ctypesgencore.processor.process(descriptions,options)

    # Step 3: Print
    printer(options.output,options,descriptions)

    msgs.status_message("Wrapping complete.")

    # Correct what may be a common mistake
    if descriptions.all == []:
        if not options.all_headers:
            msgs.warning_message("There wasn't anything of use in the " \
                "specified header file(s). Perhaps you meant to run with " \
                "--all-headers to include objects from included sub-headers? ",
                cls = 'usage')

Wyświetl plik

@ -0,0 +1,72 @@
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
"""
Ctypesgencore is the module that contains the main body of ctypesgen - in fact,
it contains everything but the command-line interface.
ctypesgen's job is divided into three steps:
Step 1: Parse
Ctypesgen reads the input header files and parses them. It generates a list of
function, variable, struct, union, enum, constant, typedef, and macro
descriptions from the input files. These descriptions are encapsulated as
ctypesgen.descriptions.Description objects.
The package ctypesgen.parser is responsible for the parsing stage.
Step 2: Process
Ctypesgen processes the list of descriptions from the parsing stage. This is
the stage where ctypesgen resolves name conflicts and filters descriptions using
the regexes specified on the command line. Other processing steps take place
at this stage, too. When processing is done, ctypesgen finalizes which
descriptions will be included in the output file.
The package ctypesgen.processor is responsible for the processing stage.
Step 3: Print
Ctypesgen writes the descriptions to the output file, along with a header.
The package ctypesgen.printer is responsible for the printing stage.
There are three modules in ctypesgencore that describe the format that the
parser, processor, and printer modules use to pass information. They are:
* descriptions: Classes to represent the descriptions.
* ctypedecls: Classes to represent C types.
* expressions: Classes to represent an expression in a language-independent
format.
"""
# Version of this vendored ctypesgen snapshot.
__version__ = '0.0'
VERSION = __version__

# Submodules exported by `from ctypesgencore import *`.
__all__ = ["parser","processor","printer",
           "descriptions","ctypedescs","expressions",
           "messages","options"]

# Workhorse modules (Python 2 implicit-relative imports).
import parser
import processor
import printer_python
try:
    import printer_json
except ImportError:
    # JSON output is optional; keep going if its dependencies are missing.
    pass

# Modules describing internal format
import descriptions
import ctypedescs
import expressions

# Helper modules
import messages
import options

printer = printer_python # Default the printer to generating Python

Wyświetl plik

@ -0,0 +1,318 @@
#!/usr/bin/env python
'''
ctypesgencore.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.
Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.
The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.
For example, a CtypesType
representing an array of four integers could be created using:
>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
str(ctype) would evaluate to "c_int * 4".
'''
import warnings
__docformat__ = 'restructuredtext'
# Maps a (typename, signed, long-count) triple to the ctypes type name
# used in the generated wrapper.
ctypes_type_map = {
    # typename signed longs
    ('void', True, 0): 'None',
    ('int', True, 0): 'c_int',
    ('int', False, 0): 'c_uint',
    ('int', True, 1): 'c_long',
    ('int', False, 1): 'c_ulong',
    ('char', True, 0): 'c_char',
    ('char', False, 0): 'c_ubyte',
    ('short', True, 0): 'c_short',
    ('short', False, 0): 'c_ushort',
    ('float', True, 0): 'c_float',
    ('double', True, 0): 'c_double',
    ('int8_t', True, 0): 'c_int8',
    ('int16_t', True, 0): 'c_int16',
    ('int32_t', True, 0): 'c_int32',
    ('int64_t', True, 0): 'c_int64',
    ('uint8_t', True, 0): 'c_uint8',
    ('uint16_t',True, 0): 'c_uint16',
    ('uint32_t',True, 0): 'c_uint32',
    ('uint64_t',True, 0): 'c_uint64',
    ('_Bool', True, 0): 'c_bool',
}
# Additional mappings enabled unless --no-python-types is given; some of
# these rely on helper definitions emitted in the wrapper preamble.
ctypes_type_map_python_builtin = {
    ('int', True, 2): 'c_longlong',
    ('int', False, 2): 'c_ulonglong',
    ('size_t', True, 0): 'c_size_t',
    ('apr_int64_t',True,0): 'c_int64',
    ('off64_t', True, 0): 'c_int64',
    ('apr_uint64_t',True,0): 'c_uint64',
    ('wchar_t', True, 0): 'c_wchar',
    ('ptrdiff_t',True, 0): 'c_ptrdiff_t', # Requires definition in preamble
    ('ssize_t', True, 0): 'c_ptrdiff_t', # Requires definition in preamble
    ('va_list', True, 0): 'c_void_p',
}
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
    """Base visitor for CtypesType.visit(). Every hook is a no-op so
    subclasses override only the events they care about."""
    def visit_struct(self, struct):
        pass
    def visit_enum(self, enum):
        pass
    def visit_typedef(self, name):
        pass
    def visit_error(self, error, cls):
        pass
    def visit_identifier(self, identifier):
        # This one comes from inside ExpressionNodes. There may be
        # ExpressionNode objects in array count expressions.
        pass
def visit_type_and_collect_info(ctype):
    """Walk `ctype` once and return everything encountered as a tuple of
    (structs, enums, typedefs, errors, identifiers)."""
    structs, enums, typedefs = [], [], []
    errors, identifiers = [], []

    class _Collector(CtypesTypeVisitor):
        # Each hook appends to the enclosing function's lists.
        def visit_struct(self, struct):
            structs.append(struct)
        def visit_enum(self, enum):
            enums.append(enum)
        def visit_typedef(self, typedef):
            typedefs.append(typedef)
        def visit_error(self, error, cls):
            errors.append((error, cls))
        def visit_identifier(self, identifier):
            identifiers.append(identifier)

    ctype.visit(_Collector())
    return structs, enums, typedefs, errors, identifiers
# Remove one level of indirection from function pointers; needed for typedefs
# and function parameters.
def remove_function_pointer(t):
    """Collapse a pointer-to-function into the function type itself,
    recursing through intermediate pointer levels."""
    if type(t) == CtypesPointer:
        if type(t.destination) == CtypesFunction:
            return t.destination
        t.destination = remove_function_pointer(t.destination)
    return t
class CtypesType(object):
    """Abstract base for all C-type descriptions; collects errors attached
    to the type and reports them when the type is visited."""

    def __init__(self):
        self.errors = []

    def __repr__(self):
        return "<Ctype \"%s\">" % self.py_string()

    def error(self, message, cls=None):
        # Record the problem; it is surfaced via visit_error() later.
        self.errors.append((message, cls))

    def visit(self, visitor):
        for error, cls in self.errors:
            visitor.visit_error(error, cls)
class CtypesSimple(CtypesType):
    """Represents a builtin type, like "char" or "int"."""

    def __init__(self, name, signed, longs):
        CtypesType.__init__(self)
        self.name = name
        self.signed = signed
        self.longs = longs

    def py_string(self):
        # Look up the ctypes spelling for (typename, signedness, long-count).
        key = (self.name, self.signed, self.longs)
        return ctypes_type_map[key]
class CtypesSpecial(CtypesType):
    """A type emitted verbatim by name (e.g. 'String' or 'c_void')."""

    def __init__(self, name):
        CtypesType.__init__(self)
        self.name = name

    def py_string(self):
        return self.name
class CtypesTypedef(CtypesType):
    """Represents a type defined by a typedef."""

    def __init__(self, name):
        CtypesType.__init__(self)
        self.name = name

    def visit(self, visitor):
        # Only announce the typedef when the type carries no errors.
        if not self.errors:
            visitor.visit_typedef(self.name)
        CtypesType.visit(self, visitor)

    def py_string(self):
        return self.name
class CtypesBitfield(CtypesType):
    """A struct bitfield member: base type plus bit-width expression."""

    def __init__(self, base, bitfield):
        CtypesType.__init__(self)
        self.base = base
        self.bitfield = bitfield

    def visit(self, visitor):
        self.base.visit(visitor)
        CtypesType.visit(self, visitor)

    def py_string(self):
        # The width itself is emitted separately in the _fields_ list.
        return self.base.py_string()
class CtypesPointer(CtypesType):
    """A pointer to another ctype, with C qualifiers preserved."""

    def __init__(self, destination, qualifiers):
        CtypesType.__init__(self)
        self.destination = destination
        self.qualifiers = qualifiers

    def visit(self, visitor):
        if self.destination:
            self.destination.visit(visitor)
        CtypesType.visit(self, visitor)

    def py_string(self):
        return 'POINTER(%s)' % self.destination.py_string()
class CtypesArray(CtypesType):
    """An array: `base` repeated `count` times (count may be None)."""

    def __init__(self, base, count):
        CtypesType.__init__(self)
        self.base = base
        self.count = count

    def visit(self, visitor):
        self.base.visit(visitor)
        if self.count:
            self.count.visit(visitor)
        CtypesType.visit(self, visitor)

    def py_string(self):
        # An unsized array decays to a pointer.
        if self.count is None:
            return 'POINTER(%s)' % self.base.py_string()
        # Parenthesize nested arrays so '*' binds in the right order.
        if type(self.base) == CtypesArray:
            template = '(%s) * %s'
        else:
            template = '%s * %s'
        return template % (self.base.py_string(), self.count.py_string(False))
class CtypesFunction(CtypesType):
    """A function type: return type plus argument types, with the ctypes
    restype quirks normalized at construction time."""

    def __init__(self, restype, parameters, variadic=False):
        CtypesType.__init__(self)
        self.restype = restype

        # Don't allow POINTER(None) (c_void_p) as a restype... causes errors
        # when ctypes automagically returns it as an int.
        # Instead, convert to POINTER(c_void). c_void is not a ctypes type,
        # you can make it any arbitrary type.
        returns_void_pointer = (
            type(self.restype) == CtypesPointer and
            type(self.restype.destination) == CtypesSimple and
            self.restype.destination.name == 'None')
        if returns_void_pointer:
            self.restype = CtypesPointer(CtypesSpecial('c_void'), ())

        # Return "String" instead of "POINTER(c_char)"
        if self.restype.py_string() == 'POINTER(c_char)':
            self.restype = CtypesSpecial('String')

        self.argtypes = [remove_function_pointer(p) for p in parameters]
        self.variadic = variadic

    def visit(self, visitor):
        self.restype.visit(visitor)
        for argtype in self.argtypes:
            argtype.visit(visitor)
        CtypesType.visit(self, visitor)

    def py_string(self):
        arg_list = ', '.join([a.py_string() for a in self.argtypes])
        return 'CFUNCTYPE(UNCHECKED(%s), %s)' % (self.restype.py_string(),
                                                 arg_list)
last_tagnum = 0

def anonymous_struct_tag():
    """Generate a unique tag ('anon_1', 'anon_2', ...) for unnamed structs."""
    global last_tagnum
    last_tagnum += 1
    return 'anon_%d' % last_tagnum
class CtypesStruct(CtypesType):
    """A struct or union type; `variety` selects which keyword."""
    def __init__(self, tag, variety, members, src=None):
        CtypesType.__init__(self)
        self.tag = tag
        self.variety = variety # "struct" or "union"
        self.members = members
        # An unnamed struct gets a generated anon_<n> tag.
        if not self.tag:
            self.tag = anonymous_struct_tag()
            self.anonymous = True
        else:
            self.anonymous = False
        # A struct with no member list in the header is opaque.
        if self.members==None:
            self.opaque = True
        else:
            self.opaque = False
        self.src = src
    def get_required_types(self):
        # NOTE(review): CtypesType defines no get_required_types in this
        # file, so this call would raise AttributeError if ever reached —
        # verify against upstream ctypesgen before relying on this method.
        types = CtypesType.get_required_types(self)
        types.add((self.variety,self.tag))
        return types
    def visit(self,visitor):
        visitor.visit_struct(self)
        if not self.opaque:
            for name,ctype in self.members:
                ctype.visit(visitor)
        CtypesType.visit(self,visitor)
    def get_subtypes(self):
        # Set of member ctypes (empty for opaque structs).
        if self.opaque:
            return set()
        else:
            return set([m[1] for m in self.members])
    def py_string(self):
        return "%s_%s" % (self.variety,self.tag)
last_tagnum = 0

def anonymous_enum_tag():
    """Generate a unique tag ('anon_1', 'anon_2', ...) for unnamed enums."""
    global last_tagnum
    last_tagnum += 1
    return 'anon_%d' % last_tagnum
class CtypesEnum(CtypesType):
    """An enum type, possibly anonymous and possibly opaque."""

    def __init__(self, tag, enumerators, src=None):
        CtypesType.__init__(self)
        self.tag = tag
        self.enumerators = enumerators
        # Unnamed enums receive a generated anon_<n> tag.
        if self.tag:
            self.anonymous = False
        else:
            self.tag = anonymous_enum_tag()
            self.anonymous = True
        # An enum with no enumerator list is opaque.
        if self.enumerators==None:
            self.opaque = True
        else:
            self.opaque = False
        self.src = src

    def visit(self, visitor):
        visitor.visit_enum(self)
        CtypesType.visit(self, visitor)

    def py_string(self):
        return 'enum_%s' % self.tag

Wyświetl plik

@ -0,0 +1,189 @@
#!/usr/bin/env python
"""
ctypesgencore.descriptions contains classes to represent a description of a
struct, union, enum, function, constant, variable, or macro. All the
description classes are subclassed from an abstract base class, Description.
The descriptions module also contains a class, DescriptionCollection, to hold
lists of Description objects.
"""
class DescriptionCollection(object):
    """Represents a collection of Descriptions, bucketed by kind plus the
    combined `all` list and the `output_order` sequence."""

    def __init__(self, constants, typedefs, structs, enums, functions,
                 variables, macros, all, output_order):
        self.constants = constants
        self.typedefs = typedefs
        self.structs = structs
        self.enums = enums
        self.functions = functions
        self.variables = variables
        self.macros = macros
        self.all = all
        self.output_order = output_order
class Description(object):
    """Represents a constant, typedef, struct, function, variable, enum,
    or macro description. Description is an abstract base class."""

    def __init__(self, src=None):
        # src is a tuple of (filename, lineno)
        self.src = src
        # Whether this object goes in the output file: "yes", "never",
        # or "if_needed".
        self.include_rule = "yes"
        # Dependency graph edges: if X requires Y, Y is in X.requirements
        # and X is in Y.dependents.
        self.requirements = set()
        self.dependents = set()
        # The processor appends fatal problems to `errors` and nonfatal
        # ones to `warnings`; anything left in `errors` after processing
        # keeps the description out of the output.
        self.errors = []
        self.warnings = []

    def add_requirements(self, reqs):
        """Record that this description requires every member of `reqs`."""
        self.requirements = self.requirements.union(reqs)
        for requirement in reqs:
            requirement.dependents.add(self)

    def error(self, msg, cls=None):
        self.errors.append((msg, cls))

    def warning(self, msg, cls=None):
        self.warnings.append((msg, cls))

    def __repr__(self):
        return "<Description: %s>" % self.casual_name()

    def casual_name(self):
        """Return a name to show the user."""

    def py_name(self):
        """Return the name associated with this description in Python code."""

    def c_name(self):
        """Return the name associated with this description in C code."""
class ConstantDescription(Description):
    """Simple class to contain information about a constant."""

    def __init__(self, name, value, src=None):
        Description.__init__(self, src)
        # Name of constant, a string
        self.name = name
        # Value of constant, as an ExpressionNode object
        self.value = value

    def casual_name(self):
        return "Constant \"%s\"" % self.name

    def py_name(self):
        return self.name

    def c_name(self):
        return self.name
class TypedefDescription(Description):
    """Simple container class for a type definition."""

    def __init__(self, name, ctype, src=None):
        Description.__init__(self, src)
        self.name = name    # Name, a string
        self.ctype = ctype  # The base type as a ctypedescs.CtypeType object

    def casual_name(self):
        return "Typedef \"%s\"" % self.name

    def py_name(self):
        return self.name

    def c_name(self):
        return self.name
class StructDescription(Description):
    """Simple container class for a structure or union definition."""

    def __init__(self, tag, variety, members, opaque, ctype, src=None):
        Description.__init__(self, src)
        # The name of the structure minus the "struct" or "union"
        self.tag = tag
        # A string "struct" or "union"
        self.variety = variety
        # A list of pairs of (name,ctype)
        self.members = members
        # True if struct body was not specified in header file
        self.opaque = opaque
        # The original CtypeStruct that created the struct
        self.ctype = ctype

    def casual_name(self):
        return "%s \"%s\"" % (self.variety.capitalize(), self.tag)

    def py_name(self):
        return "%s_%s" % (self.variety, self.tag)

    def c_name(self):
        return "%s %s" % (self.variety, self.tag)
class EnumDescription(Description):
    """Simple container class for an enum definition."""

    def __init__(self, tag, members, ctype, src=None):
        Description.__init__(self, src)
        # The name of the enum, minus the "enum"
        self.tag = tag
        # A list of (name,value) pairs where value is a number
        self.members = members
        # The original CtypeEnum that created the enum
        self.ctype = ctype

    def casual_name(self):
        return "Enum \"%s\"" % self.tag

    def py_name(self):
        return "enum_%s" % self.tag

    def c_name(self):
        return "enum %s" % self.tag
class FunctionDescription(Description):
    """Simple container class for a C function."""

    def __init__(self, name, restype, argtypes, variadic=False, src=None):
        Description.__init__(self, src)
        # Name, a string
        self.name = name
        # Name according to C - stored in case description is renamed
        self.cname = name
        # A ctype representing return type
        self.restype = restype
        # A list of ctypes representing the argument types
        self.argtypes = argtypes
        # Does this function accept a variable number of arguments?
        self.variadic = variadic

    def casual_name(self):
        return "Function \"%s\"" % self.name

    def py_name(self):
        return self.name

    def c_name(self):
        return self.cname
class VariableDescription(Description):
    """Simple container class for a C variable declaration."""

    def __init__(self, name, ctype, src=None):
        Description.__init__(self, src)
        # Name, a string
        self.name = name
        # Name according to C - stored in case description is renamed
        self.cname = name
        # The type of the variable
        self.ctype = ctype

    def casual_name(self):
        return "Variable \"%s\"" % self.name

    def py_name(self):
        return self.name

    def c_name(self):
        return self.cname
class MacroDescription(Description):
    """Simple container class for a C macro."""

    def __init__(self, name, params, expr, src=None):
        Description.__init__(self, src)
        self.name = name
        self.params = params
        # ExpressionNode for the macro's body
        self.expr = expr

    def casual_name(self):
        return "Macro \"%s\"" % self.name

    def py_name(self):
        return self.name

    def c_name(self):
        return self.name

Wyświetl plik

@ -0,0 +1,309 @@
#!/usr/bin/env python
'''
The expressions module contains classes to represent an expression. The main
class is ExpressionNode. ExpressionNode's most useful method is py_string(),
which returns a Python string representing that expression.
'''
import sys
from ctypedescs import *
import keyword
# Right now, the objects in this module are all oriented toward evaluation.
# However, they don't have to be, since ctypes objects are mutable. For example,
# shouldn't it be possible to translate the macro:
#
# #define INCREMENT(x) ++x
#
# into Python? The resulting code should be:
#
# def INCREMENT(x):
# x.value+=1
# return x.value
#
# On the other hand, this would be a challenge to write.
class EvaluationContext(object):
    '''Interface for evaluating expression nodes.

    Every hook warns and returns 0, so evaluation degrades gracefully
    when no real context is available.
    '''
    def evaluate_identifier(self, name):
        warnings.warn('Attempt to evaluate identifier "%s" failed' % name)
        return 0

    def evaluate_sizeof(self, type):
        warnings.warn('Attempt to evaluate sizeof "%s" failed' % str(type))
        return 0

    def evaluate_sizeof_object(self, object):
        # Bug fix: this was a second `def evaluate_sizeof`, which silently
        # shadowed the method above and left evaluate_sizeof_object —
        # called by SizeOfExpressionNode.evaluate — undefined.
        warnings.warn('Attempt to evaluate sizeof object "%s" failed' % str(object))
        return 0

    def evaluate_parameter(self, name):
        warnings.warn('Attempt to evaluate parameter "%s" failed' % name)
        return 0
class ExpressionNode(object):
    """Abstract base for expression nodes; collects attached errors and
    reports them when the node is visited."""

    def __init__(self):
        self.errors = []

    def error(self, message, cls=None):
        self.errors.append((message, cls))

    def __repr__(self):
        try:
            string = repr(self.py_string(True))
        except ValueError:
            string = "<error in expression node>"
        return "<ExpressionNode: %s>" % string

    def visit(self, visitor):
        for error, cls in self.errors:
            visitor.visit_error(error, cls)
class ConstantExpressionNode(ExpressionNode):
    """A literal constant value."""

    def __init__(self, value):
        ExpressionNode.__init__(self)
        self.value = value

    def evaluate(self, context):
        return self.value

    def py_string(self, can_be_ctype):
        # Windows python did not get infinity support until 2.6
        infinity_ok = sys.platform != 'win32' or sys.version_info >= (2, 6)
        if infinity_ok:
            if self.value == float('inf'):
                return "float('inf')"
            if self.value == float('-inf'):
                return "float('-inf')"
        return repr(self.value)
class IdentifierExpressionNode(ExpressionNode):
    """A reference to a named C identifier."""

    def __init__(self, name):
        ExpressionNode.__init__(self)
        self.name = name

    def evaluate(self, context):
        return context.evaluate_identifier(self.name)

    def visit(self, visitor):
        visitor.visit_identifier(self.name)
        ExpressionNode.visit(self, visitor)

    def py_string(self, can_be_ctype):
        # Errors will be thrown in generated code if identifier evaluates
        # to a ctypes object, and can_be_ctype is False.
        return self.name
class ParameterExpressionNode(ExpressionNode):
    """A reference to a macro parameter by name."""

    def __init__(self, name):
        ExpressionNode.__init__(self)
        self.name = name

    def evaluate(self, context):
        return context.evaluate_parameter(self.name)

    def visit(self, visitor):
        ExpressionNode.visit(self, visitor)

    def py_string(self, can_be_ctype):
        # Errors will be thrown in generated code if parameter is
        # a ctypes object, and can_be_ctype is False.
        return self.name
class UnaryExpressionNode(ExpressionNode):
    """A unary C operator applied to a single child expression."""

    def __init__(self, name, op, format, child_can_be_ctype, child):
        ExpressionNode.__init__(self)
        self.name = name
        self.op = op
        self.format = format
        self.child_can_be_ctype = child_can_be_ctype
        self.child = child

    def visit(self, visitor):
        self.child.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        # With no Python-level operator available, evaluation is impossible.
        if not self.op:
            raise ValueError("The C operator \"%s\" can't be evaluated right " \
                "now" % self.name)
        return self.op(self.child.evaluate(context))

    def py_string(self, can_be_ctype):
        child_flag = self.child_can_be_ctype and can_be_ctype
        return self.format % self.child.py_string(child_flag)
class SizeOfExpressionNode(ExpressionNode):
    """sizeof(...) applied to either a type or an expression."""

    def __init__(self, child):
        ExpressionNode.__init__(self)
        self.child = child

    def visit(self, visitor):
        self.child.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if isinstance(self.child, CtypesType):
            return context.evaluate_sizeof(self.child)
        return context.evaluate_sizeof_object(self.child)

    def py_string(self, can_be_ctype):
        if isinstance(self.child, CtypesType):
            return 'sizeof(%s)' % self.child.py_string()
        return 'sizeof(%s)' % self.child.py_string(True)
class BinaryExpressionNode(ExpressionNode):
    """A binary C operator applied to two child expressions.

    `can_be_ctype` is a two-element sequence of flags: element 0 governs
    the left operand, element 1 the right operand.
    """

    def __init__(self, name, op, format, can_be_ctype, left, right):
        ExpressionNode.__init__(self)
        self.name = name
        self.op = op
        self.format = format
        self.can_be_ctype = can_be_ctype
        self.left = left
        self.right = right

    def visit(self, visitor):
        self.left.visit(visitor)
        self.right.visit(visitor)
        ExpressionNode.visit(self,visitor)

    def evaluate(self, context):
        if self.op:
            return self.op(self.left.evaluate(context),
                           self.right.evaluate(context))
        else:
            raise ValueError("The C operator \"%s\" can't be evaluated right " \
                "now" % self.name)

    def py_string(self, can_be_ctype):
        # Bug fix: the right operand must consult its own flag,
        # can_be_ctype[1]; the original reused can_be_ctype[0] (the left
        # operand's flag) for both sides.
        return self.format % \
            (self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
             self.right.py_string(self.can_be_ctype[1] and can_be_ctype))
class ConditionalExpressionNode(ExpressionNode):
    """C's ternary operator: cond ? yes : no."""
    def __init__(self, cond, yes, no):
        ExpressionNode.__init__(self)
        self.cond = cond
        self.yes = yes
        self.no = no
    def visit(self, visitor):
        self.cond.visit(visitor)
        self.yes.visit(visitor)
        self.no.visit(visitor)
        ExpressionNode.visit(self,visitor)
    def evaluate(self, context):
        if self.cond.evaluate(context):
            return self.yes.evaluate(context)
        else:
            return self.no.evaluate(context)
    def py_string(self, can_be_ctype):
        # NOTE(review): the "cond and yes or no" idiom selects `no` whenever
        # `yes` evaluates to a falsy value — confirm whether that matters
        # for generated wrappers before changing the emitted form.
        return "%s and %s or %s" % \
            (self.cond.py_string(True),
             self.yes.py_string(can_be_ctype),
             self.no.py_string(can_be_ctype))
class AttributeExpressionNode(ExpressionNode):
    """Attribute access on a struct/union expression (``base.attr`` or
    ``base->attr``); `op` and `format` are supplied by the parser."""

    def __init__(self, op, format, base, attribute):
        ExpressionNode.__init__(self)
        self.op = op
        self.format = format
        self.base = base
        self.attribute = attribute

        # Attribute access will raise parse errors if you don't do this.
        # Fortunately, the processor module does the same thing to
        # the struct member name.
        if self.attribute in keyword.kwlist:
            self.attribute = "_"+self.attribute

    def visit(self, visitor):
        self.base.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        # BUG FIX: the original called self.base.evalute() (typo), which
        # raised AttributeError whenever an attribute expression was
        # constant-folded.
        return self.op(self.base.evaluate(context), self.attribute)

    def py_string(self, can_be_ctype):
        # When a plain Python value is required, unwrap the ctypes object
        # via its .value attribute.
        if can_be_ctype:
            return self.format % (self.base.py_string(can_be_ctype),
                                  self.attribute)
        else:
            return "(%s.value)" % (self.format % \
                (self.base.py_string(can_be_ctype), self.attribute))
class CallExpressionNode(ExpressionNode):
    """A function-call expression: ``function(arguments...)``."""

    def __init__(self, function, arguments):
        ExpressionNode.__init__(self)
        self.function = function
        self.arguments = arguments

    def visit(self, visitor):
        # Visit the callee and every argument before this node.
        self.function.visit(visitor)
        for argument in self.arguments:
            argument.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        evaluated = [argument.evaluate(context) for argument in self.arguments]
        return self.function.evaluate(context)(*evaluated)

    def py_string(self, can_be_ctype):
        callee = self.function.py_string(can_be_ctype)
        args = ", ".join([a.py_string(can_be_ctype) for a in self.arguments])
        if can_be_ctype:
            return '(%s (%s))' % (callee, args)
        else:
            # Unwrap the ctypes result when a plain value is needed.
            return '((%s (%s)).value)' % (callee, args)
# There seems not to be any reasonable way to translate C typecasts
# into Python. Ctypesgen doesn't try, except for the special case of NULL.
class TypeCastExpressionNode(ExpressionNode):
    """A C typecast.  Only the NULL-pointer cast has a Python translation;
    any other cast passes the base expression through untouched."""

    def __init__(self, base, ctype):
        ExpressionNode.__init__(self)
        self.base = base
        self.ctype = ctype
        # Recognize the special case "(T *) 0", which maps cleanly to None.
        self.isnull = isinstance(ctype, CtypesPointer) and \
            isinstance(base, ConstantExpressionNode) and \
            base.value == 0

    def visit(self, visitor):
        # No need to visit ctype because it isn't actually used
        self.base.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if self.isnull:
            return None
        return self.base.evaluate(context)

    def py_string(self, can_be_ctype):
        if self.isnull:
            return "None"
        return self.base.py_string(can_be_ctype)
class UnsupportedExpressionNode(ExpressionNode):
    """Placeholder for an expression ctypesgen cannot translate; registers
    an 'unsupported-type' error at construction and refuses evaluation."""

    def __init__(self, message):
        ExpressionNode.__init__(self)
        self.message = message
        self.error(message, 'unsupported-type')

    def evaluate(self, context):
        raise ValueError("Tried to evaluate an unsupported expression " \
                         "node: %s" % self.message)

    def __repr__(self):
        return "<UnsupportedExpressionNode>"

    def py_string(self, can_be_ctype):
        # BUG FIX: the message previously read "Called py_string() an
        # unsupported..." -- missing the word "on".
        raise ValueError("Called py_string() on an unsupported expression " \
                         "node: %s" % self.message)

Wyświetl plik

@ -0,0 +1,290 @@
# ----------------------------------------------------------------------------
# Copyright (c) 2008 David James
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os.path, re, sys, glob
import platform
import ctypes
import ctypes.util
def _environ_path(name):
if name in os.environ:
return os.environ[name].split(":")
else:
return []
class LibraryLoader(object):
    # Base loader: platform subclasses supply candidate paths via
    # getplatformpaths(); the first candidate that exists on disk is loaded.
    # (Python 2 module: note the old-style `except OSError,e` below.)
    def __init__(self):
        # Extra search directories supplied by the generated module's user
        # through add_library_search_dirs().
        self.other_dirs=[]

    def load_library(self,libname):
        """Given the name of a library, load it."""
        paths = self.getpaths(libname)

        for path in paths:
            if os.path.exists(path):
                return self.load(path)

        raise ImportError("%s not found." % libname)

    def load(self,path):
        """Given a path to a library, load it."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
            # of the default RTLD_LOCAL.  Without this, you end up with
            # libraries not being loadable, resulting in "Symbol not found"
            # errors
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            else:
                return ctypes.cdll.LoadLibrary(path)
        except OSError,e:
            # Normalise dlopen failures to ImportError so callers only have
            # one exception type to handle.
            raise ImportError(e)

    def getpaths(self,libname):
        """Return a list of paths where the library might be found."""
        if os.path.isabs(libname):
            # An absolute path needs no searching.
            yield libname
        else:
            # FIXME / TODO return '.' and os.path.dirname(__file__)
            for path in self.getplatformpaths(libname):
                yield path

            # Last resort: let ctypes perform its own platform search.
            path = ctypes.util.find_library(libname)
            if path: yield path

    def getplatformpaths(self, libname):
        # Overridden by platform subclasses; the base class contributes no
        # candidate paths.
        return []
# Darwin (Mac OS X)
class DarwinLibraryLoader(LibraryLoader):
    """Loader implementing the Mac OS X dylib search order."""

    # Candidate filename patterns tried for a bare library name.
    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
                    "%s.so", "%s.bundle", "%s"]

    def getplatformpaths(self, libname):
        # A name containing a path separator is used verbatim; otherwise
        # every conventional dylib/bundle naming pattern is tried.
        if os.path.pathsep in libname:
            candidates = [libname]
        else:
            candidates = [fmt % libname for fmt in self.name_formats]

        for directory in self.getdirs(libname):
            for candidate in candidates:
                yield os.path.join(directory, candidate)

    def getdirs(self, libname):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''
        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
        if not dyld_fallback_library_path:
            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
                                          '/usr/local/lib', '/usr/lib']

        dirs = []
        # Names with an embedded '/' skip the LD_LIBRARY_PATH step.
        if '/' in libname:
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        else:
            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))

        dirs.extend(self.other_dirs)
        dirs.append(".")
        dirs.append(os.path.dirname(__file__))

        # Inside a py2app bundle, also look in the bundled Frameworks dir.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            dirs.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks'))

        dirs.extend(dyld_fallback_library_path)

        return dirs
# Posix
class PosixLibraryLoader(LibraryLoader):
    # Generic POSIX loader: lazily builds a cache mapping library names and
    # filenames to full paths by scanning the runtime linker's search path.
    _ld_so_cache = None

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date). Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.

        directories = []
        for name in ("LD_LIBRARY_PATH",
                     "SHLIB_PATH", # HPUX
                     "LIBPATH", # OS/2, AIX
                     "LIBRARY_PATH", # BE/OS
                    ):
            if name in os.environ:
                directories.extend(os.environ[name].split(os.pathsep))
        directories.extend(self.other_dirs)
        directories.append(".")
        directories.append(os.path.dirname(__file__))

        # /etc/ld.so.conf may be absent; ignore quietly.
        try: directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
        except IOError: pass

        unix_lib_dirs_list = ['/lib', '/usr/lib', '/lib64', '/usr/lib64']
        if sys.platform.startswith('linux'):
            # Try and support multiarch work in Ubuntu
            # https://wiki.ubuntu.com/MultiarchSpec
            bitage = platform.architecture()[0]
            if bitage.startswith('32'):
                # Assume Intel/AMD x86 compat
                unix_lib_dirs_list += ['/lib/i386-linux-gnu', '/usr/lib/i386-linux-gnu']
            elif bitage.startswith('64'):
                # Assume Intel/AMD x86 compat
                unix_lib_dirs_list += ['/lib/x86_64-linux-gnu', '/usr/lib/x86_64-linux-gnu']
            else:
                # guess...
                unix_lib_dirs_list += glob.glob('/lib/*linux-gnu')
        directories.extend(unix_lib_dirs_list)

        cache = {}
        lib_re = re.compile(r'lib(.*)\.s[ol]')
        # NOTE(review): ext_re is compiled but never used below.
        ext_re = re.compile(r'\.s[ol]$')
        for dir in directories:
            try:
                for path in glob.glob("%s/*.s[ol]*" % dir):
                    file = os.path.basename(path)

                    # Index by filename
                    # First hit wins, so earlier directories take priority.
                    if file not in cache:
                        cache[file] = path

                    # Index by library name
                    match = lib_re.match(file)
                    if match:
                        library = match.group(1)
                        if library not in cache:
                            cache[library] = path
            except OSError:
                pass

        self._ld_so_cache = cache

    def getplatformpaths(self, libname):
        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        result = self._ld_so_cache.get(libname)
        if result: yield result

        # Fall back to ctypes' own search.  NOTE(review): find_library()
        # returns a bare filename here, hence the join with "/lib" --
        # confirm that prefix is the intended behaviour.
        path = ctypes.util.find_library(libname)
        if path: yield os.path.join("/lib",path)
# Windows
class _WindowsLibrary(object):
    """Wraps a DLL loaded under both calling conventions, resolving each
    symbol first through the cdecl binding and then through stdcall."""

    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)

    def __getattr__(self, name):
        # Prefer the cdecl symbol; fall back to stdcall, re-raising the
        # AttributeError if neither binding provides it.
        try:
            return getattr(self.cdll, name)
        except AttributeError:
            try:
                return getattr(self.windll, name)
            except AttributeError:
                raise
class WindowsLibraryLoader(LibraryLoader):
    # Windows loader: path-based search first, then ctypes.cdll attribute
    # access, which applies the system's own DLL search rules.
    name_formats = ["%s.dll", "lib%s.dll", "%slib.dll"]

    def load_library(self, libname):
        try:
            result = LibraryLoader.load_library(self, libname)
        except ImportError:
            result = None
            if os.path.sep not in libname:
                # Bare name: try each conventional DLL filename through
                # ctypes' own loader.
                for name in self.name_formats:
                    try:
                        result = getattr(ctypes.cdll, name % libname)
                        if result:
                            break
                    except WindowsError:
                        result = None
            if result is None:
                # Last resort: the name exactly as given.
                try:
                    result = getattr(ctypes.cdll, libname)
                except WindowsError:
                    result = None
            if result is None:
                raise ImportError("%s not found." % libname)

        return result

    def load(self, path):
        return _WindowsLibrary(path)

    def getplatformpaths(self, libname):
        if os.path.sep not in libname:
            for name in self.name_formats:
                # The current working directory is checked before the
                # system search path.
                dll_in_current_dir = os.path.abspath(name % libname)
                if os.path.exists(dll_in_current_dir):
                    yield dll_in_current_dir
                path = ctypes.util.find_library(name % libname)
                if path:
                    yield path
# Platform switching

# If your value of sys.platform does not appear in this dict, please contact
# the Ctypesgen maintainers.

loaderclass = {
    "darwin":   DarwinLibraryLoader,
    "cygwin":   WindowsLibraryLoader,
    "win32":    WindowsLibraryLoader
}

# Anything not listed above (linux, bsd, ...) gets the generic POSIX loader.
loader = loaderclass.get(sys.platform, PosixLibraryLoader)()

def add_library_search_dirs(other_dirs):
    # Module-level hook used by generated code to extend the search path.
    loader.other_dirs = other_dirs

# Convenience alias so callers can use load_library() directly.
load_library = loader.load_library

del loaderclass

Wyświetl plik

@ -0,0 +1,44 @@
#!/usr/bin/env python
"""
ctypesgencore.messages contains functions to display status, error, or warning
messages to the user. Warning and error messages are also associated
with a "message class", which is a string, which currently has no effect.
Error classes are:
'usage' - there was something funny about the command-line parameters
'cparser' - there was a syntax error in the header file
'missing-library' - a library could not be loaded
'macro' - a macro could not be translated to Python
'unsupported-type' - there was a type in the header that ctypes cannot use, like
"long double".
'other' - catchall.
Warning classes are:
'usage' - there was something funny about the command-line parameters
'rename' - a description has been renamed to avoid a name conflict
'other' - catchall.
"""
import sys
import logging
__all__ = ["error_message","warning_message","status_message"]
# Module-wide logger: all ctypesgen status/warning/error messages funnel
# through here, formatted as "LEVEL: message" on a stream handler.
log = logging.getLogger('ctypesgen')
ch = logging.StreamHandler() # use stdio
logging_fmt_str = "%(levelname)s: %(message)s"
formatter = logging.Formatter(logging_fmt_str)
ch.setFormatter(formatter)
log.addHandler(ch)
log.setLevel(logging.INFO) # default level that ctypesgen was using with original version
def error_message(msg, cls=None):
    """Log an error.  `cls` names the error class (currently unused)."""
    log.error("%s", msg)
def warning_message(msg, cls=None):
    """Log a warning.  `cls` names the warning class (currently unused).

    Uses Logger.warning(): the warn() alias used previously is deprecated
    and was removed in Python 3.13.
    """
    log.warning('%s', msg)
def status_message(msg):
    """Log an informational progress message, prefixed with "Status:"."""
    log.info("Status: %s", msg)

Wyświetl plik

@ -0,0 +1,311 @@
# ----------------------------------------------------------------------------
# Copyright (c) 2008 David James
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os
import re
import sys
import ctypes
import ctypes.util
_debug_lib = False
_debug_trace = False
class _TraceFunction(object):
def __init__(self, func):
self.__dict__['_func'] = func
def __str__(self):
return self._func.__name__
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._func, name)
def __setattr__(self, name, value):
setattr(self._func, name, value)
class _TraceLibrary(object):
    # Wraps a loaded library so every resolved symbol is returned as a
    # _TraceFunction; prints the library object once at wrap time.
    # (Python 2 module: `print` statement.)
    def __init__(self, library):
        self._library = library
        print library

    def __getattr__(self, name):
        func = getattr(self._library, name)
        f = _TraceFunction(func)
        return f
class _WindowsLibrary(object):
    """Loads a DLL under both calling conventions (cdecl, then stdcall) and
    resolves symbols from whichever binding provides them, wrapped in
    _TraceFunction."""

    def __init__(self, path):
        self._libraries = [
            ctypes.cdll.LoadLibrary(path),
            ctypes.windll.LoadLibrary(path)
        ]

    def __getattr__(self, name):
        for index, library in enumerate(self._libraries):
            try:
                return _TraceFunction(getattr(library, name))
            except AttributeError:
                # A miss on the first binding is swallowed so the next one
                # can be tried; any later miss propagates.
                if index > 0:
                    raise
class LibraryLoader(object):
    # Pyglet-derived loader (Python 2 module: `unicode`, `print`,
    # `except OSError,e` below).  Subclasses override find_library().
    def load_library(self, *names, **kwargs):
        '''Find and load a library.

        More than one name can be specified, they will be tried in order.
        Platform-specific library names (given as kwargs) are tried first.

        Raises ImportError if library is not found.
        '''
        # Frameworks are a Darwin-only concept; handle them up front.
        if 'framework' in kwargs and self.platform == 'darwin':
            return self.load_framework(kwargs['framework'])

        # Normalise the per-platform override names to a mutable list.
        platform_names = kwargs.get(self.platform, [])
        if type(platform_names) in (str, unicode):
            platform_names = [platform_names]
        elif type(platform_names) is tuple:
            platform_names = list(platform_names)

        # Append conventional platform-specific filenames, then the bare
        # names as a final fallback.
        if self.platform == 'linux2':
            platform_names.extend(['lib%s.so' % n for n in names])
        elif self.platform == 'win32':
            platform_names.extend(['%s.dll' % n for n in names])
            platform_names.extend(['lib%s.dll' % n for n in names])
        elif self.platform == 'darwin':
            platform_names.extend(['%s.dylib' % n for n in names])
            platform_names.extend(['lib%s.dylib' % n for n in names])

        platform_names.extend(names)

        for name in platform_names:
            path = self.find_library(name)
            if path:
                try:
                    if self.platform == 'win32':
                        lib = _WindowsLibrary(path)
                    else:
                        lib = ctypes.cdll.LoadLibrary(path)
                    if _debug_lib:
                        print path
                    if _debug_trace:
                        lib = _TraceLibrary(lib)
                    return lib
                except OSError,e:
                    # A candidate that fails to dlopen is skipped, not fatal.
                    pass

        raise ImportError('Library "%s" not found.' % names[0])

    # Default lookup delegates to ctypes; subclasses provide real platform
    # search logic.
    find_library = lambda self, name: ctypes.util.find_library(name)

    platform = sys.platform
    # Cygwin builds use Windows DLL semantics.
    if platform == 'cygwin':
        platform = 'win32'

    def load_framework(self, path):
        raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
    # Mac OS X loader implementing dyld's documented search order, using the
    # (DY)LD_* environment variables captured at construction time.
    def __init__(self):
        if 'LD_LIBRARY_PATH' in os.environ:
            self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
        else:
            self.ld_library_path = []

        if 'DYLD_LIBRARY_PATH' in os.environ:
            self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
        else:
            self.dyld_library_path = []

        if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
            self.dyld_fallback_library_path = \
                os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
        else:
            # dyld's documented default fallback locations.
            self.dyld_fallback_library_path = [
                os.path.expanduser('~/lib'),
                '/usr/local/lib',
                '/usr/lib']

    def find_library(self, path):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''

        libname = os.path.basename(path)
        search_path = []

        # py2app bundle: check the bundled Frameworks directory first.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            search_path.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks',
                libname))

        # Names containing '/' skip LD_LIBRARY_PATH, per dyld's rules.
        if '/' in path:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])
        else:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.ld_library_path])
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])

        for path in search_path:
            if os.path.exists(path):
                return path

        return None

    def find_framework(self, path):
        '''Implement runtime framework search as described by:

        http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html
        '''

        # e.g. path == '/System/Library/Frameworks/OpenGL.framework'
        #      name == 'OpenGL'
        # return '/System/Library/Frameworks/OpenGL.framework/OpenGL'
        name = os.path.splitext(os.path.split(path)[1])[0]

        realpath = os.path.join(path, name)
        if os.path.exists(realpath):
            return realpath

        for dir in ('/Library/Frameworks',
                    '/System/Library/Frameworks'):
            realpath = os.path.join(dir, '%s.framework' % name, name)
            if os.path.exists(realpath):
                return realpath

        return None

    def load_framework(self, path):
        realpath = self.find_framework(path)
        if realpath:
            lib = ctypes.cdll.LoadLibrary(realpath)

            if _debug_lib:
                print realpath

            if _debug_trace:
                lib = _TraceLibrary(lib)

            return lib

        raise ImportError("Can't find framework %s." % path)
class LinuxLibraryLoader(LibraryLoader):
    """Finds libraries the way ld-linux.so does, via a lazily built cache
    of every shared object visible on the linker search path."""

    _ld_so_cache = None

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date). Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
        search_dirs = []
        try:
            search_dirs.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
        except KeyError:
            pass
        try:
            search_dirs.extend([entry.strip() for entry in open('/etc/ld.so.conf')])
        except IOError:
            pass
        search_dirs.extend(['/lib', '/usr/lib'])

        cache = {}
        soname_re = re.compile(r'lib(.*)\.so$')
        for directory in search_dirs:
            try:
                entries = os.listdir(directory)
            except OSError:
                continue
            for entry in entries:
                if '.so' not in entry:
                    continue
                full_path = os.path.join(directory, entry)

                # Index by filename; first hit wins, so earlier directories
                # take priority.
                cache.setdefault(entry, full_path)

                # Index by bare library name too ("libfoo.so" -> "foo").
                match = soname_re.match(entry)
                if match:
                    cache.setdefault(match.group(1), full_path)

        self._ld_so_cache = cache

    def find_library(self, path):
        # Implement the ld-linux.so search path as described in
        # the man page.
        if self._ld_so_cache is None:
            self._create_ld_so_cache()
        return self._ld_so_cache.get(path)
# Pick the loader for the running platform and export its bound method.
if sys.platform == 'darwin':
    loader = MachOLibraryLoader()
elif sys.platform == 'linux2':
    loader = LinuxLibraryLoader()
else:
    # No specialised search logic; rely on ctypes.util.find_library alone.
    loader = LibraryLoader()
load_library = loader.load_library

Wyświetl plik

@ -0,0 +1,41 @@
#!/usr/bin/env python
"""
All of the components of ctypegencore require an argument called "options".
In command-line usage, this would be an optparser.Values object. However, if
ctypesgencore is used as a standard Python module, constructing this object
would be a pain. So this module exists to provide a "default" options object
for convenience.
"""
import optparse, copy
# One entry per ctypesgen option, giving its default value.
default_values={
    "other_headers": [],
    "modules": [],
    "include_search_paths": [],
    "compile_libdirs": [],
    "runtime_libdirs": [],
    "cpp": "gcc -E",
    "save_preprocessed_headers": None,
    "all_headers": False,
    "builtin_symbols": False,
    "include_symbols": None,
    "exclude_symbols": None,
    "show_all_errors": False,
    "show_long_errors": False,
    "show_macro_warnings": True,
    "header_template": None,
    "inserted_files": [],
    "other_known_names": [],
    "include_macros": True,
    "libraries": [],
    "strip_build_path": None,
    "output_language": "python",
    "no_stddef_types": False,
    "no_gnu_types": False,
    "no_python_types": False,
}

def get_default_options():
    """Return a fresh optparse.Values carrying the default option set.

    The defaults are deep-copied, so callers may freely mutate the returned
    object -- including its list values -- without affecting later calls.
    """
    defaults = copy.deepcopy(default_values)
    return optparse.Values(defaults)

Wyświetl plik

@ -0,0 +1,24 @@
#!/usr/bin/env python
"""
This package parses C header files and generates lists of functions, typedefs,
variables, structs, unions, enums, macros, and constants. This package knows
nothing about the libraries themselves.
The public interface for this package is the function "parse". Use as follows:
>>> descriptions = parse(["inputfile1.h","inputfile2.h"], options)
where "options" is an optparse.Values object.
parse() returns a DescriptionCollection object. See ctypesgencore.descriptions
for more information.
"""
from datacollectingparser import DataCollectingParser
def parse(headers, options):
    """Parse the given header files and return their collected descriptions.

    `headers` is a list of header file paths; `options` is an
    optparse.Values object.
    """
    collector = DataCollectingParser(headers, options)
    collector.parse()
    return collector.data()
__all__ = ["parse"]

Wyświetl plik

@ -0,0 +1,174 @@
#!/usr/bin/env python
'''
This file contains classes that represent C declarations. cparser produces
declarations in this format, and ctypesparser reformats them into a format that
is not C-specific. The other modules don't need to touch these.
'''
__docformat__ = 'restructuredtext'
# --------------------------------------------------------------------------
# C Object Model
# --------------------------------------------------------------------------
class Declaration(object):
    """A single C declaration: a declarator plus its type and storage class."""

    def __init__(self):
        self.declarator = None   # Declarator instance, or None
        self.type = Type()       # the declared Type
        self.storage = None      # e.g. 'typedef'/'static', or None

    def __repr__(self):
        parts = {
            'declarator': self.declarator,
            'type': self.type,
        }
        if self.storage:
            parts['storage'] = self.storage
        rendered = ['%s=%r' % (key, value) for key, value in parts.items()]
        return 'Declaration(%s)' % ', '.join(rendered)
class Declarator(object):
    """The name-and-shape half of a declaration: identifier, bitfield width,
    array bounds, initializer and parameter list."""

    pointer = None

    def __init__(self):
        self.identifier = None
        self.initializer = None
        self.array = None
        self.parameters = None
        self.bitfield = None

    # make pointer read-only to catch mistakes early
    pointer = property(lambda self: None)

    def __repr__(self):
        rendered = self.identifier or ''
        if self.bitfield:
            rendered += ":%d" % self.bitfield
        if self.array:
            rendered += repr(self.array)
        if self.initializer:
            rendered += ' = %r' % self.initializer
        if self.parameters is not None:
            rendered += '(' + ', '.join([repr(p) for p in self.parameters]) + ')'
        return rendered
class Pointer(Declarator):
    """A pointer declarator; `pointer` holds the declarator pointed at."""

    pointer = None  # shadows the read-only property on Declarator

    def __init__(self):
        super(Pointer, self).__init__()
        self.qualifiers = []    # e.g. ['const', 'volatile']

    def __repr__(self):
        qualifier_text = ''
        if self.qualifiers:
            qualifier_text = '<%s>' % ' '.join(self.qualifiers)
        return 'POINTER%s(%r)' % (qualifier_text, self.pointer) + \
            super(Pointer, self).__repr__()
class Array(object):
    """One dimension of an array declarator; chains through `array` for
    multi-dimensional arrays."""

    def __init__(self):
        self.size = None    # bound expression, or None for []
        self.array = None   # next chained dimension, or None

    def __repr__(self):
        suffix = '[%r]' % self.size if self.size else '[]'
        if self.array:
            return repr(self.array) + suffix
        return suffix
class Parameter(object):
    """A single function parameter: type, storage class and declarator."""

    def __init__(self):
        self.type = Type()
        self.storage = None
        self.declarator = None

    def __repr__(self):
        parts = {'type': self.type}
        if self.declarator:
            parts['declarator'] = self.declarator
        if self.storage:
            parts['storage'] = self.storage
        rendered = ['%s=%r' % (key, value) for key, value in parts.items()]
        return 'Parameter(%s)' % ', '.join(rendered)
class Type(object):
    """A C type: an ordered list of qualifiers followed by specifiers."""

    def __init__(self):
        self.qualifiers = []   # e.g. ['const']
        self.specifiers = []   # e.g. ['unsigned', 'int'] or struct/enum specs

    def __repr__(self):
        return ' '.join(self.qualifiers + [str(s) for s in self.specifiers])
# These are used only internally.

# Marker subclass of str for storage-class keywords ('typedef', 'static', ...);
# apply_specifiers() dispatches on the exact type.
class StorageClassSpecifier(str):
    pass

# Marker subclass of str for plain type-specifier keywords ('int', 'char', ...).
class TypeSpecifier(str):
    pass
class StructTypeSpecifier(object):
    """Type specifier for a struct or union, with optional tag and body."""

    def __init__(self, is_union, tag, declarations):
        self.is_union = is_union
        self.tag = tag
        self.declarations = declarations

    def __repr__(self):
        text = 'union' if self.is_union else 'struct'
        if self.tag:
            text += ' %s' % self.tag
        if self.declarations:
            text += ' {%s}' % '; '.join([repr(d) for d in self.declarations])
        return text
class EnumSpecifier(object):
    """Type specifier for an enum, with optional tag, enumerator list and
    source location."""

    def __init__(self, tag, enumerators, src=None):
        self.tag = tag
        self.enumerators = enumerators
        self.src = src   # source-location info, when provided by the parser

    def __repr__(self):
        text = 'enum'
        if self.tag:
            text += ' %s' % self.tag
        if self.enumerators:
            text += ' {%s}' % ', '.join([repr(e) for e in self.enumerators])
        return text
class Enumerator(object):
    """One name inside an enum, with an optional explicit value expression."""

    def __init__(self, name, expression):
        self.name = name
        self.expression = expression

    def __repr__(self):
        if self.expression:
            return '%s = %r' % (self.name, self.expression)
        return self.name
# Marker subclass of str for type qualifiers ('const', 'volatile', ...).
class TypeQualifier(str):
    pass
def apply_specifiers(specifiers, declaration):
    '''Apply specifiers to the declaration (declaration may be
    a Parameter instead).'''
    # Dispatch on the exact marker type (not isinstance), matching how the
    # grammar tags each specifier.
    for spec in specifiers:
        kind = type(spec)
        if kind == StorageClassSpecifier:
            if declaration.storage:
                # Multiple storage classes, technically an error... ignore it
                pass
            declaration.storage = spec
        elif kind in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier):
            declaration.type.specifiers.append(spec)
        elif kind == TypeQualifier:
            declaration.type.qualifiers.append(spec)

Wyświetl plik

@ -0,0 +1,208 @@
#!/usr/bin/env python
'''
Parse a C source file.
To use, subclass CParser and override its handle_* methods. Then instantiate
the class with a string to parse.
'''
__docformat__ = 'restructuredtext'
import operator
import os.path
import re
import sys
import time
import warnings
import preprocessor
import yacc
import cgrammar
import cdeclarations
# --------------------------------------------------------------------------
# Lexer
# --------------------------------------------------------------------------
class CLexer(object):
    # Adapter that replays preprocessor tokens to yacc as C tokens.  Tracks
    # typedef'd names (type_names) so IDENTIFIER tokens can be reclassified
    # as TYPE_NAME while parsing.
    def __init__(self, cparser):
        self.cparser = cparser
        self.type_names = set()
        self.in_define = False

    def input(self, tokens):
        # Token list produced by the preprocessor; token() walks it via pos.
        self.tokens = tokens
        self.pos = 0

    def token(self):
        while self.pos < len(self.tokens):
            t = self.tokens[self.pos]

            self.pos += 1

            # A falsy token ends the stream early.
            if not t:
                break

            if t.type == 'PP_DEFINE':
                self.in_define = True
            elif t.type == 'PP_END_DEFINE':
                self.in_define = False

            # Transform PP tokens into C tokens
            elif t.type == 'LPAREN':
                t.type = '('
            elif t.type == 'PP_NUMBER':
                t.type = 'CONSTANT'
            elif t.type == 'IDENTIFIER' and t.value in cgrammar.keywords:
                t.type = t.value.upper()
            elif t.type == 'IDENTIFIER' and t.value in self.type_names:
                # Don't reclassify right after enum/struct/union, where an
                # identifier is a tag rather than a type name.
                if (self.pos < 2 or self.tokens[self.pos-2].type not in
                    ('ENUM', 'STRUCT', 'UNION')):
                    t.type = 'TYPE_NAME'

            # Back-references used by grammar actions.
            t.lexer = self
            t.clexpos = self.pos - 1

            return t
        return None
# --------------------------------------------------------------------------
# Parser
# --------------------------------------------------------------------------
class CParser(object):
    '''Parse a C source file.

    Subclass and override the handle_* methods.  Call `parse` with a string
    to parse.
    '''
    # (Python 2 module: `print >> sys.stderr` statements below.)
    def __init__(self, options):
        self.preprocessor_parser = preprocessor.PreprocessorParser(options,self)
        self.parser = yacc.Parser()
        # Build (or load cached) LALR parse tables from the grammar module.
        prototype = yacc.yacc(method = 'LALR',
                              debug = False,
                              module = cgrammar,
                              write_tables = True,
                              outputdir = os.path.dirname(__file__),
                              optimize = True)
        # If yacc is reading tables from a file, then it won't find the error
        # function... need to set it manually
        prototype.errorfunc = cgrammar.p_error
        prototype.init_parser(self.parser)
        self.parser.cparser = self

        self.lexer = CLexer(self)
        # Seed the lexer with type names that headers don't declare.
        if not options.no_stddef_types:
            self.lexer.type_names.add('wchar_t')
            self.lexer.type_names.add('ptrdiff_t')
            self.lexer.type_names.add('size_t')
        if not options.no_gnu_types:
            self.lexer.type_names.add('__builtin_va_list')
        if sys.platform == 'win32' and not options.no_python_types:
            self.lexer.type_names.add('__int64')

    def parse(self, filename, debug=False):
        '''Parse a file.

        If `debug` is True, parsing state is dumped to stdout.
        '''
        self.handle_status('Preprocessing %s' % filename)
        self.preprocessor_parser.parse(filename)
        self.lexer.input(self.preprocessor_parser.output)
        self.handle_status('Parsing %s' % filename)
        self.parser.parse(lexer=self.lexer, debug=debug)

    # ----------------------------------------------------------------------
    # Parser interface.  Override these methods in your subclass.
    # ----------------------------------------------------------------------

    def handle_error(self, message, filename, lineno):
        '''A parse error occurred.

        The default implementation prints `lineno` and `message` to stderr.
        The parser will try to recover from errors by synchronising at the
        next semicolon.
        '''
        print >> sys.stderr, '%s:%s %s' % (filename, lineno, message)

    def handle_pp_error(self, message):
        '''The C preprocessor emitted an error.

        The default implementation prints the error to stderr.  If
        processing can continue, it will.
        '''
        print >> sys.stderr, 'Preprocessor:', message

    def handle_status(self, message):
        '''Progress information.

        The default implementation prints message to stderr.
        '''
        print >> sys.stderr, message

    def handle_define(self, name, params, value, filename, lineno):
        '''#define `name` `value`
        or #define `name`(`params`) `value`

        name is a string
        params is None or a list of strings
        value is a ...?
        '''

    def handle_define_constant(self, name, value, filename, lineno):
        '''#define `name` `value`

        name is a string
        value is an ExpressionNode or None
        '''

    def handle_define_macro(self, name, params, value, filename, lineno):
        '''#define `name`(`params`) `value`

        name is a string
        params is a list of strings
        value is an ExpressionNode or None
        '''

    def impl_handle_declaration(self, declaration, filename, lineno):
        '''Internal method that calls `handle_declaration`.  This method
        also adds any new type definitions to the lexer's list of valid type
        names, which affects the parsing of subsequent declarations.
        '''
        if declaration.storage == 'typedef':
            declarator = declaration.declarator
            if not declarator:
                # XXX TEMPORARY while struct etc not filled
                return
            # Walk to the innermost declarator to find the typedef'd name.
            while declarator.pointer:
                declarator = declarator.pointer
            self.lexer.type_names.add(declarator.identifier)
        self.handle_declaration(declaration, filename, lineno)

    def handle_declaration(self, declaration, filename, lineno):
        '''A declaration was encountered.

        `declaration` is an instance of Declaration.  Where a declaration
        has multiple initialisers, each is returned as a separate
        declaration.
        '''
        pass
class DebugCParser(CParser):
    '''A convenience class that prints each invocation of a handle_* method to
    stdout.
    '''
    # NOTE(review): handle_define here takes (name, value, ...) while the
    # base class dispatches (name, params, value, ...) -- the signatures
    # look out of sync; confirm against the caller.
    def handle_define(self, name, value, filename, lineno):
        print '#define name=%r, value=%r' % (name, value)

    def handle_define_constant(self, name, value, filename, lineno):
        print '#define constant name=%r, value=%r' % (name, value)

    def handle_declaration(self, declaration, filename, lineno):
        print declaration
# Ad-hoc smoke test: parse the file named on the command line with full
# parser debugging enabled and dump every declaration to stdout.
if __name__ == '__main__':
    DebugCParser().parse(sys.argv[1], debug=True)

Wyświetl plik

@ -0,0 +1,220 @@
#!/usr/bin/env python
'''
ctypesgencore.parser.ctypesparser contains a class, CtypesParser, which is a
subclass of ctypesgencore.parser.cparser.CParser. CtypesParser overrides the
handle_declaration() method of CParser. It turns the low-level type declarations
produced by CParser into CtypesType instances and breaks the parser's general
declarations into function, variable, typedef, constant, and type descriptions.
'''
__docformat__ = 'restructuredtext'
__all__ = ["CtypesParser"]
from cparser import *
from ctypesgencore.ctypedescs import *
from cdeclarations import *
from ctypesgencore.expressions import *
def make_enum_from_specifier(specifier):
    '''Build a CtypesEnum description from a parsed EnumSpecifier.

    Enumerators without an explicit expression get an implicit value:
    zero for the first enumerator, otherwise "<previous name> + 1".
    '''
    entries = []
    previous = None
    for enumerator in specifier.enumerators:
        if enumerator.expression:
            value = enumerator.expression
        elif previous:
            # Implicit value: previous enumerator plus one.
            value = BinaryExpressionNode(
                "addition", (lambda a, b: a + b), "(%s + %s)", (False, False),
                IdentifierExpressionNode(previous),
                ConstantExpressionNode(1))
        else:
            # The first enumerator defaults to zero.
            value = ConstantExpressionNode(0)
        entries.append((enumerator.name, value))
        previous = enumerator.name
    return CtypesEnum(specifier.tag, entries,
                      src=(specifier.filename, specifier.lineno))
def get_decl_id(decl):
    """Return the identifier of a given declarator"""
    # Unwrap any Pointer wrappers to reach the innermost declarator.
    while isinstance(decl, Pointer):
        decl = decl.pointer
    # Anonymous or missing declarators yield the empty string.
    if decl is None or decl.identifier is None:
        return ""
    return decl.identifier
class CtypesParser(CParser):
    '''Parse a C file for declarations that can be used by ctypes.

    Subclass and override the handle_ctypes_* methods.
    '''
    def __init__ (self, options):
        super(CtypesParser, self).__init__(options)
        # Map of (typename, signed, longs) -> ctypes name; optionally
        # extended with Python-builtin-friendly mappings.
        self.type_map = ctypes_type_map
        if not options.no_python_types:
            self.type_map.update(ctypes_type_map_python_builtin)

    def make_struct_from_specifier(self, specifier):
        # Build a CtypesStruct description (struct or union) from a parsed
        # struct/union specifier.
        variety = {True:"union", False:"struct"}[specifier.is_union]
        tag = specifier.tag

        if specifier.declarations:
            members = []
            for declaration in specifier.declarations:
                t = self.get_ctypes_type(declaration.type,
                                         declaration.declarator,
                                         check_qualifiers=True)
                declarator = declaration.declarator
                if declarator is None:
                    # Anonymous field in nested union/struct (C11/GCC).
                    name = None
                else:
                    while declarator.pointer:
                        declarator = declarator.pointer
                    name = declarator.identifier
                members.append((name, remove_function_pointer(t)))
        else:
            # No member list means this is an opaque (forward) declaration.
            members = None

        return CtypesStruct(tag,variety,members,
                            src=(specifier.filename,specifier.lineno))

    def get_ctypes_type(self, typ, declarator, check_qualifiers=False):
        '''Convert a (type, declarator) pair from the C parser into a
        CtypesType instance, handling struct/enum specifiers, numeric
        specifiers, bitfields, pointers, arrays and function signatures.
        '''
        # Defaults for a bare numeric declaration ("int").
        signed = True
        typename = 'int'
        longs = 0
        t = None

        for specifier in typ.specifiers:
            if isinstance(specifier, StructTypeSpecifier):
                t = self.make_struct_from_specifier(specifier)
            elif isinstance(specifier, EnumSpecifier):
                t = make_enum_from_specifier(specifier)
            elif specifier == 'signed':
                signed = True
            elif specifier == 'unsigned':
                signed = False
            elif specifier == 'long':
                longs += 1
            else:
                typename = str(specifier)

        if not t:
            # It is a numeric type of some sort
            if (typename,signed,longs) in self.type_map:
                t = CtypesSimple(typename,signed,longs)

            elif signed and not longs:
                # Unknown name with no extra specifiers: assume a typedef.
                t = CtypesTypedef(typename)

            else:
                name = " ".join(typ.specifiers)
                if typename in [x[0] for x in self.type_map.keys()]:
                    # It's an unsupported variant of a builtin type
                    error = "Ctypes does not support the type \"%s\"." % name
                else:
                    error = "Ctypes does not support adding additional " \
                        "specifiers to typedefs, such as \"%s\"" % name
                t = CtypesTypedef(name)
                t.error(error,cls='unsupported-type')

            if declarator and declarator.bitfield:
                t = CtypesBitfield(t,declarator.bitfield)

        qualifiers = []
        qualifiers.extend(typ.qualifiers)
        # Walk outward through the pointer chain, wrapping t in function,
        # array and pointer types as the declarators dictate.
        while declarator and declarator.pointer:
            if declarator.parameters is not None:
                variadic = "..." in declarator.parameters

                params = []
                for param in declarator.parameters:
                    if param=="...":
                        break
                    param_name = get_decl_id(param.declarator)
                    ct = self.get_ctypes_type(param.type, param.declarator)
                    ct.identifier = param_name
                    params.append(ct)
                t = CtypesFunction(t, params, variadic)

            a = declarator.array
            while a:
                t = CtypesArray(t, a.size)
                a = a.array

            qualifiers.extend(declarator.qualifiers)
            t = CtypesPointer(t, declarator.qualifiers)

            declarator = declarator.pointer

        # The innermost declarator may itself carry a parameter list
        # (a plain function declaration) or array dimensions.
        if declarator and declarator.parameters is not None:
            variadic = "..." in declarator.parameters

            params = []
            for param in declarator.parameters:
                if param=="...":
                    break
                param_name = get_decl_id(param.declarator)
                ct = self.get_ctypes_type(param.type, param.declarator)
                ct.identifier = param_name
                params.append(ct)
            t = CtypesFunction(t, params, variadic)

        if declarator:
            a = declarator.array
            while a:
                t = CtypesArray(t, a.size)
                a = a.array

        # Represent "signed char *" as the special String type so the
        # generated wrapper accepts Python strings.
        if isinstance(t, CtypesPointer) and \
           isinstance(t.destination, CtypesSimple) and \
           t.destination.name=="char" and \
           t.destination.signed:
            t = CtypesSpecial("String")

        return t

    def handle_declaration(self, declaration, filename, lineno):
        # Dispatch a parsed declaration to the appropriate
        # handle_ctypes_* hook (new type, typedef, function or variable).
        t = self.get_ctypes_type(declaration.type, declaration.declarator)

        if type(t) in (CtypesStruct, CtypesEnum):
            self.handle_ctypes_new_type(
                remove_function_pointer(t), filename, lineno)

        declarator = declaration.declarator
        if declarator is None:
            # XXX TEMPORARY while struct with no typedef not filled in
            return
        while declarator.pointer:
            declarator = declarator.pointer
        name = declarator.identifier
        if declaration.storage == 'typedef':
            self.handle_ctypes_typedef(
                name, remove_function_pointer(t), filename, lineno)
        elif type(t) == CtypesFunction:
            self.handle_ctypes_function(
                name, t.restype, t.argtypes, t.variadic, filename, lineno)
        elif declaration.storage != 'static':
            # Static objects are file-local; only expose external variables.
            self.handle_ctypes_variable(name, t, filename, lineno)

    # ctypes parser interface.  Override these methods in your subclass.

    def handle_ctypes_new_type(self, ctype, filename, lineno):
        pass

    def handle_ctypes_typedef(self, name, ctype, filename, lineno):
        pass

    # NOTE(review): handle_declaration above passes an extra `variadic`
    # argument (name, restype, argtypes, variadic, filename, lineno) that this
    # stub does not accept; subclasses that override it with a variadic
    # parameter are unaffected, but calling the base stub would raise
    # TypeError -- confirm intended signature.
    def handle_ctypes_function(self, name, restype, argtypes, filename, lineno):
        pass

    def handle_ctypes_variable(self, name, ctype, filename, lineno):
        pass

Wyświetl plik

@ -0,0 +1,329 @@
#!/usr/bin/env python
"""
DataCollectingParser subclasses ctypesparser.CtypesParser and builds Description
objects from the CtypesType objects and other information from CtypesParser.
After parsing is complete, a DescriptionCollection object can be retrieved by
calling DataCollectingParser.data().
"""
import ctypesparser
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.expressions import *
from ctypesgencore.messages import *
from tempfile import mkstemp
import os
class DataCollectingParser(ctypesparser.CtypesParser,
                           ctypesparser.CtypesTypeVisitor):
    """Main class for the Parser component. Steps for use:
    p=DataCollectingParser(names_of_header_files,options)
    p.parse()
    data=p.data() #A dictionary of constants, enums, structs, functions, etc.
    """
    def __init__(self,headers,options):
        ctypesparser.CtypesParser.__init__(self,options)
        self.headers=headers
        self.options=options

        # One description list per kind of top-level entity encountered.
        self.constants=[]
        self.typedefs=[]
        self.structs=[]
        self.enums=[]
        self.functions=[]
        self.variables=[]
        self.macros=[]

        # Every description in one flat list, plus the (kind, description)
        # pairs in the order they should appear in generated output.
        self.all=[]
        self.output_order=[]

        # NULL is a useful macro to have defined
        null = ConstantExpressionNode(None)
        nullmacro = ConstantDescription("NULL",null,("<built-in>",1))
        self.constants.append(nullmacro)
        self.all.append(nullmacro)
        self.output_order.append(("constant", nullmacro))

        # A list of tuples describing macros; saved to be processed after
        # everything else has been parsed
        self.saved_macros = []
        # A set of structs that are already known
        self.already_seen_structs=set()
        # A dict of structs that have only been seen in opaque form
        self.already_seen_opaque_structs={}
        # A set of enums that are already known
        self.already_seen_enums=set()
        # A dict of enums that have only been seen in opaque form
        self.already_seen_opaque_enums={}

    def parse(self):
        # Write a temporary header that #includes every requested header,
        # run the C parser over it, then process the macros that were saved
        # along the way.
        fd, fname = mkstemp(suffix=".h")
        f = os.fdopen(fd, 'w')
        for header in self.options.other_headers:
            print >>f, '#include <%s>' % header
        for header in self.headers:
            print >>f, '#include "%s"' % os.path.abspath(header)
        f.flush()
        f.close()
        ctypesparser.CtypesParser.parse(self, fname, None)
        os.unlink(fname)

        # Macros are deferred until all types are known.
        for name, params, expr, (filename,lineno) in self.saved_macros:
            self.handle_macro(name, params, expr, filename, lineno)

    def handle_define_constant(self, name, expr, filename, lineno):
        # Called by CParser
        # Save to handle later
        self.saved_macros.append((name, None, expr, (filename, lineno)))

    def handle_define_unparseable(self, name, params, value, filename, lineno):
        # Called by CParser
        # Record the macro anyway, flagged with an error, so the user sees
        # what could not be translated.
        if params:
            original_string = "#define %s(%s) %s" % \
                (name, ",".join(params), " ".join(value))
        else:
            original_string = "#define %s %s" % \
                (name, " ".join(value))
        macro = MacroDescription(name, params, None,
                                 src = (filename,lineno))
        macro.error("Could not parse macro \"%s\"" % original_string,
                    cls = 'macro')
        macro.original_string = original_string
        self.macros.append(macro)
        self.all.append(macro)
        self.output_order.append(('macro',macro))

    def handle_define_macro(self, name, params, expr, filename, lineno):
        # Called by CParser
        # Save to handle later
        self.saved_macros.append((name, params, expr, (filename,lineno)))

    def handle_ctypes_typedef(self, name, ctype, filename, lineno):
        # Called by CtypesParser
        # Visit the type first so any structs/enums it references are
        # emitted before the typedef that uses them.
        ctype.visit(self)

        typedef=TypedefDescription(name,
                                   ctype,
                                   src=(filename,repr(lineno)))

        self.typedefs.append(typedef)
        self.all.append(typedef)
        self.output_order.append(('typedef',typedef))

    def handle_ctypes_new_type(self, ctype, filename, lineno):
        # Called by CtypesParser
        if isinstance(ctype,ctypesparser.CtypesEnum):
            self.handle_enum(ctype, filename, lineno)
        else:
            self.handle_struct(ctype, filename, lineno)

    def handle_ctypes_function(self, name, restype, argtypes, variadic,
                               filename, lineno):
        # Called by CtypesParser
        restype.visit(self)
        for argtype in argtypes:
            argtype.visit(self)

        function=FunctionDescription(name,
                                     restype,
                                     argtypes,
                                     variadic = variadic,
                                     src=(filename,repr(lineno)))

        self.functions.append(function)
        self.all.append(function)
        self.output_order.append(('function',function))

    def handle_ctypes_variable(self, name, ctype, filename, lineno):
        # Called by CtypesParser
        ctype.visit(self)

        variable=VariableDescription(name,
                                     ctype,
                                     src=(filename,repr(lineno)))

        self.variables.append(variable)
        self.all.append(variable)
        self.output_order.append(('variable',variable))

    def handle_struct(self, ctypestruct, filename, lineno):
        # Called from within DataCollectingParser

        # When we find an opaque struct, we make a StructDescription for it
        # and record it in self.already_seen_opaque_structs. If we later
        # find a transparent struct with the same tag, we fill in the
        # opaque struct with the information from the transparent struct and
        # move the opaque struct to the end of the struct list.

        name = "%s %s"%(ctypestruct.variety,ctypestruct.tag)

        if name in self.already_seen_structs:
            return

        if ctypestruct.opaque:
            if name not in self.already_seen_opaque_structs:
                struct = StructDescription(ctypestruct.tag,
                                           ctypestruct.variety,
                                           None, # No members
                                           True, # Opaque
                                           ctypestruct,
                                           src=(filename,str(lineno)))

                self.already_seen_opaque_structs[name]=struct
                self.structs.append(struct)
                self.all.append(struct)
                self.output_order.append(('struct',struct))

        else:
            # Visit member types first so their declarations precede this
            # struct's body in the output.
            for (membername,ctype) in ctypestruct.members:
                ctype.visit(self)

            if name in self.already_seen_opaque_structs:
                # Fill in older version
                struct=self.already_seen_opaque_structs[name]
                struct.opaque = False
                struct.members = ctypestruct.members
                struct.ctype = ctypestruct
                struct.src = ctypestruct.src

                self.output_order.append(('struct-body',struct))

                del self.already_seen_opaque_structs[name]

            else:
                struct = StructDescription(ctypestruct.tag,
                                           ctypestruct.variety,
                                           ctypestruct.members,
                                           False, # Not opaque
                                           src=(filename,str(lineno)),
                                           ctype=ctypestruct)
                self.structs.append(struct)
                self.all.append(struct)
                self.output_order.append(('struct',struct))
                self.output_order.append(('struct-body',struct))

            self.already_seen_structs.add(name)

    def handle_enum(self, ctypeenum, filename, lineno):
        # Called from within DataCollectingParser.

        # Process for handling opaque enums is the same as process for opaque
        # structs. See handle_struct() for more details.

        tag = ctypeenum.tag
        if tag in self.already_seen_enums:
            return

        if ctypeenum.opaque:
            if tag not in self.already_seen_opaque_enums:
                enum=EnumDescription(ctypeenum.tag,
                                     None,
                                     ctypeenum,
                                     src = (filename,str(lineno)))
                enum.opaque = True

                self.already_seen_opaque_enums[tag]=enum
                self.enums.append(enum)
                self.all.append(enum)
                self.output_order.append(('enum',enum))

        else:
            if tag in self.already_seen_opaque_enums:
                # Fill in older opaque version
                enum = self.already_seen_opaque_enums[tag]
                enum.opaque = False
                enum.ctype = ctypeenum
                enum.src = ctypeenum.src
                enum.members = ctypeenum.enumerators

                del self.already_seen_opaque_enums[tag]

            else:
                enum=EnumDescription(ctypeenum.tag,
                                     ctypeenum.enumerators,
                                     src=(filename,str(lineno)),
                                     ctype=ctypeenum)
                enum.opaque = False

                self.enums.append(enum)
                self.all.append(enum)
                self.output_order.append(('enum',enum))

            self.already_seen_enums.add(tag)

            # Each enumerator also becomes a module-level constant.
            for (enumname,expr) in ctypeenum.enumerators:
                constant=ConstantDescription(enumname, expr,
                                             src=(filename,lineno))

                self.constants.append(constant)
                self.all.append(constant)
                self.output_order.append(('constant',constant))

    def handle_macro(self, name, params, expr, filename, lineno):
        # Called from within DataCollectingParser
        src = (filename,lineno)

        if expr==None:
            # A macro with no value is exposed as the constant True.
            expr = ConstantExpressionNode(True)
            constant = ConstantDescription(name, expr, src)
            self.constants.append(constant)
            self.all.append(constant)
            return

        expr.visit(self)

        if isinstance(expr,CtypesType):
            if params:
                # NOTE(review): MacroDescription is called with three
                # positional arguments here but four elsewhere -- confirm
                # against the MacroDescription constructor.
                macro = MacroDescription(name, "", src)
                macro.error("%s has parameters but evaluates to a type. " \
                    "Ctypesgen does not support it." % macro.casual_name(),
                            cls = 'macro')
                self.macros.append(macro)
                self.all.append(macro)
                self.output_order.append(('macro',macro))
            else:
                # A macro that names a type acts like a typedef.
                typedef = TypedefDescription(name, expr, src)
                self.typedefs.append(typedef)
                self.all.append(typedef)
                self.output_order.append(('typedef',typedef))
        else:
            macro = MacroDescription(name, params, expr, src)
            self.macros.append(macro)
            self.all.append(macro)
            self.output_order.append(('macro',macro))

        # Macros could possibly contain things like __FILE__, __LINE__, etc...
        # This could be supported, but it would be a lot of work. It would
        # probably also bloat the Preamble considerably.

    def handle_error(self, message, filename, lineno):
        # Called by CParser
        error_message("%s:%d: %s" % (filename,lineno,message), cls='cparser')

    def handle_pp_error(self, message):
        # Called by PreprocessorParser
        error_message("%s: %s" % (self.options.cpp, message), cls = 'cparser')

    def handle_status(self, message):
        # Called by CParser
        status_message(message)

    def visit_struct(self, struct):
        # CtypesTypeVisitor callback: register structs referenced by other
        # declarations.
        self.handle_struct(struct, struct.src[0], struct.src[1])

    def visit_enum(self,enum):
        # CtypesTypeVisitor callback: register enums referenced by other
        # declarations.
        self.handle_enum(enum, enum.src[0], enum.src[1])

    def data(self):
        '''Return everything collected so far as a DescriptionCollection.'''
        return DescriptionCollection(self.constants,
                                     self.typedefs,
                                     self.structs,
                                     self.enums,
                                     self.functions,
                                     self.variables,
                                     self.macros,
                                     self.all,
                                     self.output_order)

Wyświetl plik

@ -0,0 +1,878 @@
#-----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
# Modification for pyglet by Alex Holkner (alex.holkner@gmail.com)
# Modification for ctypesgen by Tim Maxwell (timmaxw@gmail.com) <tm>
#
# Copyright (C) 2001-2006, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file LICENSE for a complete copy of the LGPL.
#-----------------------------------------------------------------------------
__version__ = "2.2"

import re, sys, types, os.path

# Regular expression used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Available instance types. This is used when lexers are defined by a class.
# It's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.

try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
    _INSTANCETYPE = types.InstanceType
    class object: pass      # Note: needed if no new-style classes present
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    '''Raised when the lexer hits input it cannot tokenise and no error
    rule recovers. `args` holds the message; `text` holds the remaining
    unprocessed input.'''
    def __init__(self, message, s):
        # Exception.__init__ with one argument sets self.args = (message,).
        Exception.__init__(self, message)
        self.text = s
# Token class
class LexToken(object):
    '''A single token produced by the lexer.

    Attributes (type, value, lineno, lexpos, lexer) are assigned
    externally by Lexer.token(); this class only provides display and
    the skip() convenience.'''
    def __str__(self):
        fields = (self.type, self.value, self.lineno, self.lexpos)
        return "LexToken(%s,%r,%d,%d)" % fields
    def __repr__(self):
        return self.__str__()
    def skip(self, n):
        # Delegate to the owning lexer to advance past n characters.
        self.lexer.skip(n)
# -----------------------------------------------------------------------------
# Lexer class
#
# This class encapsulates all of the methods and data associated with a lexer.
#
# input() - Store a new string in the lexer
# token() - Get the next token
# -----------------------------------------------------------------------------
class Lexer:
    '''Encapsulates all of the methods and data associated with a lexer.

    input() - Store a new string in the lexer
    token() - Get the next token
    '''
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexdebug = 0             # Debugging mode
        self.lexoptimize = 0          # Optimized mode

    def clone(self,object=None):
        '''Return a copy of this lexer, optionally rebinding its rule
        functions onto a new object (for class-based lexers).'''
        c = Lexer()
        c.lexstatere = self.lexstatere
        c.lexstateinfo = self.lexstateinfo
        c.lexstateretext = self.lexstateretext
        c.lexstate = self.lexstate
        c.lexstatestack = self.lexstatestack
        c.lexstateignore = self.lexstateignore
        c.lexstateerrorf = self.lexstateerrorf
        c.lexreflags = self.lexreflags
        c.lexdata = self.lexdata
        c.lexpos = self.lexpos
        c.lexlen = self.lexlen
        c.lextokens = self.lextokens
        c.lexdebug = self.lexdebug
        c.lineno = self.lineno
        c.lexoptimize = self.lexoptimize
        c.lexliterals = self.lexliterals
        c.lexmodule = self.lexmodule

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object

        # Set up other attributes
        c.begin(c.lexstate)
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    # <tm> 25 June 2008 added 'outputdir'
    def writetab(self,tabfile,outputdir=''):
        # Serialise the lexer tables as a generated Python module so that
        # an optimized run can reload them with readtab().
        tf = open(os.path.join(outputdir,tabfile)+".py","w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_lextokens    = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = { }
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                # Store the regex text together with rule-function *names*
                # (functions themselves cannot be serialised).
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
            tabre[key] = titem

        tf.write("_lexstatere   = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        # Inverse of writetab(): import the generated module and rebuild
        # the lexer tables, resolving rule names back to functions via fdict.
        exec "import %s as lextab" % tabfile
        self.lextokens      = lextab._lextokens
        self.lexreflags     = lextab._lexreflags
        self.lexliterals    = lextab._lexliterals
        self.lexstateinfo   = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere     = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        if state not in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos    = self.lexpos
        lexlen    = self.lexlen
        lexignore = self.lexignore
        lexdata   = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Set last match in lexer so that rules can access it if they want
                self.lexmatch = m

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.groups = m.groups()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                tok.lexer = self

                lexpos = m.end()
                i = m.lastindex
                func,tok.type = lexindexfunc[i]
                self.lexpos = lexpos

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type: return tok
                    break

                # if func not callable, it means it's an ignored token
                if not hasattr(func, '__call__'):
                    break

                # If token is processed by a function, call it
                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.

                    # Added for pyglet/tools/wrapper/cparser.py by Alex
                    # Holkner on 20/Jan/2007
                    lexdata = self.lexdata
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    # Allow any single-character literal also for
                    # pyglet/tools/wrapper/cparser.py by Alex Holkner on
                    # 20/Jan/2007
                    if newtok.type not in self.lextokens and len(newtok.type) > 1:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.func_code.co_filename, func.func_code.co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.lexer = self
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None
# -----------------------------------------------------------------------------
# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the filename.
# -----------------------------------------------------------------------------
def _validate_file(filename):
    '''Check the lexer source file for duplicated t_rulename() functions
    or strings, using a simple regular expression match on each line.
    Returns 1 if no duplicates were found, 0 otherwise.'''
    import os.path
    base,ext = os.path.splitext(filename)
    if ext != '.py': return 1          # No idea what the file is. Return OK

    try:
        f = open(filename)
        lines = f.readlines()
        f.close()
    except IOError:
        return 1                       # Oh well

    # Match "def t_NAME(" and "t_NAME =" style rule definitions.
    fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
    sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
    counthash = { }
    linen = 1
    noerror = 1
    for l in lines:
        m = fre.match(l)
        if not m:
            m = sre.match(l)
        if m:
            name = m.group(1)
            prev = counthash.get(name)
            if not prev:
                counthash[name] = linen
            else:
                print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
                noerror = 0
        linen += 1
    return noerror
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist):
result = []
for f in funclist:
if f and f[0]:
result.append((f[0].__name__,f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict):
    '''Combine a list of token regex strings into one master regex.
    If Python's re module rejects the combined pattern (e.g. too many
    named groups), recursively split the list in half and return several
    (compiled-regex, index-map) pairs instead.'''
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Rule defined as a function: strip the "t_" prefix to get
                # the token type name.
                lexindexfunc[i] = (handle,handle.__name__[2:])
            elif handle is not None:
                # If rule was specified as a string, we build an anonymous
                # callback function to carry out the action
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                    print "IGNORE", f
                else:
                    lexindexfunc[i] = (None, f[2:])
        return [(lexre,lexindexfunc)],[regex]
    except Exception,e:
        # Pattern too big for re: split the rule list and recurse.
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre = _form_master_re(relist[:m],reflags,ldict)
        rlist, rre = _form_master_re(relist[m:],reflags,ldict)
        return llist+rlist, lre+rre
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if parts[i] not in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names.keys())
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
# cls added for pyglet/tools/wrapper/cparser.py by Alex Holkner on 22/Jan/2007
# <tm> 25 June 2008 added 'outputdir'
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir='',cls=Lexer):
    """Build and return a Lexer from rule definitions.

    Rules (t_* functions/strings, `tokens`, `states`, `literals`) are
    taken from `module`/`object` if given, otherwise from the caller's
    global namespace.  In optimize mode a previously written `lextab`
    table is loaded instead of validating rules.  Raises SyntaxError on
    any validation failure.  NOTE: this is Python-2-only code
    (print statements, types.StringType, cmp-based sorts).
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    error = 0
    files = { }
    lexobj = cls()
    lexobj.lexdebug = debug
    lexobj.lexoptimize = optimize
    global token,input

    if nowarn: warn = 0
    else: warn = 1

    # `object` is an alias for `module` (instance instead of module).
    if object: module = object

    if module:
        # User supplied a module object.
        if isinstance(module, types.ModuleType):
            ldict = module.__dict__
        elif isinstance(module, _INSTANCETYPE):
            _items = [(k,getattr(module,k)) for k in dir(module)]
            ldict = { }
            for (i,v) in _items:
                ldict[i] = v
        else:
            raise ValueError("Expected a module or instance")
        lexobj.lexmodule = module

    else:
        # No module given. We might be able to get information from the caller.
        # The raise/except dance is the py2 idiom for grabbing the caller's frame.
        try:
            raise RuntimeError
        except RuntimeError:
            e,b,t = sys.exc_info()
            f = t.tb_frame
            f = f.f_back           # Walk out to our calling function
            ldict = f.f_globals    # Grab its globals dictionary

    if optimize and lextab:
        # Fast path: load a previously generated table module.
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass

    # Get the tokens, states, and literals variables (if any)
    if (module and isinstance(module,_INSTANCETYPE)):
        tokens = getattr(module,"tokens",None)
        states = getattr(module,"states",None)
        literals = getattr(module,"literals","")
    else:
        tokens = ldict.get("tokens",None)
        states = ldict.get("states",None)
        literals = ldict.get("literals","")

    if not tokens:
        raise SyntaxError("lex: module does not define 'tokens'")

    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
        raise SyntaxError("lex: tokens must be a list or tuple.")

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    if not optimize:
        for n in tokens:
            if not _is_identifier.match(n):
                print "lex: Bad token name '%s'" % n
                error = 1
            if warn and n in lexobj.lextokens:
                print "lex: Warning. Token '%s' multiply defined." % n
            lexobj.lextokens[n] = None
    else:
        for n in tokens: lexobj.lextokens[n] = None

    if debug:
        print "lex: tokens = '%s'" % lexobj.lextokens.keys()

    # Validate literals: each must be a single-character string.
    try:
        for c in literals:
            if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
                print "lex: Invalid literal %s. Must be a single character" % repr(c)
                error = 1
                continue
    except TypeError:
        print "lex: Invalid literals specification. literals must be a sequence of characters."
        error = 1

    lexobj.lexliterals = literals

    # Build statemap
    if states:
        if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
            print "lex: states must be defined as a tuple or list."
            error = 1
        else:
            for s in states:
                if not isinstance(s,types.TupleType) or len(s) != 2:
                    print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
                    error = 1
                    continue
                name, statetype = s
                if not isinstance(name,types.StringType):
                    print "lex: state name %s must be a string" % repr(name)
                    error = 1
                    continue
                if not (statetype == 'inclusive' or statetype == 'exclusive'):
                    print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
                    error = 1
                    continue
                if name in stateinfo:
                    print "lex: state '%s' already defined." % name
                    error = 1
                    continue
                stateinfo[name] = statetype

    # Get a list of symbols with the t_ or s_ prefix
    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]

    # Now build up a list of functions and a list of strings
    funcsym =  { }        # Symbols defined as functions
    strsym =   { }        # Symbols defined as strings
    toknames = { }        # Mapping of symbols to token names

    for s in stateinfo.keys():
        funcsym[s] = []
        strsym[s] = []

    ignore   = { }        # Ignore strings by state
    errorf   = { }        # Error functions by state

    if len(tsymbols) == 0:
        raise SyntaxError("lex: no rules of the form t_rulename are defined.")

    # Classify every t_ symbol into per-state function/string buckets.
    for f in tsymbols:
        t = ldict[f]
        states, tokname = _statetoken(f,stateinfo)
        toknames[f] = tokname

        if hasattr(t, '__call__'):
            for s in states: funcsym[s].append((f,t))
        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
            for s in states: strsym[s].append((f,t))
        else:
            print "lex: %s not defined as a function or string" % f
            error = 1

    # Sort the functions by line number (definition order == priority)
    for f in funcsym.values():
        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))

    # Sort the strings by regular expression length (longest first)
    for s in strsym.values():
        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))

    regexs = { }

    # Build the master regular expressions
    for state in stateinfo.keys():
        regex_list = []

        # Add rules defined by functions first
        for fname, f in funcsym[state]:
            line = f.func_code.co_firstlineno
            file = f.func_code.co_filename
            files[file] = None
            tokname = toknames[fname]

            ismethod = isinstance(f, types.MethodType)

            if not optimize:
                # Rule functions take exactly one argument (plus self for methods).
                nargs = f.func_code.co_argcount
                if ismethod:
                    reqargs = 2
                else:
                    reqargs = 1
                if nargs > reqargs:
                    print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
                    error = 1
                    continue

                if nargs < reqargs:
                    print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
                    error = 1
                    continue

                if tokname == 'ignore':
                    print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
                    error = 1
                    continue

            if tokname == 'error':
                errorf[state] = f
                continue

            if f.__doc__:
                # The rule's regex lives in its docstring.
                if not optimize:
                    try:
                        c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
                        if c.match(""):
                            print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
                            error = 1
                            continue
                    except re.error,e:
                        print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
                        if '#' in f.__doc__:
                            print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
                        error = 1
                        continue

                    if debug:
                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)

                # Okay. The regular expression seemed okay. Let's append it to the master regular
                # expression we're building

                regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
            else:
                print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)

        # Now add all of the simple rules
        for name,r in strsym[state]:
            tokname = toknames[name]

            if tokname == 'ignore':
                ignore[state] = r
                continue

            if not optimize:
                if tokname == 'error':
                    raise SyntaxError("lex: Rule '%s' must be defined as a function" % name)
                    # NOTE(review): unreachable after the raise above (kept verbatim).
                    error = 1
                    continue

                if tokname not in lexobj.lextokens and tokname.find("ignore_") < 0:
                    print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
                    error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
                    if (c.match("")):
                        print "lex: Regular expression for rule '%s' matches empty string." % name
                        error = 1
                        continue
                except re.error,e:
                    print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
                    if '#' in r:
                        print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
                    error = 1
                    continue
                if debug:
                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)

            regex_list.append("(?P<%s>%s)" % (name,r))

        if not regex_list:
            print "lex: No rules defined for state '%s'" % state
            error = 1

        regexs[state] = regex_list

    if not optimize:
        for f in files.keys():
            if not _validate_file(f):
                error = 1

    if error:
        raise SyntaxError("lex: Unable to build lexer.")

    # From this point forward, we're reasonably confident that we can build the lexer.
    # No more errors will be generated, but there might be some warning messages.

    # Build the master regular expressions
    for state in regexs.keys():
        lexre, re_text = _form_master_re(regexs[state],reflags,ldict)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        if debug:
            for i in range(len(re_text)):
                print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])

    # For inclusive states, we need to add the INITIAL state
    for state,type in stateinfo.items():
        if state != "INITIAL" and type == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]

    # Set up ignore variables
    lexobj.lexstateignore = ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = errorf
    lexobj.lexerrorf = errorf.get("INITIAL",None)
    if warn and not lexobj.lexerrorf:
        print "lex: Warning. no t_error rule is defined."

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if warn and s not in errorf:
                print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
            if warn and s not in ignore and lexobj.lexignore:
                print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error/ignore behaviour.
            if s not in errorf:
                errorf[s] = errorf.get("INITIAL",None)
            if s not in ignore:
                ignore[s] = ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
    """Run the lexer as a main program: tokenize `data` (or the file
    named in sys.argv[1], or stdin) and print each token.

    Falls back to the module-global token()/input() set by lex() when no
    `lexer` is supplied.  Python-2-only (print statements).
    """
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            print "Reading from standard input (type EOF to end):"
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)

    if lexer:
        _token = lexer.token
    else:
        _token = token

    # Emit (type, value, lineno, lexpos) per token until exhaustion.
    while True:
        tok = _token()
        if not tok: break
        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that installs the regex `r` as the rule function's
    docstring (PLY reads a lexer rule's pattern from __doc__)."""
    def _attach(func):
        func.__doc__ = r
        return func
    return _attach

# Alternative spelling of the TOKEN decorator
Token = TOKEN

Wyświetl plik

@ -0,0 +1,8 @@
# lextab.py. This file automatically created by PLY (version 2.2). Don't edit!
_lextokens = {'RIGHT_OP': None, 'RIGHT_ASSIGN': None, 'DEC_OP': None, 'PP_MACRO_PARAM': None, 'DIV_ASSIGN': None, 'PP_DEFINE': None, 'PP_END_DEFINE': None, 'PP_DEFINE_MACRO_NAME': None, 'HEADER_NAME': None, 'NEWLINE': None, 'CHARACTER_CONSTANT': None, 'PP_STRINGIFY': None, 'AND_ASSIGN': None, 'PTR_OP': None, 'ELLIPSIS': None, 'IDENTIFIER': None, 'ADD_ASSIGN': None, 'PERIOD': None, 'AND_OP': None, 'OTHER': None, 'LPAREN': None, 'LEFT_OP': None, 'LE_OP': None, 'OR_OP': None, 'SUB_ASSIGN': None, 'MOD_ASSIGN': None, 'STRING_LITERAL': None, 'PP_IDENTIFIER_PASTE': None, 'PP_NUMBER': None, 'PP_DEFINE_NAME': None, 'XOR_ASSIGN': None, 'OR_ASSIGN': None, 'GE_OP': None, 'MUL_ASSIGN': None, 'LEFT_ASSIGN': None, 'INC_OP': None, 'NE_OP': None, 'EQ_OP': None}
_lexreflags = 0
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive', 'DEFINE': 'exclusive'}
_lexstatere = {'INITIAL': [('(?P<t_ANY_directive>\\#\\s+(\\d+)\\s+"([^"]+)"[ \\d]*\\n)|(?P<t_ANY_punctuator>(\\.\\.\\.|\\|\\||\\+\\+|\\*=|\\^=|<<=|>>=|\\|=|\\+=|>=|>>|<<|<=|<:|%=|:>|<%|!=|\\)|\\+|\\*|\\.|\\?|==|&=|&&|\\[|\\^|--|/=|%>|-=|->|\\||!|%|&|-|,|/|;|:|=|>|]|<|{|}|~))', [None, ('t_ANY_directive', 'ANY_directive'), None, None, ('t_ANY_punctuator', 'ANY_punctuator')]), ('(?P<t_INITIAL_identifier>[a-zA-Z_]([a-zA-Z_]|[0-9])*)', [None, ('t_INITIAL_identifier', 'INITIAL_identifier')]), ('(?P<t_ANY_float>(?P<p1>[0-9]+)?(?P<dp>[.]?)(?P<p2>(?(p1)[0-9]*|[0-9]+))(?P<exp>(?:[Ee][+-]?[0-9]+)?)(?P<suf>[FflL]?)(?!\\w))', [None, ('t_ANY_float', 'ANY_float'), None, None, None, None, None]), ('(?P<t_ANY_int>(?P<p1>(?:0x[a-fA-F0-9]+)|(?:[0-9]+))(?P<suf>[uUlL]*))', [None, ('t_ANY_int', 'ANY_int'), None, None]), ('(?P<t_ANY_character_constant>L?\'(\\\\.|[^\\\\\'])+\')|(?P<t_ANY_string_literal>L?"(\\\\.|[^\\\\"])*")|(?P<t_ANY_lparen>\\()|(?P<t_INITIAL_newline>\\n)|(?P<t_INITIAL_pp_define>\\#define)', [None, ('t_ANY_character_constant', 'ANY_character_constant'), None, ('t_ANY_string_literal', 'ANY_string_literal'), None, ('t_ANY_lparen', 'ANY_lparen'), ('t_INITIAL_newline', 'INITIAL_newline'), ('t_INITIAL_pp_define', 'INITIAL_pp_define')])], 'DEFINE': [('(?P<t_ANY_directive>\\#\\s+(\\d+)\\s+"([^"]+)"[ \\d]*\\n)|(?P<t_ANY_punctuator>(\\.\\.\\.|\\|\\||\\+\\+|\\*=|\\^=|<<=|>>=|\\|=|\\+=|>=|>>|<<|<=|<:|%=|:>|<%|!=|\\)|\\+|\\*|\\.|\\?|==|&=|&&|\\[|\\^|--|/=|%>|-=|->|\\||!|%|&|-|,|/|;|:|=|>|]|<|{|}|~))', [None, ('t_ANY_directive', 'ANY_directive'), None, None, ('t_ANY_punctuator', 'ANY_punctuator')]), ('(?P<t_DEFINE_identifier>[a-zA-Z_]([a-zA-Z_]|[0-9])*)', [None, ('t_DEFINE_identifier', 'DEFINE_identifier')]), ('(?P<t_ANY_float>(?P<p1>[0-9]+)?(?P<dp>[.]?)(?P<p2>(?(p1)[0-9]*|[0-9]+))(?P<exp>(?:[Ee][+-]?[0-9]+)?)(?P<suf>[FflL]?)(?!\\w))', [None, ('t_ANY_float', 'ANY_float'), None, None, None, None, None]), ('(?P<t_ANY_int>(?P<p1>(?:0x[a-fA-F0-9]+)|(?:[0-9]+))(?P<suf>[uUlL]*))', [None, 
('t_ANY_int', 'ANY_int'), None, None]), ('(?P<t_ANY_character_constant>L?\'(\\\\.|[^\\\\\'])+\')|(?P<t_ANY_string_literal>L?"(\\\\.|[^\\\\"])*")|(?P<t_ANY_lparen>\\()|(?P<t_DEFINE_newline>\\n)|(?P<t_DEFINE_pp_param_op>(\\#\\#)|(\\#))', [None, ('t_ANY_character_constant', 'ANY_character_constant'), None, ('t_ANY_string_literal', 'ANY_string_literal'), None, ('t_ANY_lparen', 'ANY_lparen'), ('t_DEFINE_newline', 'DEFINE_newline'), ('t_DEFINE_pp_param_op', 'DEFINE_pp_param_op')])]}
_lexstateignore = {'INITIAL': ' \t\x0b\x0c\r', 'DEFINE': ' \t\x0b\x0c\r'}
_lexstateerrorf = {'INITIAL': 't_INITIAL_error', 'DEFINE': 't_DEFINE_error'}

File diff suppressed because one or more lines are too long

Wyświetl plik

@ -0,0 +1,288 @@
#!/usr/bin/env python
'''Preprocess a C source file using gcc and convert the result into
a token stream
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
'''
__docformat__ = 'restructuredtext'
import os, re, shlex, sys, tokenize, lex, yacc, traceback
import ctypes
from lex import TOKEN
tokens = (
'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
'STRING_LITERAL', 'OTHER',
'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'PERIOD', 'ELLIPSIS',
'LPAREN', 'NEWLINE',
'PP_DEFINE', 'PP_DEFINE_NAME', 'PP_DEFINE_MACRO_NAME', 'PP_MACRO_PARAM',
'PP_STRINGIFY', 'PP_IDENTIFIER_PASTE', 'PP_END_DEFINE'
)
states = [('DEFINE',"exclusive")]
# Shorthand character classes spliced into rule regexes via sub().
# NOTE(review): the 'E' entry embeds '{D}', which sub() does not expand
# recursively; 'E' appears unused by the visible rules — verify upstream.
subs = {
    'D': '[0-9]',
    'L': '[a-zA-Z_]',
    'H': '[a-fA-F0-9]',
    'E': '[Ee][+-]?\s*{D}+',
    'FS': '[FflL]',
    'IS': '[uUlL]*',
}
# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
sub_pattern = re.compile('{([^}]*)}')
def sub_repl_match(m):
    # Replacement callback: map the captured name to its character class.
    return subs[m.groups()[0]]
def sub(s):
    # Single-pass expansion of every {name} placeholder in s.
    return sub_pattern.sub(sub_repl_match, s)
# --------------------------------------------------------------------------
# Token value types
# --------------------------------------------------------------------------
# Numbers represented as int and float types.
# For all other tokens, type is just str representation.
class StringLiteral(str):
    """A str subclass holding the *unescaped* contents of a C string
    literal (surrounding double quotes stripped).

    Python-2-only: relies on the 'string_escape' codec.
    """
    def __new__(cls, value):
        assert value[0] == '"' and value[-1] == '"'
        # Unescaping probably not perfect but close enough.
        value = value[1:-1].decode('string_escape')
        return str.__new__(cls, value)
# --------------------------------------------------------------------------
# Token declarations
# --------------------------------------------------------------------------
punctuators = {
# value: (regex, type)
r'...': (r'\.\.\.', 'ELLIPSIS'),
r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
r'<<=': (r'<<=', 'LEFT_ASSIGN'),
r'+=': (r'\+=', 'ADD_ASSIGN'),
r'-=': (r'-=', 'SUB_ASSIGN'),
r'*=': (r'\*=', 'MUL_ASSIGN'),
r'/=': (r'/=', 'DIV_ASSIGN'),
r'%=': (r'%=', 'MOD_ASSIGN'),
r'&=': (r'&=', 'AND_ASSIGN'),
r'^=': (r'\^=', 'XOR_ASSIGN'),
r'|=': (r'\|=', 'OR_ASSIGN'),
r'>>': (r'>>', 'RIGHT_OP'),
r'<<': (r'<<', 'LEFT_OP'),
r'++': (r'\+\+', 'INC_OP'),
r'--': (r'--', 'DEC_OP'),
r'->': (r'->', 'PTR_OP'),
r'&&': (r'&&', 'AND_OP'),
r'||': (r'\|\|', 'OR_OP'),
r'<=': (r'<=', 'LE_OP'),
r'>=': (r'>=', 'GE_OP'),
r'==': (r'==', 'EQ_OP'),
r'!=': (r'!=', 'NE_OP'),
r'<:': (r'<:', '['),
r':>': (r':>', ']'),
r'<%': (r'<%', '{'),
r'%>': (r'%>', '}'),
r';': (r';', ';'),
r'{': (r'{', '{'),
r'}': (r'}', '}'),
r',': (r',', ','),
r':': (r':', ':'),
r'=': (r'=', '='),
r')': (r'\)', ')'),
r'[': (r'\[', '['),
r']': (r']', ']'),
r'.': (r'\.', 'PERIOD'),
r'&': (r'&', '&'),
r'!': (r'!', '!'),
r'~': (r'~', '~'),
r'-': (r'-', '-'),
r'+': (r'\+', '+'),
r'*': (r'\*', '*'),
r'/': (r'/', '/'),
r'%': (r'%', '%'),
r'<': (r'<', '<'),
r'>': (r'>', '>'),
r'^': (r'\^', '^'),
r'|': (r'\|', '|'),
r'?': (r'\?', '?')
}
def punctuator_regex(punctuators):
    """Build an alternation regex matching any punctuator.

    `punctuators` maps each punctuator to a (regex, token_type) pair.
    Patterns are ordered longest-first so multi-character operators
    (e.g. '>>=') win over their prefixes (e.g. '>>', '>').

    Fix: the original used list.sort(cmp_function), which was removed in
    Python 3; sorted(key=len, reverse=True) is available since Python 2.4
    and is behaviourally identical (stable, descending by length).
    """
    punctuator_regexes = sorted((v[0] for v in punctuators.values()),
                                key=len, reverse=True)
    return '(%s)' % '|'.join(punctuator_regexes)
# Process line-number directives from the preprocessor
# See http://docs.freebsd.org/info/cpp/cpp.info.Output.html
DIRECTIVE = r'\#\s+(\d+)\s+"([^"]+)"[ \d]*\n'

@TOKEN(DIRECTIVE)
def t_ANY_directive(t):
    """Consume a '# <lineno> "<file>"' marker: update the lexer's
    position bookkeeping and emit no token (returns None)."""
    # NOTE(review): `t.groups` is not a standard LexToken attribute —
    # presumably attached by the lexer driver; verify before relying on it.
    t.lexer.filename = t.groups[2]
    t.lexer.lineno = int(t.groups[1])
    return None

@TOKEN(punctuator_regex(punctuators))
def t_ANY_punctuator(t):
    """Tag a matched punctuator with its token type from the table."""
    t.type = punctuators[t.value][1]
    return t
IDENTIFIER = sub('{L}({L}|{D})*')

@TOKEN(IDENTIFIER)
def t_INITIAL_identifier(t):
    """Plain identifier outside of a #define."""
    t.type = 'IDENTIFIER'
    return t

@TOKEN(IDENTIFIER)
def t_DEFINE_identifier(t):
    """Identifier inside a #define: classify it as the macro name
    (object-like or function-like), a macro parameter, or a plain
    identifier, driven by lexer-side state set by t_INITIAL_pp_define."""
    if t.lexer.next_is_define_name:
        # This identifier is the name of a macro
        # We need to look ahead and see if this macro takes parameters or not.
        if t.lexpos + len(t.value) < t.lexer.lexlen and \
                t.lexer.lexdata[t.lexpos + len(t.value)] == '(':
            # '(' immediately after the name (no space) => function-like macro.
            t.type = 'PP_DEFINE_MACRO_NAME'
            # Look ahead and read macro parameter list
            lexdata = t.lexer.lexdata
            pos = t.lexpos + len(t.value) + 1
            while lexdata[pos] not in '\n)':
                pos+=1
            params = lexdata[t.lexpos+len(t.value)+1 : pos]
            paramlist = [x.strip() for x in params.split(",") if x.strip()]
            t.lexer.macro_params = paramlist
        else:
            t.type = 'PP_DEFINE_NAME'
        t.lexer.next_is_define_name = False
    elif t.value in t.lexer.macro_params:
        t.type = 'PP_MACRO_PARAM'
    else:
        t.type = 'IDENTIFIER'
    return t
FLOAT_LITERAL = sub(r"(?P<p1>{D}+)?(?P<dp>[.]?)(?P<p2>(?(p1){D}*|{D}+))" \
                    r"(?P<exp>(?:[Ee][+-]?{D}+)?)(?P<suf>{FS}?)(?!\w)")

@TOKEN(FLOAT_LITERAL)
def t_ANY_float(t):
    """Lex a numeric literal matched by the float pattern into PP_NUMBER.

    The value is re-emitted with a one-letter prefix so the parser can
    type it: "f" = float (has a decimal point, exponent, or F/f suffix),
    "l" = long (L/l suffix, integral), "i" = int otherwise.
    """
    t.type = 'PP_NUMBER'
    m = t.lexer.lexmatch

    p1 = m.group("p1")
    dp = m.group("dp")
    p2 = m.group("p2")
    exp = m.group("exp")
    suf = m.group("suf")

    if dp or exp or (suf and suf in ("Ff")):
        s = m.group(0)
        if suf:
            # Drop the trailing F/f suffix from the text.
            s = s[:-1]
        # Attach a prefix so the parser can figure out if should become an
        # integer, float, or long
        t.value = "f" + s
    elif (suf and suf in ("Ll")):
        t.value = "l" + p1
    else:
        t.value = "i" + p1
    return t
INT_LITERAL = sub(r"(?P<p1>(?:0x{H}+)|(?:{D}+))(?P<suf>{IS})")

@TOKEN(INT_LITERAL)
def t_ANY_int(t):
    """Lex an integer literal (decimal, octal, or hex, with an optional
    [uUlL]* suffix) into a PP_NUMBER token.

    The value is re-emitted in decimal with a one-letter prefix telling
    the parser how to type it: "l" for long (L/l suffix), "i" for int.

    Fixes: (1) use named groups — numeric indices like m.group(2) are
    relative to the master regex PLY assembles, so they were fragile;
    (2) the old test `"l" in m.group(2)` inspected the digits group,
    which can never contain 'l', so a lowercase suffix ('123l') was
    misclassified as a plain int; (3) long() -> int(), identical on
    Python 2 (auto-promotion) and valid on Python 3.
    """
    t.type = 'PP_NUMBER'
    m = t.lexer.lexmatch
    suf = m.group("suf")
    if "L" in suf or "l" in suf:
        prefix = "l"
    else:
        prefix = "i"
    g1 = m.group("p1")
    if g1.startswith("0x"):
        # Convert base from hexadecimal
        g1 = str(int(g1[2:], 16))
    elif g1[0] == "0":
        # Convert base from octal
        g1 = str(int(g1, 8))
    t.value = prefix + g1
    return t
CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
@TOKEN(CHARACTER_CONSTANT)
def t_ANY_character_constant(t):
    """C character constant, e.g. 'a' or L'\\n'; value left verbatim."""
    t.type = 'CHARACTER_CONSTANT'
    return t

STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
@TOKEN(STRING_LITERAL)
def t_ANY_string_literal(t):
    """C string literal; value is unescaped via StringLiteral."""
    t.type = 'STRING_LITERAL'
    t.value = StringLiteral(t.value)
    return t

@TOKEN(r'\(')
def t_ANY_lparen(t):
    """Distinguish a '(' that immediately follows a token (LPAREN — the
    function-like-macro call form) from one preceded by whitespace."""
    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'):
        t.type = 'LPAREN'
    else:
        t.type = '('
    return t
@TOKEN(r'\n')
def t_INITIAL_newline(t):
    """Ordinary newline: count the line, emit no token."""
    t.lexer.lineno += 1
    return None

@TOKEN(r'\#define')
def t_INITIAL_pp_define(t):
    """'#define' directive: switch to the DEFINE lexer state and prime
    the state flags consumed by t_DEFINE_identifier."""
    t.type = 'PP_DEFINE'
    t.lexer.begin("DEFINE")
    t.lexer.next_is_define_name = True
    t.lexer.macro_params = set()
    return t

@TOKEN(r'\n')
def t_DEFINE_newline(t):
    """Newline inside a #define terminates it: emit PP_END_DEFINE and
    return the lexer to the INITIAL state."""
    t.type = 'PP_END_DEFINE'
    t.lexer.begin("INITIAL")
    t.lexer.lineno += 1
    del t.lexer.macro_params

    # Damage control in case the token immediately after the #define failed
    # to handle this
    t.lexer.next_is_define_name = False
    return t

@TOKEN(r'(\#\#)|(\#)')
def t_DEFINE_pp_param_op(t):
    """'#' (stringify) vs '##' (token paste) inside a #define body."""
    if t.value=='#':
        t.type = 'PP_STRINGIFY'
    else:
        t.type = 'PP_IDENTIFIER_PASTE'
    return t

def t_INITIAL_error(t):
    """Unmatched input outside a #define: pass it through as OTHER."""
    t.type = 'OTHER'
    return t

def t_DEFINE_error(t):
    """Unmatched input inside a #define: emit one OTHER char and advance."""
    t.type = 'OTHER'
    t.value = t.value[0]
    t.lexer.lexpos+=1 # Skip it if it's an error in a #define
    return t

# Whitespace ignored in every state (newlines are handled explicitly above).
t_ANY_ignore = ' \t\v\f\r'

Wyświetl plik

@ -0,0 +1,204 @@
#!/usr/bin/env python
'''Preprocess a C source file using gcc and convert the result into
a token stream
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
'''
__docformat__ = 'restructuredtext'
import os, re, shlex, sys, tokenize, lex, yacc, traceback, subprocess
import ctypes
from lex import TOKEN
import pplexer
# --------------------------------------------------------------------------
# Lexers
# --------------------------------------------------------------------------
class PreprocessorLexer(lex.Lexer):
    """A lex.Lexer that tracks the current filename and supports a stack
    of nested inputs (push_input/pop_input), so included text can be
    lexed and then resume the outer stream."""

    def __init__(self):
        lex.Lexer.__init__(self)
        self.filename = '<input>'
        # NOTE(review): in_define is set here but not used by any method
        # visible in this class — verify whether the rule functions use it.
        self.in_define = False

    def input(self, data, filename=None):
        """Start lexing `data`, resetting the input stack."""
        if filename:
            self.filename = filename
        self.lasttoken = None
        self.input_stack = []
        lex.Lexer.input(self, data)

    def push_input(self, data, filename):
        """Suspend the current stream and start lexing `data` instead."""
        self.input_stack.append(
            (self.lexdata, self.lexpos, self.filename, self.lineno))
        self.lexdata = data
        self.lexpos = 0
        self.lineno = 1
        self.filename = filename
        self.lexlen = len(self.lexdata)

    def pop_input(self):
        """Resume the most recently suspended stream."""
        self.lexdata, self.lexpos, self.filename, self.lineno = \
            self.input_stack.pop()
        self.lexlen = len(self.lexdata)

    def token(self):
        """Next token; on end of the current stream, pop back through the
        input stack before giving up.  Tags each token with its filename."""
        result = lex.Lexer.token(self)
        while result is None and self.input_stack:
            self.pop_input()
            result = lex.Lexer.token(self)

        if result:
            self.lasttoken = result.type
            result.filename = self.filename
        else:
            self.lasttoken = None

        return result
class TokenListLexer(object):
    """Expose a pre-computed list of tokens through the standard lexer
    token() interface (returns None when exhausted)."""

    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def token(self):
        """Return the next stored token, or None past the end."""
        if self.pos >= len(self.tokens):
            return None
        tok = self.tokens[self.pos]
        self.pos += 1
        return tok
def symbol_to_token(sym):
    """Normalize a parser symbol: unwrap a YaccSymbol to its value,
    pass a LexToken through, assert on anything else."""
    if isinstance(sym, yacc.YaccSymbol):
        return sym.value
    if isinstance(sym, lex.LexToken):
        return sym
    assert False, 'Not a symbol: %r' % sym
def create_token(type, value, production=None):
    '''Create a token of type and value, at the position where 'production'
    was reduced. Don't specify production if the token is built-in'''
    tok = lex.LexToken()
    tok.type = type
    tok.value = value
    tok.lexpos = -1
    if production:
        # Borrow position info from the first symbol of the production.
        origin = production.slice[1]
        tok.lineno = origin.lineno
        tok.filename = origin.filename
    else:
        tok.lineno = -1
        tok.filename = '<builtin>'
    return tok
# --------------------------------------------------------------------------
# Grammars
# --------------------------------------------------------------------------
class PreprocessorParser(object):
    """Drive the external C preprocessor (options.cpp) over a file and
    tokenize its output with a PreprocessorLexer built from pplexer's
    rules.  Tokens accumulate in self.output.

    Python-2-only in places (the file() builtin in parse()).
    """

    def __init__(self,options,cparser):
        # Macro definitions always passed to cpp; neutralize GCC-isms so
        # the token stream stays parseable.
        self.defines = ["inline=", "__inline__=", "__extension__=",
                        "__const=const", "__asm__(x)=",
                        "__asm(x)=", "CTYPESGEN=1"]

        # On OSX, explicitly add these defines to keep from getting syntax
        # errors in the OSX standard headers.
        if sys.platform == 'darwin':
            self.defines += ["__uint16_t=uint16_t",
                             "__uint32_t=uint32_t",
                             "__uint64_t=uint64_t"]

        self.matches = []
        self.output = []
        # optimize=1 loads/writes the pre-generated 'lextab' table.
        self.lexer = lex.lex(cls=PreprocessorLexer,
                             optimize=1,
                             lextab='lextab',
                             outputdir=os.path.dirname(__file__),
                             module=pplexer)
        self.options = options
        self.cparser = cparser  # An instance of CParser

    def parse(self, filename):
        """Parse a file and save its output"""
        # -U __GNUC__ hides GCC builtins; -dD keeps #define lines in output.
        cmd = self.options.cpp
        cmd += " -U __GNUC__ -dD"

        # This fixes Issue #6 where OS X 10.6+ adds a C extension that breaks
        # the parser. Blocks shouldn't be needed for ctypesgen support anyway.
        if sys.platform == 'darwin':
            cmd += " -U __BLOCKS__"

        for path in self.options.include_search_paths:
            cmd += " -I%s" % path
        for define in self.defines:
            cmd += ' "-D%s"' % define
        cmd += ' "' + filename + '"'
        self.cparser.handle_status(cmd)

        pp = subprocess.Popen(cmd,
                              shell = True,
                              universal_newlines=True,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE)
        ppout, pperr = pp.communicate()

        # Forward each preprocessor stderr line as a pp error.
        for line in pperr.split("\n"):
            if line:
                self.cparser.handle_pp_error(line)

        # We separate lines that are #defines and lines that are source code
        # We put all the source lines first, then all the #define lines.
        source_lines = []
        define_lines = []

        for line in ppout.split("\n"):
            line = line + "\n"
            if line.startswith("# "):
                # Line number information has to go with both groups
                source_lines.append(line)
                define_lines.append(line)
            elif line.startswith("#define"):
                # Blank placeholder keeps line numbers aligned in the source half.
                source_lines.append("\n")
                define_lines.append(line)
            elif line.startswith("#"):
                # It's a directive, but not a #define. Remove it
                source_lines.append("\n")
                define_lines.append("\n")
            else:
                source_lines.append(line)
                define_lines.append("\n")

        text = "".join(source_lines + define_lines)

        if self.options.save_preprocessed_headers:
            self.cparser.handle_status("Saving preprocessed headers to %s." % \
                self.options.save_preprocessed_headers)
            try:
                f = file(self.options.save_preprocessed_headers, "w")
                f.write(text)
                f.close()
            except IOError:
                self.cparser.handle_error("Couldn't save headers.")

        # Tokenize the merged text and collect every token.
        self.lexer.input(text)
        self.output = []

        while True:
            token = self.lexer.token()
            if token is not None:
                self.output.append(token)
            else:
                break

Wyświetl plik

@ -0,0 +1,10 @@
#!/usr/bin/env python
"""
This module is the backend to ctypesgen; it contains classes to
produce the final .py output files.
"""
from printer import WrapperPrinter
__all__ = ["WrapperPrinter"]

Wyświetl plik

@ -0,0 +1,148 @@
#!/usr/bin/env python
import os, sys, time, json
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.messages import *
import ctypesgencore.libraryloader # So we can get the path to it
import test # So we can find the path to local files in the printer package
def path_to_local_file(name,known_local_module = test):
    """Return the path of file `name` located next to the given module
    (defaults to the local `test` package)."""
    base_dir = os.path.dirname(known_local_module.__file__)
    return os.path.join(base_dir, name)
# From http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
# From http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
def todict(obj, classkey="Klass"):
    """Recursively convert an object graph to plain dicts/lists.

    dicts are converted *in place* (same object returned); iterables
    become lists; objects with a __dict__ are flattened to a dict of
    their public, non-callable attributes, tagged with the class name
    under `classkey`.  Anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        for key in obj.keys():
            obj[key] = todict(obj[key], classkey)
        return obj
    if hasattr(obj, "__iter__"):
        return [todict(item, classkey) for item in obj]
    if hasattr(obj, "__dict__"):
        # Python 2 idiom (.iteritems); skips private and callable attrs.
        data = dict([(key, todict(value, classkey))
                     for key, value in obj.__dict__.iteritems()
                     if not callable(value) and not key.startswith('_')])
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    return obj
class WrapperPrinter:
    """Serialize ctypesgen description objects to JSON.

    Walks data.output_order, converts each included description via the
    per-kind print_* method, and dumps the collected list as one JSON
    document to `outpath` (or stdout).  Python-2-only syntax
    (print >>file, the file() builtin).
    """

    def __init__(self,outpath,options,data):
        status_message("Writing to %s." % (outpath or "stdout"))
        self.file=outpath and file(outpath,"w") or sys.stdout
        self.options=options

        # Normalize strip_build_path to end with a path separator.
        if self.options.strip_build_path and \
           self.options.strip_build_path[-1] != os.path.sep:
            self.options.strip_build_path += os.path.sep

        self.print_group(self.options.libraries,"libraries",self.print_library)

        # Dispatch table: description kind -> converter method.
        method_table = {
            'function': self.print_function,
            'macro': self.print_macro,
            'struct': self.print_struct,
            'struct-body': self.print_struct_members,
            'typedef': self.print_typedef,
            'variable': self.print_variable,
            'enum': self.print_enum,
            'constant': self.print_constant
        }

        res = []
        for kind,desc in data.output_order:
            if desc.included:
                item = method_table[kind](desc)
                if item: res.append(item)

        print >>self.file, json.dumps(res, sort_keys=True, indent=4)

    def print_group(self,list,name,function):
        """Apply `function` to each item of `list` (the `name` argument
        is unused here)."""
        if list:
            return [function(obj) for obj in list]

    def print_library(self,library):
        """Dict entry for a shared library to load."""
        return {'load_library': library}

    def print_constant(self,constant):
        """Dict entry for a #define'd constant."""
        return {'type': 'constant',
                'name': constant.name,
                'value': constant.value.py_string(False),
                }

    def print_typedef(self,typedef):
        """Dict entry for a typedef and its underlying ctype."""
        return {'type': 'typedef',
                'name': typedef.name,
                'ctype': todict(typedef.ctype),
                }

    def print_struct(self, struct):
        """Dict entry for a struct/union; includes fields (with bitfield
        widths where present) unless the type is opaque."""
        res = {'type': struct.variety,
               'name': struct.tag,
               }
        if not struct.opaque:
            res['fields'] = []
            for name, ctype in struct.members:
                field = {'name': name,
                         'ctype': todict(ctype),
                         }
                if isinstance(ctype, CtypesBitfield):
                    field['bitfield'] = ctype.bitfield.py_string(False)
                res['fields'].append(field)
        return res

    def print_struct_members(self, struct):
        """struct-body entries produce no separate output."""
        pass

    def print_enum(self,enum):
        """Dict entry for an enum; includes members unless opaque."""
        res = {'type': 'enum',
               'name': enum.tag,
               }
        if not enum.opaque:
            res['fields'] = []
            for name, ctype in enum.members:
                field = {'name': name,
                         'ctype': todict(ctype),
                         }
                res['fields'].append(field)
        return res

    def print_function(self, function):
        """Dict entry for a function prototype (args, return, variadic)."""
        res = {'type': 'function',
               'name': function.c_name(),
               'variadic': function.variadic,
               'args': todict(function.argtypes),
               'return': todict(function.restype),
               }
        if function.source_library:
            res['source'] = function.source_library
        return res

    def print_variable(self, variable):
        """Dict entry for an exported variable."""
        res = {'type': 'variable',
               'ctype': todict(variable.ctype),
               'name': variable.c_name(),
               }
        if variable.source_library:
            res['source'] = variable.source_library
        return res

    def print_macro(self, macro):
        """Dict entry for a macro: function-like (has params) or object-like."""
        if macro.params:
            return {'type': 'macro_function',
                    'name': macro.name,
                    'args': macro.params,
                    'body': macro.expr.py_string(True),
                    }
        else:
            # The macro translator makes heroic efforts but it occasionally fails.
            # Beware the contents of the value!
            return {'type': 'macro',
                    'name': macro.name,
                    'value': macro.expr.py_string(True),
                    }

Wyświetl plik

@ -0,0 +1,6 @@
"""
ctypesgencore.printer.printer imports this module so that it can find the path
to defaulttemplate.py and defaultloader.py.
"""
pass

Wyświetl plik

@ -0,0 +1,10 @@
#!/usr/bin/env python
"""
This module is the backend to ctypesgen; it contains classes to
produce the final .py output files.
"""
from printer import WrapperPrinter
__all__ = ["WrapperPrinter"]

Wyświetl plik

@ -0,0 +1,9 @@
'''Wrapper for %(name)s
Generated with:
%(argv)s
Do not modify this file.
'''
__docformat__ = 'restructuredtext'

Wyświetl plik

@ -0,0 +1,289 @@
import ctypes, os, sys
from ctypes import *
# Pick the signed integer type whose size matches size_t as c_ptrdiff_t.
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
    # Some builds of ctypes apparently do not have c_int64
    # defined; it's a pretty good bet that these builds do not
    # have 64-bit pointers.
    _int_types += (c_int64,)
for t in _int_types:
    if sizeof(t) == sizeof(c_size_t):
        c_ptrdiff_t = t
del t
del _int_types

class c_void(Structure):
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    _fields_ = [('dummy', c_int)]
def POINTER(obj):
    """Wrapper over ctypes.POINTER whose classes convert None into a real
    NULL pointer via from_param (works around bugs in how ctypes handles
    None on 64-bit platforms)."""
    ptr_type = ctypes.POINTER(obj)
    if not isinstance(ptr_type.from_param, classmethod):
        def from_param(cls, value):
            # None -> NULL pointer of this type; anything else passes through.
            return cls() if value is None else value
        ptr_type.from_param = classmethod(from_param)
    return ptr_type
# Bundled copy of the (Python 2) UserString class so that the String
# union below can inherit full string behaviour without importing the
# deprecated UserString module.
# NOTE(review): Python-2-only code (basestring, long, cmp, sys.maxint).
class UserString:
# Wraps any sequence-like value; UserString inputs are copied.
def __init__(self, seq):
if isinstance(seq, basestring):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
# Python-2 rich comparison via cmp on the underlying data.
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
# Concatenation and repetition return new instances of the same class.
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, basestring):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def decode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.decode(encoding, errors))
else:
return self.__class__(self.data.decode(encoding))
else:
return self.__class__(self.data.decode())
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and then forget to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
# Deliberately unhashable: mutable values must not be dict keys.
def __hash__(self):
raise TypeError("unhashable type (it is mutable)")
# Item assignment rebuilds self.data around the replaced character.
def __setitem__(self, index, sub):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
# Python-2 slice assignment/deletion protocol.
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, basestring):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
# Return an immutable (hashable) snapshot of the current value.
def immutable(self):
return UserString(self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, basestring):
self.data += other
else:
self.data += str(other)
return self
def __imul__(self, n):
self.data *= n
return self
# ctypes-aware string type used by the generated wrappers: a Union of a
# raw char pointer and a c_char_p, with string behaviour inherited from
# MutableString and a permissive from_param for argument conversion.
class String(MutableString, Union):
_fields_ = [('raw', POINTER(c_char)),
('data', c_char_p)]
def __init__(self, obj=""):
if isinstance(obj, (str, unicode, UserString)):
self.data = str(obj)
else:
self.raw = obj
def __len__(self):
# NULL data reads as length 0.
return self.data and len(self.data) or 0
def from_param(cls, obj):
# Convert None or 0
if obj is None or obj == 0:
return cls(POINTER(c_char)())
# Convert from String
elif isinstance(obj, String):
return obj
# Convert from str
elif isinstance(obj, str):
return cls(obj)
# Convert from c_char_p
elif isinstance(obj, c_char_p):
return obj
# Convert from POINTER(c_char)
elif isinstance(obj, POINTER(c_char)):
return obj
# Convert from raw pointer
elif isinstance(obj, int):
return cls(cast(obj, POINTER(c_char)))
# Convert from object
else:
# Fall back to the ctypes _as_parameter_ protocol.
return String.from_param(obj._as_parameter_)
from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
# restype/errcheck hook for generated wrappers: coerce a returned char
# pointer into a String. `func` and `arguments` are the ctypes errcheck
# protocol parameters and are ignored.
return String.from_param(obj)
# As of ctypes 1.0, ctypes does not support custom error-checking
# functions on callbacks, nor does it support custom datatypes on
# callbacks, so we must ensure that all callbacks return
# primitive datatypes.
#
# Non-primitive return values wrapped with UNCHECKED won't be
# typechecked, and will be converted to c_void_p.
def UNCHECKED(type):
    """Return a callback-safe version of `type`.

    ctypes callbacks only support primitive return types, so any
    non-primitive (including pointer types, whose type code is 'P' or a
    non-string _type_) is mapped to c_void_p; primitives pass through.
    """
    is_primitive = hasattr(type, "_type_") and isinstance(type._type_, str)
    if is_primitive and type._type_ != "P":
        return type
    return c_void_p
# ctypes doesn't have direct support for variadic functions, so we have to write
# our own wrapper class
class _variadic_function(object):
def __init__(self,func,restype,argtypes):
self.func=func
self.func.restype=restype
self.argtypes=argtypes
def _as_parameter_(self):
# So we can pass this variadic function as a function pointer
return self.func
def __call__(self,*args):
fixed_args=[]
i=0
for argtype in self.argtypes:
# Typecheck what we can
fixed_args.append(argtype.from_param(args[i]))
i+=1
return self.func(*fixed_args+list(args[i:]))

Wyświetl plik

@ -0,0 +1,343 @@
#!/usr/bin/env python
import os, sys, time
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.messages import *
import ctypesgencore.libraryloader # So we can get the path to it
import test # So we can find the path to local files in the printer package
def path_to_local_file(name,known_local_module = test):
# Resolve `name` relative to the directory containing
# `known_local_module` (by default the local `test` module imported
# above), so files shipped alongside the package can be located at
# runtime regardless of the current working directory.
basedir=os.path.dirname(known_local_module.__file__)
return os.path.join(basedir,name)
class WrapperPrinter:
    """Writes the generated ctypes wrapper module.

    Walks the processed DescriptionCollection in output order and prints
    Python source for every included description to `outpath` (or stdout
    when no path is given)."""

    def __init__(self, outpath, options, data):
        status_message("Writing to %s." % (outpath or "stdout"))
        self.file = outpath and file(outpath, "w") or sys.stdout
        self.options = options

        # Normalize --strip-build-path so srcinfo() can prefix-match it.
        if self.options.strip_build_path and \
                self.options.strip_build_path[-1] != os.path.sep:
            self.options.strip_build_path += os.path.sep

        self.print_header()
        print >>self.file
        self.print_preamble()
        print >>self.file
        self.print_loader()
        print >>self.file

        self.print_group(self.options.libraries, "libraries", self.print_library)
        self.print_group(self.options.modules, "modules", self.print_module)

        # Dispatch table: description kind -> emitter method.
        method_table = {
            'function': self.print_function,
            'macro': self.print_macro,
            'struct': self.print_struct,
            'struct-body': self.print_struct_members,
            'typedef': self.print_typedef,
            'variable': self.print_variable,
            'enum': self.print_enum,
            'constant': self.print_constant
        }

        # Only descriptions the processor marked as included are emitted.
        for kind, desc in data.output_order:
            if desc.included:
                method_table[kind](desc)
        print >>self.file

        self.print_group(self.options.inserted_files, "inserted files",
                         self.insert_file)

    def print_group(self, list, name, function):
        """Run `function` over `list`, bracketing the output with
        begin/end comments (or a '# No <name>' note when empty)."""
        if list:
            print >>self.file, "# Begin %s" % name
            print >>self.file
            for obj in list:
                function(obj)
            print >>self.file
            print >>self.file, "# %d %s" % (len(list), name)
            print >>self.file, "# End %s" % name
        else:
            print >>self.file, "# No %s" % name
        print >>self.file

    def srcinfo(self, src):
        """Print a '# filename: lineno' locator comment for `src`,
        honouring --strip-build-path."""
        if src == None:
            print >>self.file
        else:
            filename, lineno = src
            if filename in ("<built-in>", "<command line>"):
                print >>self.file, "# %s" % filename
            else:
                if self.options.strip_build_path and \
                        filename.startswith(self.options.strip_build_path):
                    filename = filename[len(self.options.strip_build_path):]
                print >>self.file, "# %s: %s" % (filename, lineno)

    def template_subs(self):
        """Build the substitution mapping used to fill the header template."""
        template_subs = {
            'date': time.ctime(),
            'argv': ' '.join([x for x in sys.argv if not x.startswith("--strip-build-path")]),
            'name': os.path.basename(self.options.headers[0])
        }
        # Expose every option value as a string for template use.
        for opt, value in self.options.__dict__.iteritems():
            if type(value) == str:
                template_subs[opt] = value
            elif isinstance(value, (list, tuple)):
                template_subs[opt] = (os.path.sep).join(value)
            else:
                template_subs[opt] = repr(value)
        return template_subs

    def print_header(self):
        """Write the module header from --header-template, falling back
        to the bundled default template on error."""
        template_file = None

        if self.options.header_template:
            path = self.options.header_template
            try:
                template_file = file(path, "r")
            except IOError:
                error_message("Cannot load header template from file \"%s\" " \
                    " - using default template." % path, cls='missing-file')

        if not template_file:
            path = path_to_local_file("defaultheader.py")
            template_file = file(path, "r")

        template_subs = self.template_subs()
        self.file.write(template_file.read() % template_subs)
        template_file.close()

    def print_preamble(self):
        """Copy the bundled preamble (ctypes helper classes) into the output."""
        path = path_to_local_file("preamble.py")
        print >>self.file, "# Begin preamble"
        print >>self.file
        preamble_file = file(path, "r")
        self.file.write(preamble_file.read())
        preamble_file.close()
        print >>self.file
        print >>self.file, "# End preamble"

    def print_loader(self):
        """Copy the library loader code and emit search-dir setup."""
        print >>self.file, "_libs = {}"
        print >>self.file, "_libdirs = %s" % self.options.compile_libdirs
        print >>self.file
        print >>self.file, "# Begin loader"
        print >>self.file
        path = path_to_local_file("libraryloader.py",
                                  ctypesgencore.libraryloader)
        loader_file = file(path, "r")
        self.file.write(loader_file.read())
        loader_file.close()
        print >>self.file
        print >>self.file, "# End loader"
        print >>self.file
        print >>self.file, "add_library_search_dirs([%s])" % \
            ", ".join([repr(d) for d in self.options.runtime_libdirs])

    def print_library(self, library):
        """Emit the load_library call for one shared library."""
        print >>self.file, '_libs["%s"] = load_library("%s")' % (library, library)

    def print_module(self, module):
        """Emit a star-import of a linked module."""
        # BUGFIX: the original interpolated the undefined variable `name`,
        # raising NameError whenever a linked module was printed.
        print >>self.file, 'from %s import *' % module

    def print_constant(self, constant):
        """Emit 'NAME = value' (trailing comma: srcinfo completes the line)."""
        print >>self.file, '%s = %s' % \
            (constant.name, constant.value.py_string(False)),
        self.srcinfo(constant.src)

    def print_typedef(self, typedef):
        """Emit 'NAME = ctype' for a typedef."""
        print >>self.file, '%s = %s' % \
            (typedef.name, typedef.ctype.py_string()),
        self.srcinfo(typedef.src)

    def print_struct(self, struct):
        """Emit an empty Structure/Union subclass; members are attached
        later by print_struct_members so self-referential types work."""
        self.srcinfo(struct.src)
        base = {'union': 'Union', 'struct': 'Structure'}[struct.variety]
        print >>self.file, 'class %s_%s(%s):' % \
            (struct.variety, struct.tag, base)
        print >>self.file, ' pass'

    def print_struct_members(self, struct):
        """Emit __slots__, _anonymous_ and _fields_ for a struct/union,
        inventing 'unnamed_N' names for anonymous members."""
        if struct.opaque: return

        # handle unnamed fields.
        unnamed_fields = []
        names = set([x[0] for x in struct.members])
        anon_prefix = "unnamed_"
        n = 1
        for mi in range(len(struct.members)):
            mem = list(struct.members[mi])
            if mem[0] is None:
                while True:
                    name = "%s%i" % (anon_prefix, n)
                    n += 1
                    if name not in names:
                        break
                mem[0] = name
                names.add(name)
                unnamed_fields.append(name)
                struct.members[mi] = mem

        print >>self.file, '%s_%s.__slots__ = [' % (struct.variety, struct.tag)
        for name, ctype in struct.members:
            print >>self.file, " '%s'," % name
        print >>self.file, ']'

        if len(unnamed_fields) > 0:
            print >>self.file, '%s_%s._anonymous_ = [' % (struct.variety,
                                                          struct.tag)
            for name in unnamed_fields:
                print >>self.file, " '%s'," % name
            print >>self.file, ']'

        print >>self.file, '%s_%s._fields_ = [' % (struct.variety, struct.tag)
        for name, ctype in struct.members:
            if isinstance(ctype, CtypesBitfield):
                print >>self.file, " ('%s', %s, %s)," % \
                    (name, ctype.py_string(), ctype.bitfield.py_string(False))
            else:
                print >>self.file, " ('%s', %s)," % (name, ctype.py_string())
        print >>self.file, ']'

    def print_enum(self, enum):
        """Emit an enum as a plain c_int alias."""
        print >>self.file, 'enum_%s = c_int' % enum.tag,
        self.srcinfo(enum.src)
        # Values of enumerator are output as constants.

    def print_function(self, function):
        """Dispatch to the variadic or fixed-arity emitter."""
        if function.variadic:
            self.print_variadic_function(function)
        else:
            self.print_fixed_function(function)

    def print_fixed_function(self, function):
        """Emit binding code (argtypes/restype) for a fixed-arity function."""
        self.srcinfo(function.src)

        # If we know what library the function lives in, look there.
        # Otherwise, check all the libraries.
        if function.source_library:
            print >>self.file, "if hasattr(_libs[%r], %r):" % \
                (function.source_library, function.c_name())
            print >>self.file, " %s = _libs[%r].%s" % \
                (function.py_name(), function.source_library, function.c_name())
        else:
            print >>self.file, "for _lib in _libs.itervalues():"
            print >>self.file, " if not hasattr(_lib, %r):" % function.c_name()
            print >>self.file, " continue"
            print >>self.file, " %s = _lib.%s" % \
                (function.py_name(), function.c_name())

        # Argument types
        print >>self.file, " %s.argtypes = [%s]" % (function.py_name(),
            ', '.join([a.py_string() for a in function.argtypes]))

        # Return value
        if function.restype.py_string() == "String":
            print >>self.file, " if sizeof(c_int) == sizeof(c_void_p):"
            print >>self.file, " %s.restype = ReturnString" % \
                (function.py_name())
            print >>self.file, " else:"
            print >>self.file, " %s.restype = %s" % \
                (function.py_name(), function.restype.py_string())
            print >>self.file, " %s.errcheck = ReturnString" % \
                (function.py_name())
        else:
            print >>self.file, " %s.restype = %s" % \
                (function.py_name(), function.restype.py_string())

        if not function.source_library:
            print >>self.file, " break"

    def print_variadic_function(self, function):
        """Emit a _variadic_function wrapper for a varargs function."""
        self.srcinfo(function.src)
        if function.source_library:
            print >>self.file, "if hasattr(_libs[%r], %r):" % \
                (function.source_library, function.c_name())
            print >>self.file, " _func = _libs[%r].%s" % \
                (function.source_library, function.c_name())
            print >>self.file, " _restype = %s" % function.restype.py_string()
            print >>self.file, " _argtypes = [%s]" % \
                ', '.join([a.py_string() for a in function.argtypes])
            print >>self.file, " %s = _variadic_function(_func,_restype,_argtypes)" % \
                function.py_name()
        else:
            print >>self.file, "for _lib in _libs.values():"
            print >>self.file, " if hasattr(_lib, %r):" % function.c_name()
            print >>self.file, " _func = _lib.%s" % \
                (function.c_name())
            print >>self.file, " _restype = %s" % function.restype.py_string()
            print >>self.file, " _argtypes = [%s]" % \
                ', '.join([a.py_string() for a in function.argtypes])
            print >>self.file, " %s = _variadic_function(_func,_restype,_argtypes)" % \
                function.py_name()

    def print_variable(self, variable):
        """Emit in_dll binding code for a global variable, wrapped in a
        try/except because the symbol may be missing at runtime."""
        self.srcinfo(variable.src)
        if variable.source_library:
            print >>self.file, 'try:'
            print >>self.file, ' %s = (%s).in_dll(_libs[%r], %r)' % \
                (variable.py_name(),
                 variable.ctype.py_string(),
                 variable.source_library,
                 variable.c_name())
            print >>self.file, 'except:'
            print >>self.file, ' pass'
        else:
            print >>self.file, "for _lib in _libs.values():"
            print >>self.file, ' try:'
            print >>self.file, ' %s = (%s).in_dll(_lib, %r)' % \
                (variable.py_name(),
                 variable.ctype.py_string(),
                 variable.c_name())
            print >>self.file, " break"
            print >>self.file, ' except:'
            print >>self.file, ' pass'

    def print_macro(self, macro):
        """Dispatch between object-like and function-like macro emitters."""
        if macro.params:
            self.print_func_macro(macro)
        else:
            self.print_simple_macro(macro)

    def print_simple_macro(self, macro):
        # The macro translator makes heroic efforts but it occasionally fails.
        # We want to contain the failures as much as possible.
        # Hence the try statement.
        self.srcinfo(macro.src)
        print >>self.file, "try:"
        print >>self.file, " %s = %s" % (macro.name, macro.expr.py_string(True))
        print >>self.file, "except:"
        print >>self.file, " pass"

    def print_func_macro(self, macro):
        """Emit a function-like macro as a plain Python function."""
        self.srcinfo(macro.src)
        print >>self.file, "def %s(%s):" % \
            (macro.name, ", ".join(macro.params))
        print >>self.file, " return %s" % macro.expr.py_string(True)

    def insert_file(self, filename):
        """Copy `filename` verbatim into the output, bracketed by comments."""
        try:
            inserted_file = file(filename, "r")
        except IOError:
            error_message("Cannot open file \"%s\". Skipped it." % filename,
                          cls='missing-file')
            # BUGFIX: actually skip the file; the original fell through and
            # hit a NameError on the undefined `inserted_file`.
            return

        print >>self.file, "# Begin \"%s\"" % filename
        print >>self.file
        self.file.write(inserted_file.read())
        print >>self.file
        print >>self.file, "# End \"%s\"" % filename
        inserted_file.close()

Wyświetl plik

@ -0,0 +1,6 @@
"""
ctypesgencore.printer.printer imports this module so that it can find the path
to defaultheader.py and preamble.py.
"""
pass

Wyświetl plik

@ -0,0 +1,12 @@
#!/usr/bin/env python
"""
This module contains functions to operate on the DeclarationCollection produced
by the parser module and prepare it for output.
A convenience function, process(), calls everything else.
"""
__all__ = ["process"]
from pipeline import process

Wyświetl plik

@ -0,0 +1,137 @@
#!/usr/bin/env python
"""
The dependencies module determines which descriptions depend on which other
descriptions.
"""
from ctypesgencore.descriptions import *
from ctypesgencore.ctypedescs import *
from ctypesgencore.messages import *
def find_dependencies(data, opts):
"""Visit each description in `data` and figure out which other descriptions
it depends on, putting the results in desc.requirements. Also find errors in
ctypedecls or expressions attached to the description and transfer them to the
description."""
# Lookup tables mapping a name to the description that provides it
# (None for names supplied by imported modules).
struct_names = {}
enum_names = {}
typedef_names = {}
ident_names = {}
# Start the lookup tables with names from imported modules
for name in opts.other_known_names:
typedef_names[name] = None
ident_names[name] = None
if name.startswith("struct_") or name.startswith("enum_"):
variety = name.split("_")[0]
tag = "_".join(name.split("_")[1:])
struct_names[(variety,tag)] = None
if name.startswith("enum_"):
# NOTE(review): keyed by the full "enum_<tag>" name here, but the
# lookups below use the bare tag -- possible mismatch, verify.
enum_names[name] = None
def depend(desc, nametable, name):
"""Try to add `name` as a requirement for `desc`, looking `name` up in
`nametable`. Returns True if found."""
if name in nametable:
requirement = nametable[name]
if requirement: desc.add_requirements([requirement])
return True
else:
return False
def find_dependencies_for(desc, kind):
"""Find all the descriptions that `desc` depends on and add them as
dependencies for `desc`. Also collect error messages regarding `desc` and
convert unlocateable descriptions into error messages."""
# Choose the type/expression roots to scan based on description kind.
if kind == "constant": roots = [desc.value]
if kind == "struct": roots = []
if kind == "struct-body": roots = [desc.ctype]
if kind == "enum": roots = []
if kind == "typedef": roots = [desc.ctype]
if kind == "function": roots = desc.argtypes + [desc.restype]
if kind == "variable": roots = [desc.ctype]
if kind == "macro":
if desc.expr: roots = [desc.expr]
else: roots = []
cstructs,cenums,ctypedefs,errors,identifiers = [], [], [], [], []
for root in roots:
s, e, t, errs, i = visit_type_and_collect_info(root)
cstructs.extend(s)
cenums.extend(e)
ctypedefs.extend(t)
errors.extend(errs)
identifiers.extend(i)
unresolvables = []
# A struct/enum mentioning itself is not a dependency.
for cstruct in cstructs:
if kind == "struct" and desc.variety == cstruct.variety and \
desc.tag == cstruct.tag:
continue
if not depend(desc, struct_names, (cstruct.variety, cstruct.tag)):
unresolvables.append("%s \"%s\"" % \
(cstruct.variety, cstruct.tag))
for cenum in cenums:
if kind == "enum" and desc.tag == cenum.tag:
continue
if not depend(desc, enum_names, cenum.tag):
unresolvables.append("enum \"%s\"" % cenum.tag)
for ctypedef in ctypedefs:
if not depend(desc, typedef_names, ctypedef):
unresolvables.append("typedef \"%s\"" % ctypedef)
for ident in identifiers:
# A macro's own parameters are not external identifiers.
if isinstance(desc, MacroDescription) and \
desc.params and ident in desc.params:
continue
if not depend(desc, ident_names, ident):
unresolvables.append("identifier \"%s\"" % ident)
for u in unresolvables:
errors.append(("%s depends on an unknown %s." % \
(desc.casual_name(), u), None))
for err, cls in errors:
err += " %s will not be output" % desc.casual_name()
desc.error(err, cls = cls)
def add_to_lookup_table(desc, kind):
"""Add `desc` to the lookup table so that other descriptions that use
it can find it."""
# First definition of a name wins; later duplicates are ignored.
if kind == "struct":
if (desc.variety, desc.tag) not in struct_names:
struct_names[(desc.variety, desc.tag)] = desc
if kind == "enum":
if desc.tag not in enum_names:
enum_names[desc.tag] = desc
if kind == "typedef":
if desc.name not in typedef_names:
typedef_names[desc.name] = desc
if kind in ("function", "constant", "variable", "macro"):
if desc.name not in ident_names:
ident_names[desc.name] = desc
# Macros are handled differently from everything else because macros can
# call other macros that are referenced after them in the input file, but
# no other type of description can look ahead like that.
for kind, desc in data.output_order:
if kind!="macro":
find_dependencies_for(desc, kind)
add_to_lookup_table(desc, kind)
for kind, desc in data.output_order:
if kind=="macro":
add_to_lookup_table(desc, kind)
for kind, desc in data.output_order:
if kind=="macro":
find_dependencies_for(desc, kind)

Wyświetl plik

@ -0,0 +1,200 @@
#!/usr/bin/env python
"""
The operations module contains various functions to process the
DescriptionCollection and prepare it for output.
ctypesgencore.processor.pipeline calls the operations module.
"""
import ctypes, re, os, sys, keyword
from ctypesgencore.descriptions import *
from ctypesgencore.messages import *
import ctypesgencore.libraryloader
# Processor functions
def automatically_typedef_structs(data,options):
"""automatically_typedef_structs() aliases "struct_<tag>" to "<tag>" for
every struct and union."""
# XXX Check if it has already been aliased in the C code.
for struct in data.structs:
if not struct.ctype.anonymous: # Don't alias anonymous structs
typedef=TypedefDescription(struct.tag,
struct.ctype,
src=struct.src)
# The synthesized alias can only be emitted if its struct is.
typedef.add_requirements(set([struct]))
data.typedefs.append(typedef)
# Keep the alias right after its struct in every ordering.
data.all.insert(data.all.index(struct)+1,typedef)
data.output_order.append(("typedef", typedef))
def remove_NULL(data, options):
    """Suppress any NULL macro found in the C headers; ctypesgen supplies
    its own NULL definition, so a header-provided one would clash."""
    null_macros = (m for m in data.macros if m.name == "NULL")
    for macro in null_macros:
        macro.include_rule = "never"
def remove_descriptions_in_system_headers(data,opts):
    """Demote descriptions that did not come from one of the header files
    named on the command line to include_rule="if_needed".

    Built-in and command-line definitions are demoted as well (unless
    --builtin-symbols is set); --all-headers keeps everything."""
    known_headers = [os.path.basename(header) for header in opts.headers]
    for description in data.all:
        if description.src is None:
            continue
        filename = description.src[0]
        if filename == "<command line>":
            description.include_rule = "if_needed"
        elif filename == "<built-in>":
            if not opts.builtin_symbols:
                description.include_rule = "if_needed"
        elif os.path.basename(filename) not in known_headers:
            if not opts.all_headers:
                # If something else requires this, include it even though
                # it is in a system header file.
                description.include_rule = "if_needed"
def remove_macros(data,opts):
    """Drop every macro from the output when --no-macros was given."""
    if opts.include_macros:
        return
    for macro in data.macros:
        macro.include_rule = "never"
def filter_by_regexes_exclude(data,opts):
    """Mark every symbol whose Python name matches --exclude-symbols as
    never included. No-op when the option is unset."""
    if not opts.exclude_symbols:
        return
    pattern = re.compile(opts.exclude_symbols)
    for item in data.all:
        if pattern.match(item.py_name()):
            item.include_rule = "never"
def filter_by_regexes_include(data,opts):
    """Force-include symbols whose Python name matches --include-symbols,
    unless an earlier pass hard-excluded them ("never")."""
    if not opts.include_symbols:
        return
    pattern = re.compile(opts.include_symbols)
    for item in data.all:
        if item.include_rule != "never" and pattern.match(item.py_name()):
            item.include_rule = "yes"
def fix_conflicting_names(data,opts):
"""If any descriptions from the C code would overwrite Python builtins or
other important names, fix_conflicting_names() adds underscores to resolve
the name conflict."""
# This is the order of priority for names
descriptions = data.functions + data.variables + data.structs + \
data.typedefs + data.enums + data.constants + data.macros
# This dictionary maps names to a string representing where the name
# came from.
important_names={}
# Names defined by the loader/preamble code copied into every wrapper.
preamble_names=set()
preamble_names=preamble_names.union(['DarwinLibraryLoader',
'LibraryLoader', 'LinuxLibraryLoader', 'WindowsLibraryLoader',
'_WindowsLibrary', 'add_library_search_dirs', '_environ_path', 'ctypes',
'load_library', 'loader', 'os', 're', 'sys'])
preamble_names=preamble_names.union(['ArgumentError', 'CFUNCTYPE',
'POINTER', 'ReturnString', 'String', 'Structure', 'UNCHECKED', 'Union',
'UserString', '_variadic_function', 'addressof', 'c_buffer', 'c_byte',
'c_char', 'c_char_p', 'c_double', 'c_float', 'c_int', 'c_int16',
'c_int32', 'c_int64', 'c_int8', 'c_long', 'c_longlong', 'c_ptrdiff_t',
'c_short', 'c_size_t', 'c_ubyte', 'c_uint', 'c_uint16', 'c_uint32',
'c_uint64', 'c_uint8', 'c_ulong', 'c_ulonglong', 'c_ushort', 'c_void',
'c_void_p', 'c_voidp', 'c_wchar', 'c_wchar_p', 'cast', 'ctypes', 'os',
'pointer', 'sizeof'])
for name in preamble_names:
important_names[name] = "a name needed by ctypes or ctypesgen"
# NOTE(review): inside a module, __builtins__ may be a module or a dict;
# dir() handles both, so this works either way.
for name in dir(__builtins__): important_names[name] = "a Python builtin"
for name in opts.other_known_names:
important_names[name] = "a name from an included Python module"
for name in keyword.kwlist: important_names[name] = "a Python keyword"
for description in descriptions:
if description.py_name() in important_names:
conflict_name = important_names[description.py_name()]
original_name=description.casual_name()
# Keep renaming until the name no longer collides: structs/enums
# get a trailing underscore on the tag, everything else a
# leading underscore on the name.
while description.py_name() in important_names:
if isinstance(description,
(StructDescription, EnumDescription)):
description.tag+="_"
else:
description.name="_"+description.name
if not description.dependents:
description.warning("%s has been renamed to %s due to a name " \
"conflict with %s." % \
(original_name,
description.casual_name(),
conflict_name),
cls = 'rename')
else:
# Dependents reference the old name, so they cannot be output.
description.warning("%s has been renamed to %s due to a name " \
"conflict with %s. Other objects depend on %s - those " \
"objects will be skipped." % \
(original_name, description.casual_name(),
conflict_name, original_name),
cls = 'rename')
for dependent in description.dependents:
dependent.include_rule = "never"
# The new name is now reserved so later descriptions can't take it.
if description.include_rule=="yes":
important_names[description.py_name()] = \
description.casual_name()
# Names of struct members don't conflict with much, but they can conflict
# with Python keywords.
for struct in data.structs:
if not struct.opaque:
for i,(name,type) in enumerate(struct.members):
if name in keyword.kwlist:
struct.members[i] = ("_"+name,type)
struct.warning("Member \"%s\" of %s has been renamed to " \
"\"%s\" because it has the same name as a Python " \
"keyword." % (name, struct.casual_name(), "_"+name),
cls = 'rename')
# Macro arguments may have names that conflict with Python keywords.
# In a perfect world, this would simply rename the parameter instead
# of throwing an error message.
for macro in data.macros:
if macro.params:
for param in macro.params:
if param in keyword.kwlist:
macro.error("One of the parameters to %s, \"%s\" has the " \
"same name as a Python keyword. %s will be skipped." % \
(macro.casual_name(), param, macro.casual_name()),
cls = 'name-conflict')
def find_source_libraries(data,opts):
"""find_source_libraries() determines which library contains each function
and variable."""
all_symbols=data.functions+data.variables
# Unknown until a loaded library is found that exports the symbol.
for symbol in all_symbols:
symbol.source_library=None
ctypesgencore.libraryloader.add_library_search_dirs(opts.compile_libdirs)
for library_name in opts.libraries:
try:
library=ctypesgencore.libraryloader.load_library(library_name)
except ImportError,e:
# NOTE(review): Python-2-only except syntax; also confirm that
# load_library really signals failure with ImportError.
warning_message("Could not load library \"%s\". Okay, I'll " \
"try to load it at runtime instead. " % (library_name),
cls = 'missing-library')
continue
# First library that exports a symbol is recorded as its source.
for symbol in all_symbols:
if symbol.source_library==None:
if hasattr(library,symbol.c_name()):
symbol.source_library=library_name

Wyświetl plik

@ -0,0 +1,135 @@
#!/usr/bin/env python
import ctypes, re, os
from ctypesgencore.processor.operations import *
from ctypesgencore.processor.dependencies import find_dependencies
from ctypesgencore.ctypedescs import *
from ctypesgencore.messages import *
"""
A brief explanation of the processing steps:
1. The dependencies module builds a dependency graph for the descriptions.
2. Operation functions are called to perform various operations on the
descriptions. The operation functions are found in operations.py.
3. If an operation function decides to exclude a description from the output, it
sets 'description.include_rule' to "never"; if an operation function decides not
to include a description by default, but to allow if required, it sets
'description.include_rule' to "if_needed".
4. If an operation function encounters an error that makes a description unfit
for output, it appends a string error message to 'description.errors'.
'description.warnings' is a list of warning messages that will be displayed but
will not prevent the description from being output.
5. Based on 'description.include_rule', calculate_final_inclusion() decides
which descriptions to include in the output. It sets 'description.included' to
True or False.
6. For each description, print_errors_encountered() checks if there are error
messages in 'description.errors'. If so, print_errors_encountered() prints the
error messages, but only if 'description.included' is True - it doesn't bother
the user with error messages regarding descriptions that would not be in the
output anyway. It also prints 'description.warnings'.
7. calculate_final_inclusion() is called again to recalculate based on
the errors that print_errors_encountered() has flagged.
"""
def process(data,options):
# Top-level driver: run every processing pass, in order, over the
# description collection (see the module docstring above for the
# numbered explanation of these steps).
status_message("Processing description list.")
find_dependencies(data,options)
automatically_typedef_structs(data,options)
remove_NULL(data, options)
remove_descriptions_in_system_headers(data,options)
filter_by_regexes_exclude(data,options)
filter_by_regexes_include(data,options)
remove_macros(data,options)
if options.output_language == "python":
# this function is python specific
fix_conflicting_names(data,options)
find_source_libraries(data,options)
# Decide inclusion, report errors (which may flag further exclusions),
# then recalculate with those exclusions applied.
calculate_final_inclusion(data,options)
print_errors_encountered(data,options)
calculate_final_inclusion(data,options)
def calculate_final_inclusion(data,opts):
    """calculate_final_inclusion() calculates which descriptions will be
    included in the output library, setting desc.included on each one.

    An object with include_rule="never" is never included.
    An object with include_rule="yes" is included if its requirements can be
    included.
    An object with include_rule="if_needed" is included if an object to be
    included requires it and if its requirements can be included.
    """
    def can_include_desc(desc):
        # Memoized on desc.can_include; None means "not yet decided".
        if desc.can_include == None:
            # BUGFIX: the rest of the pipeline sets include_rule to
            # "never" (see operations.py), but the original only tested
            # the value "no", so excluded descriptions were never cached
            # as False. Both spellings are now treated as excluded; the
            # final included/excluded outcome is unchanged.
            if desc.include_rule == "never" or desc.include_rule == "no":
                desc.can_include = False
            elif desc.include_rule == "yes" or desc.include_rule == "if_needed":
                desc.can_include = True
                for req in desc.requirements:
                    if not can_include_desc(req):
                        desc.can_include = False
        return desc.can_include

    def do_include_desc(desc):
        # Mark desc and (recursively) everything it requires as included.
        if desc.included:
            return  # We've already been here
        desc.included = True
        for req in desc.requirements:
            do_include_desc(req)

    for desc in data.all:
        desc.can_include = None  # None means "Not Yet Decided"
        desc.included = False

    for desc in data.all:
        if desc.include_rule == "yes":
            if can_include_desc(desc):
                do_include_desc(desc)
def print_errors_encountered(data,opts):
    """Report the errors and warnings collected on each description.

    Only descriptions that were included (or all of them, with
    opts.show_all_errors) are reported.  Any description with errors is then
    marked include_rule="never" so the follow-up calculate_final_inclusion()
    pass drops it from the output.

    See descriptions.py for an explanation of the error-handling mechanism.
    """
    for desc in data.all:
        # If description would not have been included, don't bother user by
        # printing warnings.
        if desc.included or opts.show_all_errors:
            if opts.show_long_errors or len(desc.errors)+len(desc.warnings)<=2:
                # Few messages (or verbose mode): print them all.
                for (error,cls) in desc.errors:
                    # Macro errors will always be displayed as warnings.
                    if isinstance(desc, MacroDescription):
                        if opts.show_macro_warnings:
                            warning_message(error,cls)
                    else:
                        error_message(error,cls)
                for (warning,cls) in desc.warnings:
                    warning_message(warning,cls)
            else:
                # Many messages: print the first one plus a summary count.
                if desc.errors:
                    error1,cls1 = desc.errors[0]
                    error_message(error1,cls1)
                    numerrs = len(desc.errors)-1
                    numwarns = len(desc.warnings)
                    if numwarns:
                        error_message("%d more errors and %d more warnings " \
                            "for %s" % (numerrs,numwarns,desc.casual_name()))
                    else:
                        error_message("%d more errors for %s " % \
                            (numerrs,desc.casual_name()))
                else:
                    warning1,cls1 = desc.warnings[0]
                    warning_message(warning1,cls1)
                    # BUG FIX: this branch counts remaining *warnings*; the
                    # message previously said "more errors".
                    warning_message("%d more warnings for %s" % \
                        (len(desc.warnings)-1, desc.casual_name()))
        if desc.errors:
            # process() will recalculate to take this into account
            desc.include_rule = "never"

Wyświetl plik

@ -0,0 +1,27 @@
/*
** Trivial ctypesgen demo library consumer
** from http://code.google.com/p/ctypesgen
**
** This demoapp itself is not useful; it is a sanity check for the library.
**
** Build static: cc -o demoapp demoapp.c demolib.c demolib.h
**
*/
#include <stdlib.h>
#include <stdio.h>
#include "demolib.h"
/*
 * Exercise trivial_add() and print the operands and the result.
 * Exists purely as a sanity check that the demo library links and runs.
 *
 * Returns EXIT_SUCCESS.
 */
int main(int argc, char **argv)
{
	int a = 1;
	int b = 2;
	int result = 0;

	(void)argc;  /* unused */
	(void)argv;  /* unused */

	result = trivial_add(a, b);
	printf("a %d\n", a);
	printf("b %d\n", b);
	printf("result %d\n", result);

	/* BUG FIX: main is declared int but had no return statement. */
	return EXIT_SUCCESS;
}

Wyświetl plik

@ -0,0 +1,40 @@
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
"""
Trivial ctypesgen demo library consumer
from http://code.google.com/p/ctypesgen
NOTE pydemolib.py needs to be generated via:
../ctypesgen.py -o pydemolib.py -l demolib demolib.h
../ctypesgen.py -o pydemolib.py -l demolib.so demolib.h
"""
import sys
import pydemolib # generated from demolib.h by ctypesgen
def do_demo():
    """Call trivial_add() through the generated ctypes wrapper and print
    the operands and the result (mirrors the C demoapp)."""
    a = 1
    b = 2
    result = pydemolib.trivial_add(a, b)
    print "a", a
    print "b", b
    print "result", result
def main(argv=None):
    """Entry point.  argv is accepted by convention but currently unused
    beyond defaulting to sys.argv.  Returns 0 (process exit status)."""
    if argv is None:
        argv = sys.argv
    do_demo()
    return 0
if __name__ == "__main__":
sys.exit(main())

Wyświetl plik

@ -0,0 +1,20 @@
/*
** Trivial ctypesgen demo library
** from http://code.google.com/p/ctypesgen
Dumb manual build with:
gcc -fPIC -c demolib.c
gcc -shared -o demolib.so demolib.o
gcc -fPIC -shared -o demolib.so demolib.c
*/
#include "demolib.h"
/* Return the sum of the two int arguments. */
int trivial_add(int a, int b)
{
	int sum;

	sum = a + b;
	return sum;
}

Wyświetl plik

@ -0,0 +1,6 @@
/*
** Trivial ctypesgen demo library
** from http://code.google.com/p/ctypesgen
*/
/* Added include guard: the header had none, so double inclusion would
** redeclare the prototype (harmless here, but bad practice). */
#ifndef DEMOLIB_H
#define DEMOLIB_H

/* Return the sum of a and b. */
int trivial_add(int a, int b);

#endif /* DEMOLIB_H */

Wyświetl plik

@ -0,0 +1,606 @@
'''Wrapper for demolib.h
Generated with:
../ctypesgen.py -o pydemolib.py -l demolib.so demolib.h
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
# Begin preamble
import ctypes, os, sys
from ctypes import *
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
del t
del _int_types
class c_void(Structure):
    """Stand-in for C's void so that POINTER(c_void) can model void*."""
    # c_void_p is a buggy return type, converting to int, so
    # POINTER(None) == c_void_p is actually written as
    # POINTER(c_void), so it can be treated as a real pointer.
    _fields_ = [('dummy', c_int)]
def POINTER(obj):
    """Wrap ctypes.POINTER so the resulting pointer class accepts None.

    Installs a from_param classmethod that converts a None argument into a
    default-constructed (NULL) pointer instance; all other values pass
    through unchanged.
    """
    p = ctypes.POINTER(obj)
    # Convert None to a real NULL pointer to work around bugs
    # in how ctypes handles None on 64-bit platforms
    if not isinstance(p.from_param, classmethod):
        def from_param(cls, x):
            if x is None:
                return cls()
            else:
                return x
        p.from_param = classmethod(from_param)
    return p
class UserString:
def __init__(self, seq):
if isinstance(seq, basestring):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, basestring):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def decode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.decode(encoding, errors))
else:
return self.__class__(self.data.decode(encoding))
else:
return self.__class__(self.data.decode())
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and than forget thereby to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError("unhashable type (it is mutable)")
def __setitem__(self, index, sub):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, basestring):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, basestring):
self.data += other
else:
self.data += str(other)
return self
def __imul__(self, n):
self.data *= n
return self
class String(MutableString, Union):
    """ctypes-compatible mutable string: a Union of a raw char pointer
    ('raw') and a c_char_p ('data'), with from_param() conversions from
    None/0, str, c_char_p, POINTER(c_char), raw int addresses and objects
    exposing _as_parameter_."""
    _fields_ = [('raw', POINTER(c_char)),
                ('data', c_char_p)]
    def __init__(self, obj=""):
        if isinstance(obj, (str, unicode, UserString)):
            self.data = str(obj)
        else:
            # Anything non-string-like is assumed to be a char pointer.
            self.raw = obj
    def __len__(self):
        # NULL 'data' (falsy) yields length 0.
        return self.data and len(self.data) or 0
    def from_param(cls, obj):
        # Convert None or 0
        if obj is None or obj == 0:
            return cls(POINTER(c_char)())
        # Convert from String
        elif isinstance(obj, String):
            return obj
        # Convert from str
        elif isinstance(obj, str):
            return cls(obj)
        # Convert from c_char_p
        elif isinstance(obj, c_char_p):
            return obj
        # Convert from POINTER(c_char)
        elif isinstance(obj, POINTER(c_char)):
            return obj
        # Convert from raw pointer
        elif isinstance(obj, int):
            return cls(cast(obj, POINTER(c_char)))
        # Convert from object
        else:
            return String.from_param(obj._as_parameter_)
    from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
    """errcheck helper: coerce a returned char pointer into a String.
    func/arguments are accepted per the ctypes errcheck signature but unused."""
    return String.from_param(obj)
# As of ctypes 1.0, ctypes does not support custom error-checking
# functions on callbacks, nor does it support custom datatypes on
# callbacks, so we must ensure that all callbacks return
# primitive datatypes.
#
# Non-primitive return values wrapped with UNCHECKED won't be
# typechecked, and will be converted to c_void_p.
def UNCHECKED(type):
    """Return `type` unchanged if it is a primitive ctypes type usable as a
    callback return type; otherwise substitute c_void_p (ctypes cannot
    typecheck non-primitive callback return values — see comment above)."""
    is_primitive = (hasattr(type, "_type_")
                    and isinstance(type._type_, str)
                    and type._type_ != "P")
    if is_primitive:
        return type
    return c_void_p
# ctypes doesn't have direct support for variadic functions, so we have to write
# our own wrapper class
class _variadic_function(object):
    """Wrapper for a variadic foreign function.

    The declared fixed arguments (argtypes) are converted via each type's
    from_param(); any extra (variadic) arguments are passed through to the
    underlying function unchanged.
    """
    def __init__(self,func,restype,argtypes):
        self.func=func
        self.func.restype=restype
        self.argtypes=argtypes
    def _as_parameter_(self):
        # So we can pass this variadic function as a function pointer
        return self.func
    def __call__(self,*args):
        fixed_args=[]
        i=0
        for argtype in self.argtypes:
            # Typecheck what we can
            fixed_args.append(argtype.from_param(args[i]))
            i+=1
        return self.func(*fixed_args+list(args[i:]))
# End preamble
_libs = {}
_libdirs = []
# Begin loader
# ----------------------------------------------------------------------------
# Copyright (c) 2008 David James
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os.path, re, sys, glob
import ctypes
import ctypes.util
def _environ_path(name):
if name in os.environ:
return os.environ[name].split(":")
else:
return []
class LibraryLoader(object):
    """Base class for the per-platform library loaders.

    Subclasses override getplatformpaths() to yield candidate paths in
    platform-specific search order.  other_dirs holds extra user-supplied
    search directories (populated via add_library_search_dirs()).
    """
    def __init__(self):
        # Extra directories to search, consulted by subclasses.
        self.other_dirs=[]
    def load_library(self,libname):
        """Given the name of a library, load it.

        Tries each candidate path from getpaths() and loads the first one
        that exists; raises ImportError if none do.
        """
        paths = self.getpaths(libname)
        for path in paths:
            if os.path.exists(path):
                return self.load(path)
        raise ImportError("%s not found." % libname)
    def load(self,path):
        """Given a path to a library, load it.  OSError from dlopen is
        re-raised as ImportError."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
            # of the default RTLD_LOCAL. Without this, you end up with
            # libraries not being loadable, resulting in "Symbol not found"
            # errors
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            else:
                return ctypes.cdll.LoadLibrary(path)
        except OSError,e:
            raise ImportError(e)
    def getpaths(self,libname):
        """Return a list of paths where the library might be found."""
        if os.path.isabs(libname):
            # Absolute paths are taken as-is.
            yield libname
        else:
            # FIXME / TODO return '.' and os.path.dirname(__file__)
            for path in self.getplatformpaths(libname):
                yield path
            path = ctypes.util.find_library(libname)
            if path: yield path
    def getplatformpaths(self, libname):
        # Overridden by subclasses; the base class offers no
        # platform-specific candidates.
        return []
# Darwin (Mac OS X)
class DarwinLibraryLoader(LibraryLoader):
name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
"%s.so", "%s.bundle", "%s"]
def getplatformpaths(self,libname):
if os.path.pathsep in libname:
names = [libname]
else:
names = [format % libname for format in self.name_formats]
for dir in self.getdirs(libname):
for name in names:
yield os.path.join(dir,name)
def getdirs(self,libname):
'''Implements the dylib search as specified in Apple documentation:
http://developer.apple.com/documentation/DeveloperTools/Conceptual/
DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html
Before commencing the standard search, the method first checks
the bundle's ``Frameworks`` directory if the application is running
within a bundle (OS X .app).
'''
dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
if not dyld_fallback_library_path:
dyld_fallback_library_path = [os.path.expanduser('~/lib'),
'/usr/local/lib', '/usr/lib']
dirs = []
if '/' in libname:
dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
else:
dirs.extend(_environ_path("LD_LIBRARY_PATH"))
dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
dirs.extend(self.other_dirs)
dirs.append(".")
dirs.append(os.path.dirname(__file__))
if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
dirs.append(os.path.join(
os.environ['RESOURCEPATH'],
'..',
'Frameworks'))
dirs.extend(dyld_fallback_library_path)
return dirs
# Posix
class PosixLibraryLoader(LibraryLoader):
_ld_so_cache = None
def _create_ld_so_cache(self):
# Recreate search path followed by ld.so. This is going to be
# slow to build, and incorrect (ld.so uses ld.so.cache, which may
# not be up-to-date). Used only as fallback for distros without
# /sbin/ldconfig.
#
# We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
directories = []
for name in ("LD_LIBRARY_PATH",
"SHLIB_PATH", # HPUX
"LIBPATH", # OS/2, AIX
"LIBRARY_PATH", # BE/OS
):
if name in os.environ:
directories.extend(os.environ[name].split(os.pathsep))
directories.extend(self.other_dirs)
directories.append(".")
directories.append(os.path.dirname(__file__))
try: directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
except IOError: pass
directories.extend(['/lib', '/usr/lib', '/lib64', '/usr/lib64'])
cache = {}
lib_re = re.compile(r'lib(.*)\.s[ol]')
ext_re = re.compile(r'\.s[ol]$')
for dir in directories:
try:
for path in glob.glob("%s/*.s[ol]*" % dir):
file = os.path.basename(path)
# Index by filename
if file not in cache:
cache[file] = path
# Index by library name
match = lib_re.match(file)
if match:
library = match.group(1)
if library not in cache:
cache[library] = path
except OSError:
pass
self._ld_so_cache = cache
def getplatformpaths(self, libname):
if self._ld_so_cache is None:
self._create_ld_so_cache()
result = self._ld_so_cache.get(libname)
if result: yield result
path = ctypes.util.find_library(libname)
if path: yield os.path.join("/lib",path)
# Windows
class _WindowsLibrary(object):
    """Load a DLL under both calling conventions: attribute lookup tries
    cdecl (cdll) first, then stdcall (windll)."""
    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)
    def __getattr__(self, name):
        try: return getattr(self.cdll,name)
        except AttributeError:
            try: return getattr(self.windll,name)
            except AttributeError:
                raise
class WindowsLibraryLoader(LibraryLoader):
name_formats = ["%s.dll", "lib%s.dll", "%slib.dll"]
def load_library(self, libname):
try:
result = LibraryLoader.load_library(self, libname)
except ImportError:
result = None
if os.path.sep not in libname:
for name in self.name_formats:
try:
result = getattr(ctypes.cdll, name % libname)
if result:
break
except WindowsError:
result = None
if result is None:
try:
result = getattr(ctypes.cdll, libname)
except WindowsError:
result = None
if result is None:
raise ImportError("%s not found." % libname)
return result
def load(self, path):
return _WindowsLibrary(path)
def getplatformpaths(self, libname):
if os.path.sep not in libname:
for name in self.name_formats:
dll_in_current_dir = os.path.abspath(name % libname)
if os.path.exists(dll_in_current_dir):
yield dll_in_current_dir
path = ctypes.util.find_library(name % libname)
if path:
yield path
# Platform switching
# If your value of sys.platform does not appear in this dict, please contact
# the Ctypesgen maintainers.
loaderclass = {
"darwin": DarwinLibraryLoader,
"cygwin": WindowsLibraryLoader,
"win32": WindowsLibraryLoader
}
loader = loaderclass.get(sys.platform, PosixLibraryLoader)()
def add_library_search_dirs(other_dirs):
    """Set extra directories for the platform loader to search.

    NOTE(review): this *replaces* loader.other_dirs rather than appending
    to it — confirm callers expect that.
    """
    loader.other_dirs = other_dirs
load_library = loader.load_library
del loaderclass
# End loader
add_library_search_dirs([])
# Begin libraries
_libs["demolib.so"] = load_library("demolib.so")
# 1 libraries
# End libraries
# No modules
# /home/clach04/dev/python/ctypesgen/demo/demolib.h: 6
if hasattr(_libs['demolib.so'], 'trivial_add'):
trivial_add = _libs['demolib.so'].trivial_add
trivial_add.argtypes = [c_int, c_int]
trivial_add.restype = c_int
# No inserted files

Wyświetl plik

@ -0,0 +1,25 @@
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
"""Examples:
setup.py sdist
setup.py bdist_wininst
"""
from distutils.core import setup
import ctypesgencore
setup(name='ctypesgen',
version=ctypesgencore.VERSION,
description='Python wrapper generator for ctypes',
url='http://code.google.com/p/ctypesgen/',
license='BSD License',
packages=['ctypesgencore',
'ctypesgencore.parser',
'ctypesgencore.printer_python',
'ctypesgencore.printer_json',
'ctypesgencore.processor'],
scripts=['ctypesgen.py'])

Wyświetl plik

@ -0,0 +1,61 @@
import os
import sys
import StringIO
import optparse
import glob
sys.path.append(".") # Allow tests to be called from parent directory with Python 2.6
sys.path.append("..")
import ctypesgencore
"""ctypesgentest is a simple module for testing ctypesgen on various C constructs. It consists of a
single function, test(). test() takes a string that represents a C header file, along with some
keyword arguments representing options. It processes the header using ctypesgen and returns a tuple
containing the resulting module object and the output that ctypesgen produced."""
# set redirect_stdout to False if using console based debugger like pdb
redirect_stdout = True
def test(header, **more_options):
    """Run ctypesgen over the C header text `header` and import the result.

    Writes the header to temp.h, sets each keyword argument as an attribute
    on ctypesgen's default option set, runs the parse/process/print pipeline
    into temp.py, then imports it.  Returns (module, output): the generated
    "temp" module and whatever ctypesgen wrote to stdout (empty string when
    redirect_stdout is False).  Scratch files are left behind; call
    cleanup() afterwards.
    """
    assert isinstance(header, str)
    file("temp.h", "w").write(header)
    options = ctypesgencore.options.get_default_options()
    options.headers = ["temp.h"]
    for opt in more_options:
        setattr(options, opt, more_options[opt])
    if redirect_stdout:
        # Redirect output
        sys.stdout = StringIO.StringIO()
    # Step 1: Parse
    descriptions = ctypesgencore.parser.parse(options.headers, options)
    # Step 2: Process
    ctypesgencore.processor.process(descriptions, options)
    # Step 3: Print
    ctypesgencore.printer.WrapperPrinter("temp.py", options, descriptions)
    if redirect_stdout:
        # Un-redirect output
        output = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = sys.__stdout__
    else:
        output = ''
    # Load the module we have just produced
    module = __import__("temp")
    reload(module) # import twice, this hack ensures that "temp" is force loaded (there *must* be a better way to do this)
    return module, output
def cleanup(filepattern='temp.*'):
    """Delete the scratch files produced by test() (temp.h, temp.py, ...)."""
    for path in glob.glob(filepattern):
        os.unlink(path)

Wyświetl plik

@ -0,0 +1,8 @@
#!/bin/bash
# Generate parser tests outputs (assumes parser is working!)
# Reuben Thomas 26th September 2011
# This program is in the public domain.

for i in test-headers/*.h; do
    # Quote expansions so filenames with spaces or glob characters are safe.
    ../ctypesgen.py --output-language=json "$i" > "${i%%.h}.json"
done

Wyświetl plik

@ -0,0 +1 @@
int bar2(int a);

Wyświetl plik

@ -0,0 +1,21 @@
[
{
"args": [
{
"errors": [],
"longs": 0,
"name": "int",
"signed": true
}
],
"name": "bar2",
"return": {
"errors": [],
"longs": 0,
"name": "int",
"signed": true
},
"type": "function",
"variadic": false
}
]

Wyświetl plik

@ -0,0 +1 @@
int bar(int);

Wyświetl plik

@ -0,0 +1,21 @@
[
{
"args": [
{
"errors": [],
"longs": 0,
"name": "int",
"signed": true
}
],
"name": "bar",
"return": {
"errors": [],
"longs": 0,
"name": "int",
"signed": true
},
"type": "function",
"variadic": false
}
]

Wyświetl plik

@ -0,0 +1 @@
void foo(void);

Wyświetl plik

@ -0,0 +1,14 @@
[
{
"args": [],
"name": "foo",
"return": {
"errors": [],
"longs": 0,
"name": "void",
"signed": true
},
"type": "function",
"variadic": false
}
]

Wyświetl plik

@ -0,0 +1 @@
#define A 1

Wyświetl plik

@ -0,0 +1,7 @@
[
{
"name": "A",
"type": "macro",
"value": "1"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define divide_macro(x,y) x/y

Wyświetl plik

@ -0,0 +1,11 @@
[
{
"args": [
"x",
"y"
],
"body": "(x / y)",
"name": "divide_macro",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define mod_macro(x,y) x%y

Wyświetl plik

@ -0,0 +1,11 @@
[
{
"args": [
"x",
"y"
],
"body": "(x % y)",
"name": "mod_macro",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define multipler_macro(x,y) x*y

Wyświetl plik

@ -0,0 +1,11 @@
[
{
"args": [
"x",
"y"
],
"body": "(x * y)",
"name": "multipler_macro",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define B(x,y) x+y

Wyświetl plik

@ -0,0 +1,11 @@
[
{
"args": [
"x",
"y"
],
"body": "(x + y)",
"name": "B",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define funny(x) "funny" #x

Wyświetl plik

@ -0,0 +1,10 @@
[
{
"args": [
"x"
],
"body": "('funny' + x)",
"name": "funny",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define minus_macro(x,y) x-y

Wyświetl plik

@ -0,0 +1,11 @@
[
{
"args": [
"x",
"y"
],
"body": "(x - y)",
"name": "minus_macro",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1 @@
#define C(a,b,c) a?b:c

Wyświetl plik

@ -0,0 +1,12 @@
[
{
"args": [
"a",
"b",
"c"
],
"body": "a and b or c",
"name": "C",
"type": "macro_function"
}
]

Wyświetl plik

@ -0,0 +1,6 @@
struct foo
{
int a;
int b;
int c;
};

Wyświetl plik

@ -0,0 +1,79 @@
[
{
"fields": [
{
"ctype": {
"errors": [],
"longs": 0,
"name": "int",
"signed": true
},
"name": "a"
},
{
"ctype": {
"errors": [],
"longs": 0,
"name": "int",
"signed": true
},
"name": "b"
},
{
"ctype": {
"errors": [],
"longs": 0,
"name": "int",
"signed": true
},
"name": "c"
}
],
"name": "foo",
"type": "struct"
},
{
"ctype": {
"anonymous": false,
"errors": [],
"members": [
[
"a",
{
"errors": [],
"longs": 0,
"name": "int",
"signed": true
}
],
[
"b",
{
"errors": [],
"longs": 0,
"name": "int",
"signed": true
}
],
[
"c",
{
"errors": [],
"longs": 0,
"name": "int",
"signed": true
}
]
],
"opaque": false,
"src": [
"/home/rrt/repo/ctypesgen/test/test-headers/struct.h",
1
],
"tag": "foo",
"variety": "struct"
},
"name": "foo",
"type": "typedef"
}
]

Wyświetl plik

@ -0,0 +1,20 @@
#!/bin/bash
# Test JSON parser on header fragments
# Reuben Thomas 26th September 2011
# This program is in the public domain.

errs=0
for i in test-headers/*.h; do
    # Regenerate JSON for each header and compare with the stored fixture.
    ../ctypesgen.py --output-language=json "$i" > test.json 2> /dev/null
    echo "$i"
    diff "${i%%.h}.json" test.json
    # Accumulate diff's exit status.  $(( )) replaces the deprecated
    # bash-only $[ ] arithmetic syntax; expansions are quoted for safety.
    errs=$((errs + $?))
done

if [ "$errs" -eq 0 ]; then
    echo "All tests passed"
    exit 0
else
    echo "$errs errors"
    exit 1
fi

Wyświetl plik

@ -0,0 +1,326 @@
#!/usr/bin/env python
# -*- coding: ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""Simple test suite using unittest.
By clach04 (Chris Clark).
Calling:
python test/testsuite.py
or
cd test
./testsuite.py
Could use any unitest compatible test runner (nose, etc.)
Aims to test for regressions. Where possible use stdlib to
avoid the need to compile C code.
Known to run clean with:
* 32bit Linux (python 2.5.2, 2.6)
* 32bit Windows XP (python 2.4, 2.5, 2.6.1)
"""
import sys
import os
import ctypes
import math
import unittest
import logging
test_directory = os.path.abspath(os.path.dirname(__file__))
sys.path.append(test_directory)
sys.path.append(os.path.join(test_directory, '..'))
import ctypesgentest # TODO consider moving test() from ctypesgentest into this module
class StdlibTest(unittest.TestCase):
def setUp(self):
"""NOTE this is called once for each test* method
(it is not called once per class).
FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
"""
header_str = '#include <stdlib.h>\n'
if sys.platform == "win32":
# pick something from %windir%\system32\msvc*dll that include stdlib
libraries = ["msvcrt.dll"]
libraries = ["msvcrt"]
elif sys.platform.startswith("linux"):
libraries = ["libc.so.6"]
else:
libraries = ["libc"]
self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)
def tearDown(self):
del self.module
ctypesgentest.cleanup()
def test_getenv_returns_string(self):
"""Issue 8 - Regression for crash with 64 bit and bad strings on 32 bit.
See http://code.google.com/p/ctypesgen/issues/detail?id=8
Test that we get a valid (non-NULL, non-empty) string back
"""
module = self.module
if sys.platform == "win32":
# Check a variable that is already set
env_var_name = 'USERNAME' # this is always set (as is windir, ProgramFiles, USERPROFILE, etc.)
expect_result = os.environ[env_var_name]
self.assert_(expect_result, 'this should not be None or empty')
# reason for using an existing OS variable is that unless the
# MSVCRT dll imported is the exact same one that Python was
# built with you can't share structures, see
# http://msdn.microsoft.com/en-us/library/ms235460.aspx
# "Potential Errors Passing CRT Objects Across DLL Boundaries"
else:
env_var_name = 'HELLO'
os.environ[env_var_name] = 'WORLD' # This doesn't work under win32
expect_result = 'WORLD'
result = module.getenv(env_var_name)
self.failUnlessEqual(expect_result, result)
def test_getenv_returns_null(self):
"""Related to issue 8. Test getenv of unset variable.
"""
module = self.module
env_var_name = 'NOT SET'
expect_result = None
try:
# ensure variable is not set, ignoring not set errors
del os.environ[env_var_name]
except KeyError:
pass
result = module.getenv(env_var_name)
self.failUnlessEqual(expect_result, result)
class StdBoolTest(unittest.TestCase):
"Test correct parsing and generation of bool type"
def setUp(self):
"""NOTE this is called once for each test* method
(it is not called once per class).
FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
"""
header_str = '''
#include <stdbool.h>
struct foo
{
bool is_bar;
int a;
};
'''
self.module, _ = ctypesgentest.test(header_str)#, all_headers=True)
def tearDown(self):
del self.module
ctypesgentest.cleanup()
def test_stdbool_type(self):
"""Test is bool is correctly parsed"""
module = self.module
struct_foo = module.struct_foo
self.failUnlessEqual(struct_foo._fields_, [("is_bar", ctypes.c_bool), ("a", ctypes.c_int)])
class SimpleMacrosTest(unittest.TestCase):
    """Based on simple_macros.py

    Checks ctypesgen's translation of simple #define macros (constants,
    function-like macros and arithmetic operators) into Python callables.
    """

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '''
#define A 1
#define B(x,y) x+y
#define C(a,b,c) a?b:c
#define funny(x) "funny" #x
#define multipler_macro(x,y) x*y
#define minus_macro(x,y) x-y
#define divide_macro(x,y) x/y
#define mod_macro(x,y) x%y
'''
        # No shared library is needed: macros are evaluated purely in Python.
        self.module, output = ctypesgentest.test(header_str)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_macro_constant_int(self):
        """#define A 1 is exposed as an int attribute."""
        self.assertEqual(self.module.A, 1)

    def test_macro_addition(self):
        """Function-like macro B(x,y) = x+y."""
        self.assertEqual(self.module.B(2, 2), 4)

    def test_macro_ternary_true(self):
        """C(a,b,c) = a?b:c with a truthy condition."""
        self.assertEqual(self.module.C(True, 1, 2), 1)

    def test_macro_ternary_false(self):
        """C(a,b,c) = a?b:c with a falsy condition."""
        self.assertEqual(self.module.C(False, 1, 2), 2)

    def test_macro_ternary_true_complex(self):
        """Test ?: with true, using values that can not be confused between True and 1"""
        self.assertEqual(self.module.C(True, 99, 100), 99)

    def test_macro_ternary_false_complex(self):
        """Test ?: with false, using values that can not be confused between True and 1"""
        self.assertEqual(self.module.C(False, 99, 100), 100)

    def test_macro_string_compose(self):
        """Stringification: "funny" #x concatenates the literal and the arg."""
        self.assertEqual(self.module.funny("bunny"), "funnybunny")

    def test_macro_math_multipler(self):
        x, y = 2, 5
        self.assertEqual(self.module.multipler_macro(x, y), x * y)

    def test_macro_math_minus(self):
        x, y = 2, 5
        self.assertEqual(self.module.minus_macro(x, y), x - y)

    def test_macro_math_divide(self):
        x, y = 2, 5
        self.assertEqual(self.module.divide_macro(x, y), x / y)

    def test_macro_math_mod(self):
        x, y = 2, 5
        self.assertEqual(self.module.mod_macro(x, y), x % y)
class StructuresTest(unittest.TestCase):
    """Based on structures.py

    Checks that a plain C struct is wrapped as a ctypes.Structure with
    the correct _fields_ list.
    """

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '''
struct foo
{
int a;
int b;
int c;
};
'''
        # No shared library is needed for a pure type definition.
        self.module, output = ctypesgentest.test(header_str)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_structures(self):
        """struct foo must expose fields a, b, c, all c_int."""
        struct_foo = self.module.struct_foo
        self.assertEqual(struct_foo._fields_, [("a", ctypes.c_int), ("b", ctypes.c_int), ("c", ctypes.c_int)])
class MathTest(unittest.TestCase):
    """Based on math_functions.py

    Wraps <math.h> via ctypesgen and checks a couple of libm calls
    against Python's math module, plus ctypes argument checking.
    """

    def setUp(self):
        """NOTE this is called once for each test* method
        (it is not called once per class).
        FIXME This is slightly inefficient as it is called *way* more times than it needs to be.
        """
        header_str = '#include <math.h>\n'
        if sys.platform == "win32":
            # pick something from %windir%\system32\msvc*dll that include stdlib
            libraries = ["msvcrt"]
        elif sys.platform.startswith("linux"):
            libraries = ["libm.so.6"]
        else:
            libraries = ["libc"]
        self.module, output = ctypesgentest.test(header_str, libraries=libraries, all_headers=True)

    def tearDown(self):
        del self.module
        ctypesgentest.cleanup()

    def test_sin(self):
        """sin() from libm must agree exactly with Python's math.sin."""
        self.assertEqual(self.module.sin(2), math.sin(2))

    def test_sqrt(self):
        """sqrt(4) == 2; a string argument to sin raises ArgumentError."""
        module = self.module
        self.assertEqual(module.sqrt(4), 2)

        def local_test():
            module.sin("foobar")

        self.assertRaises(ctypes.ArgumentError, local_test)

    def test_bad_args_string_not_number(self):
        """Passing a string where a double is expected raises ArgumentError."""
        module = self.module

        def local_test():
            module.sin("foobar")

        self.assertRaises(ctypes.ArgumentError, local_test)
def main(argv=None):
    """Silence ctypesgen's logger and hand control to the unittest runner.

    NOTE: unittest.main() normally exits the process itself, so the
    trailing ``return 0`` is only reached if that behaviour changes.
    """
    argv = sys.argv if argv is None else argv
    # do not log anything
    ctypesgentest.ctypesgencore.messages.log.setLevel(logging.CRITICAL)
    unittest.main()
    return 0
# Script entry point: the process exit status propagates main()'s result.
if __name__ == "__main__":
    sys.exit(main())

Wyświetl plik

@ -0,0 +1,2 @@
1. Convert defines from "errno.h" into imports from the Python errno module.
2. Search through code for "XXX" and see what can be done.

Wyświetl plik

@ -34,24 +34,11 @@
#endif
/****************************//* times_two_tc *//****************************/
/* The simplest test case. ever. Used to check for sanity */
/***************************** test cases *******************************/
/* Parameters in */
struct times_two_tc_params {
int input;
} times_two_tc_params;
/* Results out */
struct times_two_tc_results {
int result;
} times_two_tc_results;
/* Function */
__verification__ void times_two_tc(void) {
#include "times_two.h"
times_two_tc_results.result = 2 * times_two_tc_params.input;
}
/*******************************//* tc_main *//********************************/
/******************************* tc_main ********************************/
typedef void (*tc_ptr_type)(void);
volatile tc_ptr_type tc_ptr;
@ -60,8 +47,6 @@ volatile tc_ptr_type tc_ptr;
* Runs a test case
*/
/* Execute the currently selected test case by jumping through the
 * volatile function pointer tc_ptr (set externally before each run). */
__verification__ void tc_run() {
  (*tc_ptr)();
}

Wyświetl plik

@ -0,0 +1,9 @@
# Import every module in this subdirectory
import os
import glob

modules = glob.glob(os.path.dirname(__file__) + "/*.py")
# Expose every sibling .py module (minus this __init__) so that
# `from tc import *` pulls in all test-case modules.
__all__ = [os.path.basename(f)[:-3] for f in modules
           if not f.endswith('__init__.py')]

# Keep the package namespace clean. NOTE: the previous `del f` raised
# NameError on Python 3 (comprehension variables do not leak) and on
# Python 2 whenever `modules` was empty, so it has been dropped.
del os, glob, modules

Wyświetl plik

@ -0,0 +1,20 @@
#ifndef __verification__
/* Stub out the attribute when not building the verification firmware. */
#define __verification__
#endif

/****************************//* times_two_tc *//****************************/
/* The simplest test case. ever. Used to check for sanity */

/* Parameters in */
/* Global instance: the parameters are written here before the test case
 * runs — presumably by the external debug harness; confirm with tests.py. */
struct times_two_tc_params {
  int input;
} times_two_tc_params;

/* Results out */
/* The harness reads the result back from this global after the run. */
struct times_two_tc_results {
  int result;
} times_two_tc_results;

/* Function */
/* Doubles the input parameter and stores it in the results struct. */
__verification__ void times_two_tc(void) {
  times_two_tc_results.result = 2 * times_two_tc_params.input;
}

Wyświetl plik

@ -4,12 +4,9 @@
# Imports
# ------------------------------------------------------------------------------
import os
import sys
sys.path.append("./tools/verification")
from verification import *
import verification_tc
sys.path.append("./test")
import main
from random import randint
@ -25,12 +22,12 @@ class times_two_tc:
def get_test(self):
"""Returns some suitable test parameters"""
params = verification_tc.struct_times_two_tc_params()
params = main.struct_times_two_tc_params()
params.input = randint(0, 10000)
return params
def is_correct(self, params, result):
def is_correct(self, params, result, print_info):
"""Returns if a result is correct for the given parameters"""
print_info("%d * 2 = %d"%(params.input, result['result']))
@ -39,12 +36,3 @@ class times_two_tc:
return True
else:
return False
# ------------------------------------------------------------------------------
# Run test
# ------------------------------------------------------------------------------
if __name__ == "__main__":
tester = samd20_test()
tester.run_test_case(times_two_tc())
del tester

Wyświetl plik

@ -0,0 +1,198 @@
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Verification Framework
# ------------------------------------------------------------------------------
from __future__ import print_function
import gdb
import re
import sys
import importlib
from time import *
from colorama import *
sys.path.append("./test")
import tc
from tc import *
LINE_LENGTH = 80
class Tests():
    """Test harness: drives test cases on target hardware from inside gdb.

    NOTE(review): relies on the `gdb` module, so this class only works
    inside arm-none-eabi-gdb's embedded Python interpreter.
    """

    def printf(self, string):
        """All writes go to stderr"""
        print (string, file=sys.stderr)

    def print_info(self, string):
        """Prints an info line"""
        self.printf(Fore.CYAN + "INFO " + Fore.RESET + string)

    def print_good(self, string):
        """Prints an good line"""
        self.printf(Fore.GREEN + "GOOD " + Fore.RESET + string)

    def print_error(self, string):
        """Prints an error line"""
        self.printf(Fore.RED + "ERROR " + Fore.RESET + string)

    def print_centre(self, string):
        """Prints something in the centre of the line"""
        # NOTE(review): `/` is integer division on Python 2 only; on
        # Python 3 `count` is a float and `" " * count` raises TypeError.
        # Confirm the gdb build ships Python 2, or change to `//`.
        count = (LINE_LENGTH - len(string)) / 2
        self.printf ((" " * count) + string)

    def print_header_line(self):
        """Prints a yellow line. Yo"""
        self.printf (Fore.YELLOW + ("*" * LINE_LENGTH) + Fore.RESET)

    def print_header(self, string):
        """Prints a pretty header"""
        self.printf ("")
        self.print_header_line()
        self.print_centre(string)
        self.print_header_line()

    def print_pass(self, tc_name, time):
        """Nice green pass notice"""
        offset = (LINE_LENGTH / 2) - len(tc_name)
        self.printf("")
        self.printf(Fore.GREEN + " " + tc_name + " - PASS" \
                    + (" " * offset) + "CLOCKS = " + str(time) + Fore.RESET)

    def print_fail(self, tc_name, time):
        """Evil red pass notice"""
        # NOTE(review): the " p " prefix and the missing space before
        # "- FAIL" look like typos — compare with print_pass.
        offset = (LINE_LENGTH / 2) - len(tc_name)
        self.printf("")
        self.printf(Fore.RED + " p " + tc_name + "- FAIL" \
                    + (" " * offset) + "CLOCKS = " + str(time) + Fore.RESET)

    def print_summary(self, results):
        """Print per-case pass/fail lines plus an overall percentage."""
        passes = 0
        for result in results:
            # NOTE(review): `(passed) = result` is NOT tuple unpacking —
            # it binds the whole (passed, name, time) tuple, which is
            # always truthy, so the percentage always comes out as 100%.
            (passed) = result
            if passed:
                passes += 1

        self.print_header("SUMMARY - %d%% PASS"%(100*passes/len(results)))

        for result in results:
            (passed, name, time) = result
            if passed:
                self.print_pass(name, time)
            else:
                self.print_fail(name, time)

        self.print_header("")

    #### GDB

    def __init__(self):
        """Attach to the target, load firmware, and park the PC at tc_main."""
        self.inferior = gdb.selected_inferior()
        # Pagination is not helpful here
        gdb.execute("set pagination off")
        # Connect to our target
        gdb.execute("att 1")
        # Load everything into gdb and run
        gdb.execute("load")
        gdb.execute("b main")
        gdb.execute("run")
        # Stopped at the top of main. Go to tc_main
        gdb.execute("del 1")
        gdb.execute("b tc_main")
        gdb.execute("set $pc=tc_main")
        gdb.execute("c")

    def __del__(self):
        """Quit gdb when the harness object is garbage-collected."""
        self.print_info("quit")
        gdb.execute("quit")

    def hw_run_tc(self, tc_name, parameters):
        """Runs a test case on hardware"""
        # If we're stopped where we'd expect
        if self.read_variable("($pc == tc_main + 4)"):
            # Write the parameters
            self.write_varible(tc_name+"_params", parameters)
            # Presuming there"s a breakpoint at the top of tc_main
            # NOTE(review): the "+1" presumably sets the Thumb bit on the
            # function address — confirm for this Cortex-M target.
            gdb.execute("set tc_ptr="+tc_name+"+1")
            gdb.execute("c")
            # Test case done. Return results
            return self.read_variable(tc_name+"_results")
        else:
            return None

    def hw_get_last_time(self):
        """Returns the number of clocks the last test case took"""
        # TODO placeholder: always reports a single clock
        return 1

    #### Read / Write

    def read_variable(self, name):
        """Evaluate `name` in the inferior and return the gdb value."""
        gdb.execute("p " + name, to_string=True)
        return gdb.history(0)

    def write_varible(self, name, value):
        """Overwrite the variable called `name` with `value`'s raw bytes.

        NOTE(review): the name is a typo for write_variable (kept, since
        hw_run_tc calls it by this spelling).
        """
        pvar = self.read_variable(name)
        self.inferior.write_memory(pvar.address, value)

    #### Test Case

    def run_test_case(self, test_case):
        """Run one test-case object and return (passed, name, clocks)."""
        tc_name = test_case.__class__.__name__
        self.print_header(tc_name)
        fail = False
        ttime = 0

        if hasattr(test_case, 'iterations'):
            # Fixed number of generated iterations
            for i in range(test_case.iterations):
                params = test_case.get_test()
                result = self.hw_run_tc(tc_name, params)
                ttime += self.hw_get_last_time()
                if result:
                    if not test_case.is_correct(params, result, self.print_info):
                        fail = True
                        break
                else: # No result, Failure
                    fail = True
        else:
            # Data-driven: keep fetching until get_test() returns None
            params = test_case.get_test()
            while (params):
                result = self.hw_run_tc(tc_name, params)
                ttime += self.hw_get_last_time()
                if result:
                    if not test_case.is_correct(params, result, self.print_info):
                        fail = True
                        break
                else: # No result, Failure
                    fail = True
                params = test_case.get_test()

        if not fail:
            self.print_pass(tc_name, ttime)
        else:
            self.print_fail(tc_name, ttime)

        # Return data tuple
        return (not fail, tc_name, ttime)

    def get_testcase_from_name(self, name):
        """Import tc/<name>.py and return its <name>_tc class object."""
        tc_module = importlib.import_module('tc.'+name)
        return getattr(tc_module, name+'_tc')

    def print_testcases(self):
        """List every discovered test-case module as a header banner."""
        for tc_name in tc.__all__:
            self.print_header(tc_name)
# ------------------------------------------------------------------------------
# Entry Point
# ------------------------------------------------------------------------------
# NOTE(review): presumably gdb sets __name__ to '__main__' when this file
# is sourced with `gdb -x` — confirm against the gdb version in use.
if __name__ == '__main__':
    t = Tests()
    #t.print_testcases()
    t.run_test_case(t.get_testcase_from_name('times_two')())
    # Dropping the last reference triggers Tests.__del__, which quits gdb.
    del t

Wyświetl plik

@ -1,71 +0,0 @@
#!/bin/bash
# ------------------------------------------------------------------------------
# Get arm-none-eabi-gdb with python support
# Should take about 15 minutes
# ------------------------------------------------------------------------------

# Build in a scratch directory.
# NOTE(review): no `set -e` and unchecked `cd`s — a failed download or
# extract will cascade into confusing errors further down.
mkdir gdb-build
cd gdb-build

# Grab the pre-requisites
sudo apt-get install apt-src \
gawk \
gzip \
perl \
autoconf \
m4 \
automake \
libtool \
libncurses5-dev \
gettext \
gperf \
dejagnu \
expect \
tcl \
autogen \
flex \
flip \
bison \
tofrodos \
texinfo \
g++ \
gcc-multilib \
libgmp3-dev \
libmpfr-dev \
debhelper \
texlive \
texlive-extra-utils

# Grab the sources - UPDATE WITH LATEST SOURCES
#wget https://launchpad.net/gcc-arm-embedded/4.8/4.8-2014-q2-update/+download/gcc-arm-none-eabi-4_8-2014q2-20140609-src.tar.bz2

# Extract
tar xjf gcc*
cd gcc*

# Extract gdb
cd src
tar xf gdb*
cd gdb*

# Configure
# Normalise uname output (e.g. i686/x86_64) for the build/host triplets.
host_arch=`uname -m | sed 'y/XI/xi/'`
# NOTE(review): "--with-python=yes" embedded in the version string looks
# like a copy/paste slip; the flag is also passed properly below.
PKGVERSION="GNU Tools for ARM Embedded Processors --with-python=yes"
./configure --target=arm-none-eabi \
--disable-nls \
--disable-sim \
--with-libexpat \
--with-python=yes \
--with-lzma=no \
--build=$host_arch-linux-gnu --host=$host_arch-linux-gnu \
--with-pkgversion="$PKGVERSION"

# Make (j = Ncores + 1)
make -j3
sudo make install

# Cleanup
cd ../../../..
#sudo rm -rf gdb-build

Wyświetl plik

@ -1,59 +0,0 @@
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import os
import sys
sys.path.append("./tools/verification")
from verification import *
import verification_tc
# ------------------------------------------------------------------------------
# Test Script
# ------------------------------------------------------------------------------
class times_two_tc:
    """Sanity test case: the target doubles an int; we check the result."""

    def __init__(self):
        self.name = self.__class__.__name__
        # Number of random inputs the harness will run
        self.iterations = 20

    def get_test(self):
        """Returns some suitable test parameters"""
        params = verification_tc.struct_times_two_tc_params()
        # NOTE(review): randint is used but never imported in this file —
        # this would raise NameError when run; confirm.
        params.input = randint(0, 10000)
        return params

    def is_correct(self, params, result):
        """Returns if a result is correct for the given parameters"""
        print_info("%d * 2 = %d"%(params.input, result['result']))
        if (params.input * 2 == result['result']):
            return True
        else:
            return False
# ------------------------------------------------------------------------------
# Run test
# ------------------------------------------------------------------------------
sys.path.append("./tools/verification/tc")

tester = samd20_test()
results = []

# Times Two
import times_two
results.append(tester.run_test_case(times_two.times_two_tc()))

# Summary
tester.print_summary(results)

# Clean Up
# Dropping the reference triggers samd20_test.__del__, which quits gdb.
del tester

Wyświetl plik

@ -1,57 +0,0 @@
$PSRFTXTVersion GSW3.2.1PAT_3.1.00.12-SDK001P1.00c *3F
$PSRFTXTHTC GPS_ART_321000_GEN*20
$PSRFTXTTOW: 423546*3B
$PSRFTXTWK: 1412*4F
$PSRFTXTPOS: 1533096 -4464909 4274442*2A
$PSRFTXTCLK: 94810*05
$PSRFTXTCHNL: 12*5F
$PSRFTXTBaud rate: 57600 *51
$GPGGA,213912.270,,,,,0,00,,,M,0.0,M,,0000*59
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,213912.270,V,,,,,,,010207,,,N*46
$GPGGA,213913.211,,,,,0,00,,,M,0.0,M,,0000*5F
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,213913.211,V,,,,,,,010207,,,N*40
$GPGGA,213914.200,,,,,0,00,,,M,0.0,M,,0000*58
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,213914.200,V,,,,,,,010207,,,N*47
$GPGGA,213915.196,,,,,0,00,,,M,0.0,M,,0000*55
$GPGSA,A,1,,,,,,,,,,,,,,,*1E
$GPRMC,213915.196,V,,,,,,,010207,,,N*4A
$GPGGA,213916.199,4221.0377,N,07102.9778,W,1,03,13.4,-32.4,M,-33.7,M,,0000*45
$GPGSA,A,2,26,07,06,,,,,,,,,,13.4,13.4,1.0*37
$GPGSV,3,1,11,26,55,180,28,06,46,258,33,07,43,272,30,21,22,304,*77
$GPGSV,3,2,11,24,83,061,,29,64,170,,10,45,056,,02,23,121,*7F
$GPGSV,3,3,11,08,17,067,,27,10,041,,18,05,257,*4B
$GPRMC,213916.199,A,4221.0377,N,07102.9778,W,0.00,,010207,,,A*6A
$GPGGA,213917.199,4221.0510,N,07102.9549,W,1,04,3.9,-65.2,M,-33.7,M,,0000*7C
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213917.199,A,4221.0510,N,07102.9549,W,0.23,175.43,010207,,,A*77
$GPGGA,213918.199,4221.0853,N,07102.9382,W,1,04,3.9,50.0,M,-33.7,M,,0000*51
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213918.199,A,4221.0853,N,07102.9382,W,0.95,326.55,010207,,,A*7D
$GPGGA,213919.000,4221.0975,N,07102.9300,W,1,04,3.9,86.2,M,-33.7,M,,0000*57
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213919.000,A,4221.0975,N,07102.9300,W,0.55,332.53,010207,,,A*7D
$GPGGA,213920.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*6C
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213920.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*6D
$GPGGA,213921.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*6D
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPGSV,3,1,11,26,55,180,29,06,46,258,26,07,43,272,33,21,22,304,26*75
$GPGSV,3,2,11,24,83,061,,29,64,170,,10,45,056,,02,23,121,*7F
$GPGSV,3,3,11,08,17,067,28,27,10,041,,18,05,257,*41
$GPRMC,213921.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*6C
$GPGGA,213922.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*6E
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213922.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*6F
$GPGGA,213923.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*6F
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.1,3.9,1.0*3A
$GPRMC,213923.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*6E
$GPGGA,213924.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*68
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.0,3.9,1.0*3B
$GPRMC,213924.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*69
$GPGGA,213925.000,4221.1129,N,07102.9146,W,1,04,3.9,129.7,M,-33.7,M,,0000*69
$GPGSA,A,3,26,07,06,21,,,,,,,,,4.0,3.9,1.0*3B
$GPRMC,213925.000,A,4221.1129,N,07102.9146,W,0.00,,010207,,,A*68
$GPGGA,213926.000,4221.1112,N,07102.9177,W,1,04,3.9,136.5,M,-33.7,M,,0000*6C

Wyświetl plik

@ -1,120 +0,0 @@
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import os
import sys
sys.path.append("./tools/verification")
from verification import *
import verification_tc
from random import randint
from pynmea.streamer import NMEAStream
# ------------------------------------------------------------------------------
# Test Script
# ------------------------------------------------------------------------------
class nmea_tc:
    """Replay recorded NMEA sentences to the target's parser and compare
    the decoded fix against the pynmea reference implementation."""

    def __init__(self):
        self.name = self.__class__.__name__
        # One NMEA sentence per line; replayed in order by get_test().
        with open("tools/verification/tc/misc/gpslog.txt") as f:
            self.lines = f.readlines()
        self.index = 0

    def get_test(self):
        """Returns some suitable test parameters"""
        try:
            line_string = self.lines[self.index]
            params = verification_tc.struct_nmea_tc_params()
            params.buff = line_string
            return params
        except:
            # Past the end of the log (or struct creation failed):
            # returning None tells the harness we are done.
            return None

    def is_correct(self, params, result):
        """Returns if a result is correct for the given parameters"""
        nmeastreamer = NMEAStream()
        # NOTE(review): get_objects is called twice on identical data —
        # the first call looks redundant; confirm NMEAStream semantics.
        pynmea_obj = nmeastreamer.get_objects(data=params.buff)
        pynmea_obj = nmeastreamer.get_objects(data=params.buff)
        try:
            pynmea = pynmea_obj[0]
        except:
            pynmea = None

        if pynmea:
            #print_info(str(params.buff))
            #print_info(str(pynmea))
            if hasattr(pynmea, 'latitude') and hasattr(pynmea, 'longitude'):
                # GPGGA frame
                print_info("%s::: lat:%s,lon:%s,alt:%s"%
                    (str(pynmea),
                    pynmea.latitude,
                    pynmea.longitude,
                    pynmea.antenna_altitude
                    ))

                # Check lat/lon/alt
                if pynmea.latitude:
                    lat = float(pynmea.latitude)
                    # Southern hemisphere latitudes are negative
                    lat *= 1 if (pynmea.lat_direction == 'N') else -1
                    if lat != float(result['lat']):
                        print_error("Latitude %f != %f"%
                            (result['lat'], lat))
                        return False
                    else:
                        print_good("Latitude %f == %f"%
                            (result['lat'], lat))
                if pynmea.longitude:
                    lon = float(pynmea.longitude)
                    # Western longitudes are negative
                    lon *= 1 if (pynmea.lon_direction == 'E') else -1
                    if lon != float(result['lon']):
                        print_error("Longitude %f != %f"%
                            (result['lon'], lon))
                        return False
                    else:
                        print_good("Longitude %f == %f"%
                            (result['lon'], lon))
                if pynmea.antenna_altitude:
                    alt = float(pynmea.antenna_altitude)
                    if alt != float(result['elv']):
                        # NOTE(review): prints `lon` where `alt` is meant
                        print_error("Altitude %f != %f"%
                            (result['elv'], lon))
                        return False
                    else:
                        print_good("Altitude %f == %f"%
                            (result['elv'], alt))

        # Move on to the next line
        self.index += 1
        return True
# ------------------------------------------------------------------------------
# Run test
# ------------------------------------------------------------------------------
# NOTE(review): the guard is commented out, so the test runs on import
# and only the cleanup is guarded — looks unintentional; confirm.
#if __name__ == "__main__":
tester = samd20_test()
tester.run_test_case(nmea_tc())

if __name__ == "__main__":
    del tester

Wyświetl plik

@ -1,206 +0,0 @@
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Verification Framework
# ------------------------------------------------------------------------------
# This script is inteneded to be run from inside gdb, with gdb already
# attached to the target - probably this is done in .gdbinit.
#
# You should be running gdb like this:
#
# arm-none-eabi-gdb -q -x tools/verification/tc/my_tc.py
#
#
# The useful output from this script appears on stderror, sorry about
# that. Hence you should supress stdout if you don't want loads of
# info from gdb.
#
# Something like this should work:
# > /dev/null arm-none-eabi-gdb -q -x tools/verfication/tc/my_tc.py
#
# ------------------------------------------------------------------------------
from __future__ import print_function
import gdb
import re
import sys
from time import *
from colorama import *
LINE_LENGTH = 80
##### Public Print utilities
def printf(string):
    """All writes go to stderr"""
    sys.stderr.write("%s\n" % (string,))
def print_info(string):
    """Prints an info line"""
    # Blank spacer line, then the cyan-tagged message.
    printf("")
    printf("%s%s%s%s" % (Fore.CYAN, "INFO ", Fore.RESET, string))
def print_good(string):
    """Prints an good line"""
    printf("%s%s%s%s" % (Fore.GREEN, "GOOD ", Fore.RESET, string))
def print_error(string):
    """Prints an error line"""
    printf("%s%s%s%s" % (Fore.RED, "ERROR ", Fore.RESET, string))
##### Tester
class samd20_test:
    """Drives test cases on SAMD20 hardware from inside gdb.

    NOTE(review): relies on the `gdb` module, so this only works inside
    arm-none-eabi-gdb's embedded Python interpreter.
    """

    def print_centre(self, string):
        """Prints something in the centre of the line"""
        # NOTE(review): `/` is float division on Python 3, which would
        # make `" " * count` raise TypeError; fine on a Python 2 gdb.
        count = (LINE_LENGTH - len(string)) / 2
        printf ((" " * count) + string)

    def print_header_line(self):
        """Prints a yellow line. Yo"""
        printf (Fore.YELLOW + ("*" * LINE_LENGTH) + Fore.RESET)

    def print_header(self, string):
        """Prints a pretty header"""
        printf ("")
        self.print_header_line()
        self.print_centre(string)
        self.print_header_line()

    def print_pass(self, tc_name, time):
        """Nice green pass notice"""
        offset = (LINE_LENGTH / 2) - len(tc_name)
        printf("")
        printf(Fore.GREEN + " " + tc_name + " - PASS" \
            + (" " * offset) + "CLOCKS = " + str(time) + Fore.RESET)

    def print_fail(self, tc_name, time):
        """Evil red pass notice"""
        # NOTE(review): " p " prefix and missing space before "- FAIL"
        # look like typos — compare with print_pass.
        offset = (LINE_LENGTH / 2) - len(tc_name)
        printf("")
        printf(Fore.RED + " p " + tc_name + "- FAIL" \
            + (" " * offset) + "CLOCKS = " + str(time) + Fore.RESET)

    def print_summary(self, results):
        """Print per-case pass/fail lines plus an overall percentage."""
        passes = 0
        for result in results:
            # NOTE(review): `(passed) = result` binds the whole tuple,
            # which is always truthy, so the percentage is always 100%.
            (passed) = result
            if passed:
                passes += 1

        self.print_header("SUMMARY - %d%% PASS"%(100*passes/len(results)))

        for result in results:
            (passed, name, time) = result
            if passed:
                self.print_pass(name, time)
            else:
                self.print_fail(name, time)

        self.print_header("")

    #### GDB

    def __init__(self):
        """Load the firmware and park the target's PC at tc_main."""
        self.inferior = gdb.selected_inferior()
        # Pagination is not helpful here
        gdb.execute("set pagination off")
        # Load everything into gdb and run
        gdb.execute("load")
        gdb.execute("b main")
        gdb.execute("run")
        # Stopped at the top of main. Go to tc_main
        gdb.execute("del 1")
        gdb.execute("b tc_main")
        gdb.execute("set $pc=tc_main")
        gdb.execute("c")

    def __del__(self):
        """Quit gdb when the tester object is garbage-collected."""
        print_info("quit")
        gdb.execute("quit")

    def hw_run_tc(self, tc_name, parameters):
        """Runs a test case on hardware"""
        # If we're stopped where we'd expect
        if self.read_variable("($pc == tc_main + 4)"):
            # Write the parameters
            self.write_varible(tc_name+"_params", parameters)
            # Presuming there"s a breakpoint at the top of tc_main
            # NOTE(review): "+1" presumably sets the Thumb bit — confirm.
            gdb.execute("set tc_ptr="+tc_name+"+1")
            gdb.execute("c")
            # Test case done. Return results
            return self.read_variable(tc_name+"_results")
        else:
            return None

    def hw_get_last_time(self):
        """Returns the number of clocks the last test case took"""
        # TODO placeholder: always reports a single clock
        return 1

    #### Read / Write

    def read_variable(self, name):
        """Evaluate `name` in the inferior and return the gdb value."""
        gdb.execute("p " + name, to_string=True)
        return gdb.history(0)

    def write_varible(self, name, value):
        """Overwrite variable `name` with `value`'s raw bytes.

        NOTE(review): typo for write_variable (kept; hw_run_tc calls it).
        """
        pvar = self.read_variable(name)
        self.inferior.write_memory(pvar.address, value)

    #### Test Case

    def run_test_case(self, test_case):
        """Run one test-case object and return (passed, name, clocks)."""
        tc_name = test_case.__class__.__name__
        self.print_header(tc_name)
        fail = False
        ttime = 0

        if hasattr(test_case, 'iterations'):
            # Fixed number of generated iterations
            for i in range(test_case.iterations):
                params = test_case.get_test()
                result = self.hw_run_tc(tc_name, params)
                ttime += self.hw_get_last_time()
                if result:
                    if not test_case.is_correct(params, result):
                        fail = True
                        break
                else: # No result, Failure
                    fail = True
        else:
            # Data-driven: keep fetching until get_test() returns None
            params = test_case.get_test()
            while (params):
                result = self.hw_run_tc(tc_name, params)
                ttime += self.hw_get_last_time()
                if result:
                    if not test_case.is_correct(params, result):
                        fail = True
                        break
                else: # No result, Failure
                    fail = True
                params = test_case.get_test()

        if not fail:
            self.print_pass(tc_name, ttime)
        else:
            self.print_fail(tc_name, ttime)

        # Return data tuple
        return (not fail, tc_name, ttime)

Wyświetl plik

@ -45,6 +45,8 @@
|BLM15BB100SN1D|MURATA - FERRITE BEAD, 0.1OHM, 300MA, 0402|[1515765](http://uk.farnell.com/webapp/wcs/stores/servlet/Search?st=1515765)|2|FB1, FB2
|CG0603MLC-05LE|BOURNS - ESD SUPPRESSOR, 0603, 5V, 0.5PF|[1828732](http://uk.farnell.com/webapp/wcs/stores/servlet/Search?st=1828732)|2|ESD1, ESD2
|55|KEYSTONE - BATTERY CLIP AAA|[1702644](http://uk.farnell.com/webapp/wcs/stores/servlet/Search?st=1702644)|2|J1, J1
|4276101511|VARTA - BATTERY, BUTTON CELL, 1.5V LR 44|[3055917](http://uk.farnell.com/webapp/wcs/stores/servlet/Search?st=3055917)|1|Battery
|2997|KEYSTONE - BATTERY HOLDER, 11.6MM COIN CELL, THT|[2293265](http://uk.farnell.com/webapp/wcs/stores/servlet/Search?st=2293265) [534-2997](http://uk.mouser.com/Search/Refine.aspx?N=1323043&Keyword=534-2997)|1|Single-piece battery holder
----
## Extras for Development Version TODO

Wyświetl plik

@ -45,6 +45,8 @@
1515765 2 FB1, FB2
1828732 2 ESD1, ESD2
1702644 2 J1, J1
3055917 1 Battery
2293265 534-2997 1 Single-piece battery holder
----
## Extras for Development Version TODO