Move LONG_BIT from intobject.c to pyport.h. #error if it's already been
#define'd to an unreasonable value (several recent gcc systems have misdefined it, causing bogus overflows in integer multiplication). Nuke CHAR_BIT entirely.
This commit is contained in:
parent c85eb0bd4b
commit d57731f74b
pyport.h
@@ -374,6 +374,19 @@ typedef struct fd_set {
 #define LONG_MIN (-LONG_MAX-1)
 #endif
 
+#ifndef LONG_BIT
+#define LONG_BIT (8 * SIZEOF_LONG)
+#endif
+
+#if LONG_BIT != 8 * SIZEOF_LONG
+/* 04-Oct-2000 LONG_BIT is apparently (mis)defined as 64 on some recent
+ * 32-bit platforms using gcc.  We try to catch that here at compile-time
+ * rather than waiting for integer multiplication to trigger bogus
+ * overflows.
+ */
+#error "LONG_BIT definition appears wrong for platform (bad gcc config?)."
+#endif
+
 #ifdef __NeXT__
 #ifdef __sparc__
 /*
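For reference, a minimal standalone sketch of the same compile-time guard that the hunk above adds. EXPECTED_LONG_BIT is a hypothetical macro invented for this illustration; the committed code instead compares LONG_BIT against 8 * SIZEOF_LONG from pyconfig.h. Because sizeof cannot be used in #if, the sketch derives the expected width from ULONG_MAX.

/* A minimal sketch (not part of this commit) of the compile-time guard.
 * EXPECTED_LONG_BIT is a hypothetical name used only in this example. */
#include <limits.h>   /* some platforms (mis)define LONG_BIT here */
#include <stdio.h>

#if ULONG_MAX == 0xFFFFFFFFUL
#define EXPECTED_LONG_BIT 32
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFUL
#define EXPECTED_LONG_BIT 64
#else
#error "unexpected ULONG_MAX; extend the sketch for this platform"
#endif

#ifndef LONG_BIT
#define LONG_BIT EXPECTED_LONG_BIT
#endif

/* A header that defines LONG_BIT as 64 on a 32-bit platform is caught
 * here, at compile time, instead of via bogus overflow at run time. */
#if LONG_BIT != EXPECTED_LONG_BIT
#error "LONG_BIT appears wrong for this platform (bad gcc/libc config?)."
#endif

int main(void)
{
    printf("LONG_BIT = %d, sizeof(long) = %lu\n",
           (int)LONG_BIT, (unsigned long)sizeof(long));
    return 0;
}

On a correctly configured system this compiles and prints matching values; on the misconfigured gcc installations the commit message describes, the #error fires during the build.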
intobject.c
@@ -4,14 +4,6 @@
 #include "Python.h"
 #include <ctype.h>
 
-#ifndef CHAR_BIT
-#define CHAR_BIT 8
-#endif
-
-#ifndef LONG_BIT
-#define LONG_BIT (CHAR_BIT * sizeof(long))
-#endif
-
 long
 PyInt_GetMax(void)
 {