Estimated hours taken: 5

More cleanup of the memory management code.  This time we clean up the
signal handler setup code.

runtime/Mmakefile:
    Add new files.

runtime/mercury_memory.c:
    Rename setup_signal() to setup_signals().

runtime/mercury_memory_handlers.c:
runtime/mercury_memory_handlers.h:
    Clean up signal handling.  Use MR_setup_signal to set up signal
    handlers.  Define bus_handler and segv_handler signal handlers;
    the old signal handlers are just one or the other (or both).

runtime/mercury_prof.c:
    Use MR_setup_signal to set up the signal handler.

runtime/mercury_signal.c:
runtime/mercury_signal.h:
    New files -- a standard interface for setting up signal handlers
    (a porting base, if you like).
/*
** Copyright (C) 1994-1998 The University of Melbourne.
** This file may only be copied under the terms of the GNU Library General
** Public License - see the file COPYING.LIB in the Mercury distribution.
*/

/*
** This module defines the register array and data regions of the
** execution algorithm.
** They are defined together here to allow us to control how they map
** onto direct mapped caches.
** We allocate a large arena, preferably aligned on a boundary that
** is a multiple of both the page size and the primary cache size.
**
** We then allocate the heap and the stacks in such a way that
**
**      the register array
**      the bottom of the heap
**      the bottom of the detstack
**      the bottom of the nondstack
**
** all start at different offsets from multiples of the primary cache size.
** This should reduce cache conflicts (especially for small programs).
**
** If the operating system of the machine supports the mprotect syscall,
** we also protect a chunk at the end of each area against access,
** thus detecting area overflow.
**
** The code for handling the allocation and management of different
** memory zones is in mercury_memory_zones.{c,h}.
** The code for handling overflows and memory access errors in general
** is in mercury_memory_handlers.{c,h}.
*/
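
/*
** For example (hypothetical numbers, for illustration only): with an 8k
** primary cache, the scheme above might place the register array at an
** offset of 0k, the bottom of the heap at 2k, the bottom of the detstack
** at 4k and the bottom of the nondstack at 6k past a cache-size boundary,
** so the most heavily used end of each area maps to a different part of
** a direct mapped cache.
*/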

/*---------------------------------------------------------------------------*/

#include "mercury_imp.h"

#ifdef HAVE_SIGCONTEXT_STRUCT
/*
** Some versions of Linux call it struct sigcontext_struct, some call it
** struct sigcontext.  The following #define eliminates the differences.
*/
#define sigcontext_struct sigcontext /* must be before #include <signal.h> */

/*
** On some systems (e.g. most versions of Linux) we need to #define
** __KERNEL__ to get sigcontext_struct from <signal.h>.
** This stuff must come before anything else that might include <signal.h>,
** otherwise the #define __KERNEL__ may not work.
*/
#define __KERNEL__
#include <signal.h> /* must come third */
#undef __KERNEL__

/*
** Some versions of Linux define struct sigcontext in <signal.h>, others
** define it in <asm/sigcontext.h>.  We try both.
*/
#ifdef HAVE_ASM_SIGCONTEXT
#include <asm/sigcontext.h>
#endif
#else
#include <signal.h>
#endif

#include <unistd.h>
#include <stdio.h>
#include <string.h>

#ifdef HAVE_SYS_SIGINFO
#include <sys/siginfo.h>
#endif

#ifdef HAVE_MPROTECT
#include <sys/mman.h>
#endif

#ifdef HAVE_UCONTEXT
#include <ucontext.h>
#endif

#ifdef HAVE_SYS_UCONTEXT
#include <sys/ucontext.h>
#endif

#include "mercury_imp.h"
#include "mercury_trace.h"
#include "mercury_memory_handlers.h"

/*---------------------------------------------------------------------------*/

#if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
#define getpagesize() sysconf(_SC_PAGESIZE)
#elif !defined(HAVE_GETPAGESIZE)
#define getpagesize() 8192
#endif

/*---------------------------------------------------------------------------*/

static void setup_mprotect(void);

#ifdef HAVE_SIGINFO
static bool try_munprotect(void *address, void *context);
static char *explain_context(void *context);
#endif /* HAVE_SIGINFO */

MemoryZone *detstack_zone;
MemoryZone *nondetstack_zone;
#ifndef CONSERVATIVE_GC
MemoryZone *heap_zone;
MemoryZone *solutions_heap_zone;
#endif
#ifdef MR_LOWLEVEL_DEBUG
MemoryZone *dumpstack_zone;
int dumpindex;
#endif

size_t unit;
size_t page_size;

void
init_memory(void)
{
    /*
    ** Convert all the sizes from kilobytes to bytes and
    ** make sure they are multiples of the page and cache sizes.
    ** (A sketch of the rounding arithmetic follows this function.)
    */

    page_size = getpagesize();
    unit = max(page_size, pcache_size);

#ifdef CONSERVATIVE_GC
    heap_zone_size = 0;
    heap_size = 0;
    solutions_heap_zone_size = 0;
    solutions_heap_size = 0;
#else
    heap_zone_size = round_up(heap_zone_size * 1024, unit);
    heap_size = round_up(heap_size * 1024, unit);
    solutions_heap_zone_size = round_up(solutions_heap_zone_size * 1024,
        unit);
    solutions_heap_size = round_up(solutions_heap_size * 1024, unit);
#endif

    detstack_size = round_up(detstack_size * 1024, unit);
    detstack_zone_size = round_up(detstack_zone_size * 1024, unit);
    nondstack_size = round_up(nondstack_size * 1024, unit);
    nondstack_zone_size = round_up(nondstack_zone_size * 1024, unit);

#ifdef MR_USE_TRAIL
    trail_size = round_up(trail_size * 1024, unit);
    trail_zone_size = round_up(trail_zone_size * 1024, unit);
#else
    trail_size = 0;
    trail_zone_size = 0;
#endif

    /*
    ** If a zone size was set to something too big (i.e. at least as large
    ** as the area itself), then reset it to a single unit.
    */

#ifndef CONSERVATIVE_GC
    if (heap_zone_size >= heap_size) {
        heap_zone_size = unit;
    }
    if (solutions_heap_zone_size >= solutions_heap_size) {
        solutions_heap_zone_size = unit;
    }
#endif

    if (detstack_zone_size >= detstack_size) {
        detstack_zone_size = unit;
    }

    if (nondstack_zone_size >= nondstack_size) {
        nondstack_zone_size = unit;
    }

#ifdef MR_USE_TRAIL
    if (trail_zone_size >= trail_size) {
        trail_zone_size = unit;
    }
#endif

    init_memory_arena();
    init_zones();
    setup_signals();
    if (memdebug) debug_memory();
} /* end init_memory() */
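
/*
** Illustrative sketch only -- not used by the runtime.  round_up() is
** defined elsewhere in the runtime; assuming it rounds its first argument
** up to the next multiple of its second (as the size calculations in
** init_memory() above rely on), the arithmetic is as below.  For example,
** a 64k detstack with an 8k unit gives round_up(64 * 1024, 8192) == 65536,
** i.e. the size is already a multiple of the unit and is left unchanged.
*/
static size_t
example_round_up(size_t value, size_t multiple)
{
    /* hypothetical helper: round value up to the next multiple of `multiple' */
    return ((value + multiple - 1) / multiple) * multiple;
}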

void
init_heap(void)
{
#ifndef CONSERVATIVE_GC
    heap_zone = create_zone("heap", 1, heap_size, next_offset(),
        heap_zone_size, default_handler);

    restore_transient_registers();
    MR_hp = heap_zone->min;
    save_transient_registers();

    solutions_heap_zone = create_zone("solutions_heap", 1,
        solutions_heap_size, next_offset(),
        solutions_heap_zone_size, default_handler);
    restore_transient_registers();
    MR_sol_hp = solutions_heap_zone->min;
    save_transient_registers();

#endif

#ifdef MR_LOWLEVEL_DEBUG
    /*
    ** Create the dumpstack, used for debugging stack traces.
    ** Note that we can just make the dumpstack the same size as
    ** the detstack, so we never have to worry about the dumpstack
    ** overflowing.
    */

    dumpstack_zone = create_zone("dumpstack", 1, detstack_size,
        next_offset(), detstack_zone_size, default_handler);
#endif
} /* end init_heap() */
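
#ifdef HAVE_MPROTECT
/*
** Illustrative sketch only -- not called by the runtime.  As the header
** comment above says, where the mprotect syscall is available a chunk at
** the end of each area is protected against access so that overflow is
** caught as a signal.  The real redzone handling lives in
** mercury_memory_zones.c and mercury_memory_handlers.c; the hypothetical
** helper below just shows the core system call involved: marking a
** page-aligned redzone inaccessible, so that touching it raises SIGSEGV
** (or SIGBUS on some systems).
*/
static int
example_protect_redzone(void *redzone_base, size_t redzone_size)
{
    /*
    ** redzone_base should be page aligned and redzone_size a multiple of
    ** the page size, otherwise mprotect() may fail.
    */
    if (mprotect(redzone_base, redzone_size, PROT_NONE) != 0) {
        perror("mprotect");
        return -1;
    }
    return 0;
}
#endif /* HAVE_MPROTECT */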


#ifdef CONSERVATIVE_GC

void *
allocate_bytes(size_t numbytes)
{
    void *tmp;

#ifdef PARALLEL
    if (numprocs > 1) {
        fatal_error("shared memory not supported (yet)");
    }
#endif

    tmp = GC_MALLOC(numbytes);

    if (tmp == NULL) {
        fatal_error("could not allocate memory");
    }

    return tmp;
}

#elif defined(PARALLEL)

#error "shared memory not implemented"

#else /* not CONSERVATIVE_GC && not PARALLEL */

void *
allocate_bytes(size_t numbytes)
{
    void *tmp;

#ifdef PARALLEL
    if (numprocs > 1) {
        fatal_error("shared memory not supported (yet)");
    }
#endif

    tmp = malloc(numbytes);

    if (tmp == NULL) {
        fatal_error("could not allocate memory");
    }

    return tmp;
}

#endif

void
deallocate_memory(void *ptr)
{
#ifdef CONSERVATIVE_GC
#ifdef PARALLEL
    if (numprocs > 1) {
        fatal_error("shared memory not supported");
    }
#endif
    GC_FREE(ptr);

#else
#ifdef PARALLEL
    if (numprocs > 1) {
        fatal_error("shared memory not supported");
    }
#endif
    free(ptr);
#endif
}
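
/*
** Illustrative usage only (a hypothetical function, not part of the
** runtime): allocate_bytes() reports an exhausted heap through
** fatal_error() rather than returning NULL, so callers need no NULL
** check, and every allocation is released with deallocate_memory()
** regardless of which allocator was configured at build time.
*/
static void
example_allocate_bytes_usage(void)
{
    char *buf;

    buf = allocate_bytes(1024);    /* failure is handled by fatal_error() */
    memset(buf, 0, 1024);
    deallocate_memory(buf);
}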


/*
** Note: checked_malloc()ed structures never contain pointers into GCed
** memory, so we don't need to GC_malloc() them.  (cf. newmem())
*/
void *
checked_malloc(size_t n)
{
    reg void *p;

    p = malloc(n);
    if (p == NULL && n != 0) {
        fatal_error("ran out of memory");
    }

    return p;
}


void *
checked_realloc(void *old, size_t n)
{
    reg void *p;

    p = realloc(old, n);
    if (p == NULL && n != 0) {
        fatal_error("ran out of memory");
    }

    return p;
}
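
/*
** Illustrative usage only (a hypothetical function, not part of the
** runtime): a typical checked_malloc()/checked_realloc() pattern --
** growing a plain C array that holds no pointers into GCed memory, as
** the note above requires.  Both helpers report exhaustion through
** fatal_error(), so a nonzero request never yields NULL.  Assumes
** new_count >= old_count.
*/
static int *
example_grow_int_array(int *array, size_t old_count, size_t new_count)
{
    if (array == NULL) {
        array = checked_malloc(new_count * sizeof(int));
    } else {
        array = checked_realloc(array, new_count * sizeof(int));
    }
    /* zero-fill the newly added elements */
    memset(array + old_count, 0, (new_count - old_count) * sizeof(int));
    return array;
}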