[kaffe] CVS kaffe (robilad): Fixed remaining ABORT and EXIT renaming

Kaffe CVS cvs-commits at kaffe.org
Sat May 7 02:00:52 PDT 2005


PatchSet 6447 
Date: 2005/05/07 08:55:39
Author: robilad
Branch: HEAD
Tag: (none) 
Log:
Fixed remaining ABORT and EXIT renaming

2005-05-07  Dalibor Topic  <robilad at kaffe.org>

        * kaffe/kaffevm/systems/unix-jthreads/signal.c (setupSigAltStack),
        kaffe/kaffevm/systems/unix-jthreads/jthread.c (restore_fds_and_exit),
        kaffe/kaffevm/systems/oskit-pthreads/pjthread.c (remove_thread),
        kaffe/jvmpi/jvmpi_kaffe.c (jvmpiProfilerExit),
        config/mips/netbsd1/md.c (sysdepCallMethod),
        kaffe/kaffevm/intrp/icode.h (softcall_breakpoint),
        libraries/clib/net/InetAddressImpl.c (java_net_VMInetAddress_getLocalHostname),
        test/internal/jit_stub.c (main): Renamed ABORT and EXIT to
        KAFFEVM_ABORT and KAFFEVM_EXIT.

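The point of the rename is namespacing: ABORT and EXIT are generic enough to
collide with identically named macros from system or third-party headers, so
the VM-internal entry points get a KAFFEVM_ prefix. As a rough sketch only
(not taken from the Kaffe tree; the real definitions live in the VM's support
headers and may route through its own error-reporting hooks), the renamed
pair behaves roughly like:

	/*
	 * Illustrative sketch, not part of this patch.  The mapping straight
	 * to abort()/exit() is an assumption for illustration; Kaffe's actual
	 * macros may do more (logging, stack dumps) before terminating.
	 */
	#include <stdlib.h>

	#define KAFFEVM_ABORT()     abort()     /* abnormal VM termination      */
	#define KAFFEVM_EXIT(code)  exit(code)  /* orderly VM exit with status  */

Call sites then change mechanically, e.g. ABORT(); becomes KAFFEVM_ABORT();,
as the hunks below show.
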
Members: 
	ChangeLog:1.3975->1.3976 
	config/mips/netbsd1/md.c:1.5->1.6 
	kaffe/jvmpi/jvmpi_kaffe.c:INITIAL->1.10 
	kaffe/kaffevm/intrp/icode.h:INITIAL->1.24 
	kaffe/kaffevm/systems/oskit-pthreads/pjthread.c:1.12->1.13 
	kaffe/kaffevm/systems/unix-jthreads/jthread.c:1.134->1.135 
	kaffe/kaffevm/systems/unix-jthreads/signal.c:INITIAL->1.32 
	libraries/clib/net/InetAddressImpl.c:1.30->1.31 

Index: kaffe/ChangeLog
diff -u kaffe/ChangeLog:1.3975 kaffe/ChangeLog:1.3976
--- kaffe/ChangeLog:1.3975	Sat May  7 08:06:26 2005
+++ kaffe/ChangeLog	Sat May  7 08:55:39 2005
@@ -1,3 +1,15 @@
+2005-05-07  Dalibor Topic  <robilad at kaffe.org>
+
+	* kaffe/kaffevm/systems/unix-jthreads/signal.c (setupSigAltStack),
+	kaffe/kaffevm/systems/unix-jthreads/jthread.c (restore_fds_and_exit)
+	kaffe/kaffevm/systems/oskit-pthreads/pjthread.c (remove_thread),
+	kaffe/jvmpi/jvmpi_kaffe.c (jvmpiProfilerExit),
+	config/mips/netbsd1/md.c (sysdepCallMethod),
+	kaffe/kaffevm/intrp/icode.h (softcall_breakpoint),
+	libraries/clib/net/InetAddressImpl.c (java_net_VMInetAddress_getLocalHostname),
+	test/internal/jit_stub.c (main): Renamed ABORT and EXIT to
+	KAFFEVM_ABORT and KAFFEVM_EXIT.
+
 2005-05-07  Guilhem Lavaux  <guilhem at kaffe.org>
 
 	* kaffe/kaffevm/kaffe-gc/gc-incremental.c
Index: kaffe/config/mips/netbsd1/md.c
diff -u kaffe/config/mips/netbsd1/md.c:1.5 kaffe/config/mips/netbsd1/md.c:1.6
--- kaffe/config/mips/netbsd1/md.c:1.5	Sat Dec 13 19:31:29 2003
+++ kaffe/config/mips/netbsd1/md.c	Sat May  7 08:55:43 2005
@@ -117,7 +117,7 @@
 	    call->ret->i = iret;
 	  break;
 	default:
-	  ABORT();
+	  KAFFEVM_ABORT();
 	  break;
 	}
       }
===================================================================
Checking out kaffe/kaffe/jvmpi/jvmpi_kaffe.c
RCS:  /home/cvs/kaffe/kaffe/kaffe/jvmpi/jvmpi_kaffe.c,v
VERS: 1.10
***************
--- /dev/null	Sun Aug  4 19:57:58 2002
+++ kaffe/kaffe/jvmpi/jvmpi_kaffe.c	Sat May  7 09:00:51 2005
@@ -0,0 +1,755 @@
+/*
+ * jvmpi_kaffe.c
+ * Routines for generating an assembly file with debugging information
+ *
+ * Copyright (c) 2003 University of Utah and the Flux Group.
+ * All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU Public License.
+ * See the file "license.terms" for information on usage and redistribution
+ * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+ *
+ * Contributed by the Flux Research Group, Department of Computer Science,
+ * University of Utah, http://www.cs.utah.edu/flux/
+ */
+
+#include "config.h"
+
+#if defined(ENABLE_JVMPI)
+
+#include "debug.h"
+#include "config-std.h"
+#include "config-mem.h"
+#include "config-hacks.h"
+#include "gtypes.h"
+#include "native.h"
+#include "object.h"
+#include "jni.h"
+#include "code.h"
+#include "classMethod.h"
+#include "java_lang_Thread.h"
+#include "thread.h"
+#include "stackTrace.h"
+#include "stringSupport.h"
+
+#include <assert.h>
+
+#include "jvmpi_kaffe.h"
+
+JVMPI_Interface *jvmpiCreateInterface(jint version)
+{
+	JVMPI_Interface *retval;
+
+	assert((version == JVMPI_VERSION_1) ||
+	       (version == JVMPI_VERSION_1_1));
+	
+	retval = &jvmpi_data.jk_Interface;
+	retval->version = version;
+	return( retval );
+}
+
+void jvmpiPostEvent(JVMPI_Event *ev)
+{
+	assert(ev != NULL);
+	assert(ev->event_type >= 0);
+	assert((ev->event_type & ~JVMPI_REQUESTED_EVENT) < JVMPI_EVENT_COUNT);
+
+	ev->env_id = THREAD_JNIENV();
+	switch( ev->event_type )
+	{
+	case JVMPI_EVENT_CLASS_LOAD:
+	case JVMPI_EVENT_CLASS_UNLOAD:
+	case JVMPI_EVENT_OBJECT_ALLOC:
+		gc_disableGC();
+		break;
+	default:
+		break;
+	}
+	jvmpi_data.jk_Interface.NotifyEvent(ev);
+	switch( ev->event_type )
+	{
+	case JVMPI_EVENT_CLASS_LOAD:
+	case JVMPI_EVENT_CLASS_UNLOAD:
+	case JVMPI_EVENT_OBJECT_ALLOC:
+		gc_enableGC();
+		break;
+	default:
+		break;
+	}
+}
+
+void jvmpiConvertField(JVMPI_Field *dst, fields *src)
+{
+	assert(dst != NULL);
+	assert(src != NULL);
+
+	dst->field_name = src->name->data;
+	dst->field_signature = src->signature->data;
+}
+
+void jvmpiConvertMethod(JVMPI_Method *dst, methods *src)
+{
+	assert(dst != NULL);
+	assert(src != NULL);
+
+	dst->method_name = src->name->data;
+	dst->method_signature = src->parsed_sig->signature->data;
+	if( src->lines != NULL )
+	{
+		dst->start_lineno = src->lines->entry[0].line_nr;
+		dst->end_lineno =
+			src->lines->entry[src->lines->length].line_nr;
+	}
+	else
+	{
+		dst->start_lineno = -1;
+		dst->end_lineno = -1;
+	}
+	dst->method_id = src;
+}
+
+void jvmpiConvertLineno(JVMPI_Lineno *dst,
+			lineNumberEntry *src,
+			void *start_pc)
+{
+	assert(dst != NULL);
+	assert(src != NULL);
+
+	dst->offset = src->start_pc - (uintp)start_pc;
+	dst->lineno = src->line_nr;
+}
+
+void jvmpiFillObjectAlloc(JVMPI_Event *ev, struct Hjava_lang_Object *obj)
+{
+	struct Hjava_lang_Class *cl;
+	
+	assert(ev != NULL);
+	assert(obj != NULL);
+
+	cl = OBJECT_CLASS(obj);
+	ev->event_type = JVMPI_EVENT_OBJECT_ALLOC;
+	ev->u.obj_alloc.arena_id = -1;
+	ev->u.obj_alloc.class_id = cl;
+	if( CLASS_IS_ARRAY(cl) )
+	{
+		jint prim_type = 0;
+		
+		switch( CLASS_PRIM_SIG(CLASS_ELEMENT_TYPE(cl)) )
+		{
+		case 'I':
+			prim_type = JVMPI_INT;
+			break;
+		case 'Z':
+			prim_type = JVMPI_BOOLEAN;
+			break;
+		case 'S':
+			prim_type = JVMPI_SHORT;
+			break;
+		case 'B':
+			prim_type = JVMPI_BYTE;
+			break;
+		case 'C':
+			prim_type = JVMPI_CHAR;
+			break;
+		case 'F':
+			prim_type = JVMPI_FLOAT;
+			break;
+		case 'D':
+			prim_type = JVMPI_DOUBLE;
+			break;
+		case 'J':
+			prim_type = JVMPI_LONG;
+			break;
+		default:
+			assert(0);
+			break;
+		}
+		ev->u.obj_alloc.is_array = prim_type;
+	}
+	else
+	{
+		ev->u.obj_alloc.is_array = JVMPI_NORMAL_OBJECT;
+	}
+	ev->u.obj_alloc.size = KGC_getObjectSize(main_collector, obj);
+	ev->u.obj_alloc.obj_id = obj;
+}
+
+void jvmpiFillThreadStart(JVMPI_Event *ev, struct Hjava_lang_Thread *tid)
+{
+	struct Hjava_lang_String *name;
+	
+	assert(ev != NULL);
+	assert(tid != NULL);
+	
+	ev->event_type = JVMPI_EVENT_THREAD_START;
+	if( (name = stringCharArray2Java(unhand_char_array(tid->name->value),
+					 tid->name->count)) != NULL )
+	{
+		ev->u.thread_start.thread_name = stringJava2C(name);
+	}
+	else
+	{
+		ev->u.thread_start.thread_name = NULL;
+	}
+	ev->u.thread_start.group_name = stringJava2C(tid->group->name);
+	ev->u.thread_start.parent_name = NULL;
+	ev->u.thread_start.thread_id = tid;
+	ev->u.thread_start.thread_env_id = 
+		&KTHREAD(get_data)((jthread_t)tid->vmThread->jthreadID)->jniEnv;
+}
+
+void jvmpiFillClassLoad(JVMPI_Event *ev, struct Hjava_lang_Class *cl)
+{
+	int lpc;
+	
+	assert(ev != NULL);
+	assert(cl != NULL);
+
+	for( lpc = 0; lpc < CLASS_NMETHODS(cl); lpc++ )
+	{
+		jvmpiConvertMethod(&ev->u.class_load.methods[lpc],
+				   &CLASS_METHODS(cl)[lpc]);
+	}
+	for( lpc = 0; lpc < CLASS_NSFIELDS(cl); lpc++ )
+	{
+		jvmpiConvertField(&ev->u.class_load.statics[lpc],
+				  &CLASS_SFIELDS(cl)[lpc]);
+	}
+	for( lpc = 0; lpc < CLASS_NIFIELDS(cl); lpc++ )
+	{
+		jvmpiConvertField(&ev->u.class_load.statics[lpc],
+				  &CLASS_IFIELDS(cl)[lpc]);
+	}
+	ev->event_type = JVMPI_EVENT_CLASS_LOAD;
+	ev->u.class_load.class_name = CLASS_CNAME(cl);
+	ev->u.class_load.source_name = CLASS_SOURCEFILE(cl);
+	ev->u.class_load.num_interfaces = cl->interface_len;
+	ev->u.class_load.num_methods = CLASS_NMETHODS(cl);
+	ev->u.class_load.num_static_fields = CLASS_NSFIELDS(cl);
+	ev->u.class_load.num_instance_fields = CLASS_NIFIELDS(cl);
+	ev->u.class_load.class_id = cl;
+}
+
+static jint jvmpiCreateSystemThread(char *name,
+				    jint priority,
+				    void (*f)(void *))
+{
+	jint retval;
+
+	if( (priority != JVMPI_NORMAL_PRIORITY) &&
+	    (priority != JVMPI_MAXIMUM_PRIORITY) &&
+	    (priority != JVMPI_MINIMUM_PRIORITY) )
+	{
+		retval = JNI_ERR;
+	}
+	else
+	{
+		jint mapped_priority = 0;
+		Hjava_lang_Thread *th;
+		errorInfo einfo;
+
+		switch( priority )
+		{
+		case JVMPI_NORMAL_PRIORITY:
+			mapped_priority = java_lang_Thread_NORM_PRIORITY;
+			break;
+		case JVMPI_MAXIMUM_PRIORITY:
+			mapped_priority = java_lang_Thread_MAX_PRIORITY;
+			break;
+		case JVMPI_MINIMUM_PRIORITY:
+			mapped_priority = java_lang_Thread_MIN_PRIORITY;
+			break;
+		default:
+			assert(0);
+			break;
+		}
+		if( (th = createDaemon(f,
+				       name,
+				       NULL,
+				       mapped_priority,
+				       32 * 1024, // XXX
+				       &einfo)) != NULL )
+		{
+			retval = JNI_OK;
+		}
+		else
+		{
+			discardErrorInfo(&einfo);
+			retval = JNI_ERR;
+		}
+	}
+	return( retval );
+}
+
+static jint jvmpiDisableEvent(jint event_type, void *arg)
+{
+	jint retval;
+
+	switch( event_type )
+	{
+	case JVMPI_EVENT_HEAP_DUMP:
+	case JVMPI_EVENT_MONITOR_DUMP:
+	case JVMPI_EVENT_OBJECT_DUMP:
+		retval = JVMPI_NOT_AVAILABLE;
+		break;
+	default:
+		BITMAP_CLEAR(jvmpi_data.jk_EventMask, event_type);
+		retval = JVMPI_SUCCESS;
+		break;
+	}
+	return( retval );
+}
+
+static void jvmpiDisableGC(void)
+{
+	gc_disableGC();
+}
+
+static jint jvmpiEnableEvent(jint event_type, void *arg)
+{
+	jint retval = JVMPI_NOT_AVAILABLE;
+	
+	switch( event_type )
+	{
+	case JVMPI_EVENT_HEAP_DUMP:
+	case JVMPI_EVENT_MONITOR_DUMP:
+	case JVMPI_EVENT_OBJECT_DUMP:
+		retval = JVMPI_NOT_AVAILABLE;
+		break;
+	default:
+		{
+			BITMAP_SET(jvmpi_data.jk_EventMask, event_type);
+			retval = JVMPI_SUCCESS;
+
+			assert(BITMAP_ISSET(jvmpi_data.jk_EventMask,
+					    event_type));
+		}
+		break;
+	}
+	return( retval );
+}
+
+static void jvmpiEnableGC(void)
+{
+	gc_enableGC();
+}
+
+static void jvmpiGetCallTrace(JVMPI_CallTrace *trace, jint depth)
+{
+	stackTraceInfo *sti = NULL;
+	jthread_t jt;
+
+	assert(trace != NULL);
+	assert(trace->env_id != NULL);
+	assert(trace->frames != NULL);
+	assert(depth > 0);
+
+	trace->num_frames = 0;
+	if( (jt = KTHREAD(from_data)((threadData *)trace->env_id,
+				    &jvmpi_data)) != NULL )
+	{
+		sti = (stackTraceInfo *)
+			buildStackTrace(jt == KTHREAD(current)() ?
+					NULL :
+					&KTHREAD(get_data)(jt)->topFrame);
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+	if( sti != NULL )
+	{
+		int lpc;
+
+		for( lpc = 0;
+		     (sti[lpc].meth != ENDOFSTACK) && (depth > 0);
+		     lpc++ )
+		{
+			JVMPI_CallFrame *cf;
+			Method *meth;
+
+			if( (meth = sti[lpc].meth) == NULL )
+				continue;
+			
+			cf = &trace->frames[trace->num_frames];
+			cf->lineno = -1;
+			if( meth->lines != NULL )
+			{
+				uintp linepc = 0;
+				int lpc2;
+				
+				for( lpc2 = 0;
+				     lpc2 < meth->lines->length;
+				     lpc2++ )
+				{
+					if( (sti[lpc].pc >=
+					     meth->lines->entry[lpc2].
+					     start_pc) &&
+					    (linepc <=
+					     meth->lines->entry[lpc2].
+					     start_pc) )
+					{
+						cf->lineno = meth->lines->
+							entry[lpc2].line_nr;
+						linepc = meth->lines->
+							entry[lpc2].start_pc;
+					}
+				}
+			}
+			cf->method_id = meth;
+			trace->num_frames += 1;
+			depth -= 1;
+		}
+	}
+}
+
+static jlong jvmpiGetCurrentThreadCpuTime(void)
+{
+	jlong retval;
+
+	retval = KTHREAD(get_usage)(KTHREAD(current)());
+	return( retval );
+}
+
+static jobjectID jvmpiGetMethodClass(jmethodID mid)
+{
+	jobjectID retval;
+
+	assert(mid != NULL);
+	
+	retval = ((Method *)mid)->class;
+	return( retval );
+}
+
+static void *jvmpiGetThreadLocalStorage(JNIEnv *env_id)
+{
+	void *retval = NULL;
+	jthread_t jt;
+
+	assert(env_id != NULL);
+	
+	if( (jt = KTHREAD(from_data)((threadData *)env_id,
+				    &jvmpi_data)) != NULL )
+	{
+		retval = KTHREAD(get_data)(jt)->jvmpiData;
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+	return( retval );
+}
+
+static jobjectID jvmpiGetThreadObject(JNIEnv *env_id)
+{
+	jobjectID retval = NULL;
+	jthread_t jt;
+
+	assert(env_id != NULL);
+	
+	if( (jt = KTHREAD(from_data)((threadData *)env_id,
+				    &jvmpi_data)) != NULL )
+	{
+		retval = KTHREAD(get_data)(jt)->jlThread;
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+	return( retval );
+}
+
+static jint jvmpiGetThreadStatus(JNIEnv *env_id)
+{
+	jint retval = 0;
+	jthread_t jt;
+
+	assert(env_id != NULL);
+
+	if( (jt = KTHREAD(from_data)((threadData *)env_id,
+				    &jvmpi_data)) != NULL )
+	{
+		if( KTHREAD(on_mutex)(jt) )
+		{
+			retval = JVMPI_THREAD_MONITOR_WAIT;
+		}
+		else if( KTHREAD(on_condvar)(jt) )
+		{
+			retval = JVMPI_THREAD_CONDVAR_WAIT;
+		}
+		else
+		{
+			switch( KTHREAD(get_status)(jt) )
+			{
+			case THREAD_RUNNING:
+				retval = JVMPI_THREAD_RUNNABLE;
+				break;
+			case THREAD_SUSPENDED:
+				/* XXX Should be IO_WAIT or something. */
+				retval = JVMPI_THREAD_MONITOR_WAIT;
+				break;
+			}
+		}
+		if( KTHREAD(is_interrupted)(jt) )
+		{
+			retval |= JVMPI_THREAD_INTERRUPTED;
+		}
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+	else
+	{
+		retval = JVMPI_FAIL;
+	}
+	return( retval );
+}
+
+static void jvmpiProfilerExit(jint err_code)
+{
+	KAFFEVM_EXIT(err_code);
+}
+
+static JVMPI_RawMonitor jvmpiRawMonitorCreate(char *lock_name)
+{
+	JVMPI_RawMonitor retval;
+
+	if( (retval = jmalloc(sizeof(struct _JVMPI_RawMonitor))) != NULL )
+	{
+		jmutex_initialise(&retval->mux);
+		jcondvar_initialise(&retval->cv);
+		retval->lock_name = lock_name;
+	}
+	return( retval );
+}
+
+static void jvmpiRawMonitorDestroy(JVMPI_RawMonitor lock_id)
+{
+	if( lock_id != NULL )
+	{
+		KMUTEX(destroy)(&lock_id->mux);
+		KCONDVAR(destroy)(&lock_id->cv);
+		jfree(lock_id);
+	}
+}
+
+static void jvmpiRawMonitorEnter(JVMPI_RawMonitor lock_id)
+{
+	assert(lock_id != NULL);
+	
+	KMUTEX(lock)(&lock_id->mux);
+}
+
+static void jvmpiRawMonitorExit(JVMPI_RawMonitor lock_id)
+{
+	assert(lock_id != NULL);
+	
+	KMUTEX(unlock)(&lock_id->mux);
+}
+
+static void jvmpiRawMonitorNotifyAll(JVMPI_RawMonitor lock_id)
+{
+	assert(lock_id != NULL);
+	
+	KCONDVAR(broadcast)(&lock_id->cv, &lock_id->mux);
+}
+
+static void jvmpiRawMonitorWait(JVMPI_RawMonitor lock_id, jlong ms)
+{
+	assert(lock_id != NULL);
+	
+	KCONDVAR(wait)(&lock_id->cv, &lock_id->mux, ms);
+}
+
+static jint jvmpiRequestEvent(jint event_type, void *arg)
+{
+	jint retval = JVMPI_NOT_AVAILABLE;
+
+	switch( event_type )
+	{
+	case JVMPI_EVENT_HEAP_DUMP:
+		break;
+	case JVMPI_EVENT_MONITOR_DUMP:
+		break;
+	case JVMPI_EVENT_OBJECT_DUMP:
+		break;
+	case JVMPI_EVENT_CLASS_LOAD:
+		{
+			struct Hjava_lang_Class *cl;
+			JVMPI_Method *jvmpi_methods;
+			JVMPI_Field *jvmpi_fields;
+			JVMPI_Event ev;
+
+			cl = (struct Hjava_lang_Class *)arg;
+			jvmpi_methods = alloca(sizeof(JVMPI_Method) *
+					       CLASS_NMETHODS(cl));
+			jvmpi_fields = alloca(sizeof(JVMPI_Field) *
+					      (CLASS_NSFIELDS(cl) +
+					       CLASS_NFIELDS(cl)));
+			ev.u.class_load.methods = jvmpi_methods;
+			ev.u.class_load.statics = &jvmpi_fields[0];
+			ev.u.class_load.instances =
+				&jvmpi_fields[CLASS_NSFIELDS(cl)];
+			jvmpiFillClassLoad(&ev, cl);
+			ev.event_type |= JVMPI_REQUESTED_EVENT;
+			jvmpiPostEvent(&ev);
+		}
+		break;
+	case JVMPI_EVENT_THREAD_START:
+		{
+			struct Hjava_lang_Thread *tid;
+			JVMPI_Event ev;
+
+			tid = (struct Hjava_lang_Thread *)arg;
+			jvmpiFillThreadStart(&ev, tid);
+			ev.event_type |= JVMPI_REQUESTED_EVENT;
+			jvmpiPostEvent(&ev);
+			gc_free(ev.u.thread_start.parent_name);
+			gc_free(ev.u.thread_start.group_name);
+			gc_free(ev.u.thread_start.thread_name);
+		}
+		break;
+	case JVMPI_EVENT_OBJECT_ALLOC:
+		{
+			struct Hjava_lang_Object *obj;
+			JVMPI_Event ev;
+
+			obj = (struct Hjava_lang_Object *)arg;
+			jvmpiFillObjectAlloc(&ev, obj);
+			ev.event_type |= JVMPI_REQUESTED_EVENT;
+			jvmpiPostEvent(&ev);
+		}
+		break;
+	}
+	return( retval );
+}
+
+static void jvmpiResumeThread(JNIEnv *env)
+{
+	jthread_t jt;
+
+	assert(env != NULL);
+
+	if( (jt = KTHREAD(from_data)((threadData *)env, &jvmpi_data)) != NULL )
+	{
+		KTHREAD(resume)(jt, &jvmpi_data);
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+}
+
+static void jvmpiResumeThreadList(jint reqCount, JNIEnv **reqList, jint *results)
+{
+	int lpc;
+
+	/* XXX */
+	for( lpc = 0; lpc < reqCount; lpc++ )
+	{
+		jvmpiResumeThread(reqList[lpc]);
+		results[lpc] = 0;
+	}
+}
+
+static void jvmpiRunGC(void)
+{
+	invokeGC();
+}
+
+static void jvmpiSetThreadLocalStorage(JNIEnv *env_id, void *ptr)
+{
+	jthread_t jt;
+
+	assert(env_id != NULL);
+	
+	if( (jt = KTHREAD(from_data)((threadData *)env_id,
+				    &jvmpi_data)) != NULL )
+	{
+		KTHREAD(get_data)(jt)->jvmpiData = ptr;
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+}
+
+static void jvmpiSuspendThread(JNIEnv *env_id)
+{
+	jthread_t jt;
+
+	jt = KTHREAD(from_data)((threadData *)env_id, &jvmpi_data);
+	KTHREAD(clear_run)(jt);
+}
+
+static void jvmpiSuspendThreadList(jint reqCount, JNIEnv **reqList, jint *results)
+{
+	int lpc;
+
+	assert(reqCount > 0);
+	assert(reqList != NULL);
+	assert(results != NULL);
+
+	/* XXX */
+	for( lpc = 0; lpc < reqCount; lpc++ )
+	{
+		jvmpiSuspendThread(reqList[lpc]);
+		results[lpc] = 0;
+	}
+}
+
+static jboolean jvmpiThreadHasRun(JNIEnv *env)
+{
+	jboolean retval = JNI_FALSE;
+	jthread_t jt;
+
+	if( (jt = KTHREAD(from_data)((threadData *)env, &jvmpi_data)) != NULL )
+	{
+		retval = KTHREAD(has_run)(jt);
+		KTHREAD(resume)(jt, &jvmpi_data);
+	}
+	return( retval );
+}
+
+static jobject jvmpijobjectID2jobject(jobjectID jid)
+{
+	return( jid );
+}
+
+static jobjectID jvmpijobject2jobjectID(jobject j)
+{
+	return( j );
+}
+
+jvmpi_kaffe_t jvmpi_data = {
+	{ 0 },
+	{
+		JVMPI_VERSION_1_1,
+		
+		NULL,
+		
+		jvmpiEnableEvent,
+		jvmpiDisableEvent,
+		jvmpiRequestEvent,
+		
+		jvmpiGetCallTrace,
+		
+		jvmpiProfilerExit,
+		
+		jvmpiRawMonitorCreate,
+		jvmpiRawMonitorEnter,
+		jvmpiRawMonitorExit,
+		jvmpiRawMonitorWait,
+		jvmpiRawMonitorNotifyAll,
+		jvmpiRawMonitorDestroy,
+
+		jvmpiGetCurrentThreadCpuTime,
+		jvmpiSuspendThread,
+		jvmpiResumeThread,
+		jvmpiGetThreadStatus,
+		jvmpiThreadHasRun,
+		jvmpiCreateSystemThread,
+
+		jvmpiSetThreadLocalStorage,
+		jvmpiGetThreadLocalStorage,
+
+		jvmpiDisableGC,
+		jvmpiEnableGC,
+		jvmpiRunGC,
+
+		jvmpiGetThreadObject,
+		jvmpiGetMethodClass,
+
+		jvmpijobjectID2jobject,
+		jvmpijobject2jobjectID,
+
+		jvmpiSuspendThreadList,
+		jvmpiResumeThreadList
+		
+	}
+};
+
+#endif
===================================================================
Checking out kaffe/kaffe/kaffevm/intrp/icode.h
RCS:  /home/cvs/kaffe/kaffe/kaffe/kaffevm/intrp/icode.h,v
VERS: 1.24
***************
--- /dev/null	Sun Aug  4 19:57:58 2002
+++ kaffe/kaffe/kaffevm/intrp/icode.h	Sat May  7 09:00:52 2005
@@ -0,0 +1,354 @@
+/*
+ * icode.h
+ * Define the instruction codes macros.
+ *
+ * Copyright (c) 1996, 1997
+ *	Transvirtual Technologies, Inc.  All rights reserved.
+ *
+ * See the file "license.terms" for information on usage and redistribution
+ * of this file.
+ */
+
+#ifndef __icode_h
+#define	__icode_h
+
+#include "slots.h"
+#include "soft.h"
+
+#if defined(KAFFE_VMDEBUG)
+static const int32 UNINITIALIZED_STACK_SLOT = 0x00c0ffee;
+#endif
+
+#define	move_long_const(t, c)			(t)[0].v.tlong = (c)
+#define	add_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong + (f2)[0].v.tlong
+#define	sub_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong - (f2)[0].v.tlong
+#define	mul_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong * (f2)[0].v.tlong
+#define div_long(t, f1, f2)			(t)[0].v.tlong = (((((f1)[0].v.tlong) == JLONG_MIN) && (((f2)[0].v.tlong) == -1)) ? JLONG_MIN : (((f1)[0].v.tlong) / ((f2)[0].v.tlong)))
+#define rem_long(t, f1, f2)                     (t)[0].v.tlong = ((((f2)[0].v.tlong) != -1) ? (((f1)[0].v.tlong) % ((f2)[0].v.tlong)) : 0)
+#define	neg_long(t, f)				(t)[0].v.tlong = -(f)[0].v.tlong
+
+#define	and_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong & (f2)[0].v.tlong
+#define	or_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong | (f2)[0].v.tlong
+#define	xor_long(t, f1, f2)			(t)[0].v.tlong = (f1)[0].v.tlong ^ (f2)[0].v.tlong
+
+#define	lshl_long(t, f1, f2)			(t)[0].v.tlong = ((f1)[0].v.tlong) << ((f2)[0].v.tint & 63)
+#define	ashr_long(t, f1, f2)			(t)[0].v.tlong = ((int64)(f1)[0].v.tlong) >> ((f2)[0].v.tint & 63)
+#define	lshr_long(t, f1, f2)			(t)[0].v.tlong = ((uint64)(f1)[0].v.tlong) >> ((f2)[0].v.tint & 63)
+
+#define	lcmp(t, f1, f2)				do { jlong l2 = ((f2)[0].v.tlong); jlong l1 = ((f1)[0].v.tlong); \
+						(t)[0].v.tint = ((l1 == l2)? 0 : ((l1 > l2) ? -1 : 1)); } while(0);
+
+#define	cvt_int_long(t, f)			(t)[0].v.tlong = (f)[0].v.tint
+#define	cvt_long_int(t, f)			(t)[0].v.tint = (f)[0].v.tlong
+#define	cvt_long_float(t, f)			(t)[0].v.tfloat = (f)[0].v.tlong
+#define	cvt_long_double(t, f)			(t)[0].v.tdouble = (f)[0].v.tlong
+#define	cvt_float_long(t, f)			(t)[0].v.tlong = soft_cvtfl((f)[0].v.tfloat)
+#define	cvt_double_long(t, f)			(t)[0].v.tlong = soft_cvtdl((f)[0].v.tdouble)
+
+#define	move_int_const(t, c)			(t)[0].v.tint = (c)
+#define	move_ref_const(t, c)			(t)[0].v.taddr = (void*)(c)
+#define	move_label_const(t, c)			move_ref_const(t, c)
+#define	move_string_const(t, c)			move_ref_const(t, c)
+
+#define	move_int(t, f)				(t)[0].v.tint = (f)[0].v.tint
+#define	move_ref(t, f)				(t)[0].v.taddr = (f)[0].v.taddr
+#define	move_any(t, f)				move_long(t, f)
+
+#define	swap_any(t1, t2)			{			\
+						  tmp[0] = (t1)[0];	\
+						  (t1)[0] = (t2)[0];	\
+						  (t2)[0] = tmp[0];	\
+						}
+
+#define	load_int(t, f)				(t)[0].v.tint = *(jint*)((f)[0].v.taddr)
+#define	load_ref(t, f)				(t)[0].v.taddr = *(void**)((f)[0].v.taddr)
+#define	load_byte(t, f)				(t)[0].v.tint = *(jbyte*)((f)[0].v.taddr)
+#define	load_char(t, f)				(t)[0].v.tint = *(jchar*)((f)[0].v.taddr)
+
+#define	store_int(t, f)				*(jint*)((t)[0].v.taddr) = ((f)[0].v.tint)
+#define	store_ref(t, f)				*(void**)((t)[0].v.taddr) = ((f)[0].v.taddr)
+#define	store_byte(t, f)			*(jbyte*)((t)[0].v.taddr) = ((f)[0].v.tint)
+#define	store_char(t, f)			*(jchar*)((t)[0].v.taddr) = ((f)[0].v.tint)
+
+#define	load_any(t, f)				load_int(t, f)
+#define load_offset_any(t, f, o)		load_offset_int(t, f, o)
+#define	store_any(t, f)				store_int(t, f)
+#define store_offset_any(t, f, o)		store_offset_int(t, f, o)
+
+#define	add_int_const(t, f, c)			(t)[0].v.tint = ((f)[0].v.tint) + (c)
+#define	add_ref_const(t, f, c)			(t)[0].v.taddr = (void*)((uint8*)((f)[0].v.taddr) + (c))
+#define	sub_int_const(t, f, c)			(t)[0].v.tint = ((f)[0].v.tint) - (c)
+#define	mul_int_const(t, f, c)			(t)[0].v.tint = ((f)[0].v.tint) * (c)
+
+#define	add_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) + ((f2)[0].v.tint)
+#define	add_ref(t, f1, f2)			(t)[0].v.taddr = (void*)((uint8*)((f1)[0].v.taddr) + ((f2)[0].v.tint))
+#define	sub_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) - ((f2)[0].v.tint)
+#define	mul_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) * ((f2)[0].v.tint)
+#define div_int(t, f1, f2)			(t)[0].v.tint = (((((f1)[0].v.tint) == JINT_MIN) && (((f2)[0].v.tint) == -1)) ? JINT_MIN : (((f1)[0].v.tint) / ((f2)[0].v.tint)))
+#define rem_int(t, f1, f2)                      (t)[0].v.tint = ((((f2)[0].v.tint) != -1) ? (((f1)[0].v.tint) % ((f2)[0].v.tint)) : 0)
+#define	neg_int(t, f)				(t)[0].v.tint = -((f)[0].v.tint)
+#define	lshl_int_const(t, f, c)			(t)[0].v.tint = ((f)[0].v.tint) << (c & 31)
+#define	lshl_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) << ((f2)[0].v.tint & 31)
+#define	ashr_int(t, f1, f2)			(t)[0].v.tint = ((int32)(f1)[0].v.tint) >> ((f2)[0].v.tint & 31)
+#define	lshr_int(t, f1, f2)			(t)[0].v.tint = ((uint32)(f1)[0].v.tint) >> ((f2)[0].v.tint & 31)
+#define	and_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) & ((f2)[0].v.tint)
+#define	or_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) | ((f2)[0].v.tint)
+#define	xor_int(t, f1, f2)			(t)[0].v.tint = ((f1)[0].v.tint) ^ ((f2)[0].v.tint)
+
+#define	cvt_int_byte(t, f)			(t)[0].v.tint = (((f)[0].v.tint) << 24) >> 24
+#define	cvt_int_char(t, f)			(t)[0].v.tint = ((f)[0].v.tint) & 0xFFFF
+#define	cvt_int_short(t, f)			(t)[0].v.tint = (((f)[0].v.tint) << 16) >> 16
+
+#define	branch_indirect(w)			w
+#define	branch_a(w)				w
+
+#define	cbranch_int_eq(s1, s2, w)		if ((s1)[0].v.tint == (s2)[0].v.tint) w
+#define	cbranch_int_ne(s1, s2, w)		if ((s1)[0].v.tint != (s2)[0].v.tint) w
+#define	cbranch_int_lt(s1, s2, w)		if ((s1)[0].v.tint < (s2)[0].v.tint) w
+#define	cbranch_int_le(s1, s2, w)		if ((s1)[0].v.tint <= (s2)[0].v.tint) w
+#define	cbranch_int_gt(s1, s2, w)		if ((s1)[0].v.tint > (s2)[0].v.tint) w
+#define	cbranch_int_ge(s1, s2, w)		if ((s1)[0].v.tint >= (s2)[0].v.tint) w
+#define	cbranch_int_ult(s1, s2, w)		if ((unsigned int)(s1)[0].v.tint < (unsigned int)(s2)[0].v.tint) w
+
+#define	cbranch_int_const_eq(s1, s2, w)		if ((s1)[0].v.tint == (s2)) w
+#define	cbranch_int_const_ne(s1, s2, w)		if ((s1)[0].v.tint != (s2)) w
+#define	cbranch_int_const_lt(s1, s2, w)		if ((s1)[0].v.tint < (s2)) w
+#define	cbranch_int_const_le(s1, s2, w)		if ((s1)[0].v.tint <= (s2)) w
+#define	cbranch_int_const_gt(s1, s2, w)		if ((s1)[0].v.tint > (s2)) w
+#define	cbranch_int_const_ge(s1, s2, w)		if ((s1)[0].v.tint >= (s2)) w
+#define	cbranch_int_const_ult(s1, s2, w)	if ((unsigned int)(s1)[0].v.tint < (unsigned int)(s2)) w
+
+#define cbranch_ref_eq(s1, s2, w)		if ((s1)[0].v.taddr == (s2)[0].v.taddr) w
+#define cbranch_ref_ne(s1, s2, w)		if ((s1)[0].v.taddr != (s2)[0].v.taddr) w
+#define cbranch_ref_const_eq(s1, s2, w)		if ((s1)[0].v.taddr == (void*)(s2)) w
+#define cbranch_ref_const_ne(s1, s2, w)		if ((s1)[0].v.taddr != (void*)(s2)) w
+
+#define	call(m)					softcall_initialise_class(method_class()); \
+						virtualMachine((methods*)(m)[0].v.taddr, sp+1, retval, thread_data)
+#define	call_indirect_method(m)			softcall_initialise_class(method_class()); \
+						virtualMachine(m, sp+1, retval, thread_data)
+
+#define	ret()					goto end
+
+#define	returnarg_int(s)			retval[0].v.tint = (s)[0].v.tint
+#define	returnarg_ref(s)			retval[0].v.taddr = (s)[0].v.taddr
+#define	returnarg_long(s)			retval[0].v.tlong = (s)[0].v.tlong
+#define	returnarg_float(s)			retval[0].v.tfloat = (s)[0].v.tfloat
+#define	returnarg_double(s)			retval[0].v.tdouble = (s)[0].v.tdouble
+
+#define	pusharg_int_const(c, i)			/* Not needed for interpreter */
+#define	pusharg_int(f, i)			/* Not needed for interpreter */
+#define	pusharg_ref(f, i)			/* Not needed for interpreter */
+#define	pusharg_ref_const(f, i)			/* Not needed for interpreter */
+#define	pusharg_any(f, i)			/* Not needed for interpreter */
+#define	pusharg_anylong(f, i)			/* Not needed for interpreter */
+#define	popargs()				/* Not needed for interpreter */
+
+#define	return_int(s)				(s)[0].v.tint = retval[0].v.tint
+#define	return_long(s)				(s)[0].v.tlong = retval[0].v.tlong
+#define	return_float(s)				(s)[0].v.tfloat = retval[0].v.tfloat
+#define	return_double(s)			(s)[0].v.tdouble = retval[0].v.tdouble
+#define	return_ref(s)				(s)[0].v.taddr = retval[0].v.taddr
+
+#define	monitor_enter()				/* Not needed for interpreter */
+#define	monitor_exit()				/* Not needed for interpreter */
+
+#define	start_function()			/* Not needed for interpreter */
+#define	start_basic_block()			/* Not needed for interpreter */
+#define	end_basic_block()			/* Not needed for interpreter */
+#define	end_function()				/* Not needed for interpreter */
+#define	start_sub_block()			/* Not needed for interpreter */
+#define	end_sub_block()				/* Not needed for interpreter */
+#define	begin_func_sync()			/* Not needed for interpreter */
+#define	end_func_sync()				/* Not needed for interpreter */
+#define	begin_sync()				/* Not needed for interpreter */
+#define	end_sync()				/* Not needed for interpreter */
+

*** Patch too long, truncated ***



