author    Juho Snellman <jsnell@iki.fi>    2006-01-08 00:33:13 +0000
committer Juho Snellman <jsnell@iki.fi>    2006-01-08 00:33:13 +0000
commit    6e6670a5c26b3594a0eaa8da59db75b48e0db878 (patch)
tree      78c7fe2dc7e274e3829aa297849009dd6159f17d
parent    97423182206cfe8c078eff105fea00dceb03be99 (diff)
0.9.8.20:
    Final batch from the sbcl-devel "Changes to GENCGC memory zeroing"
    thread (2005-12). Use hand-coded assembly for zeroing memory in
    GENCGC instead of the platform memset/bzero.
    * Use MOVNTDQ on x86-64.
    * Use MOVNTDQ on x86 that supports SSE2 (basically Pentium 4 and
      newer).
    * Difference to the version posted on sbcl-devel: do *not* use the
      MMX MOVNTQ on x86 that supports MMX but not SSE2, since MOVNTQ
      apparently had very bad performance on K7 Athlons/Durons.
    * Use REP STOSL on the remaining x86 processors.
-rw-r--r--  src/runtime/gencgc.c         6
-rw-r--r--  src/runtime/x86-64-assem.S  43
-rw-r--r--  src/runtime/x86-assem.S    139
-rw-r--r--  version.lisp-expr            2
4 files changed, 185 insertions(+), 5 deletions(-)
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 3d1a3e4bc..ca7fa4713 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -420,6 +420,9 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
fpu_restore(fpu_state);
}
+
+void fast_bzero(void*, size_t); /* in <arch>-assem.S */
+
/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
* if zeroing it ourselves, i.e. in practice give the memory back to the
* OS. Generally done after a large GC.
@@ -451,7 +454,7 @@ zero_pages(page_index_t start, page_index_t end) {
if (start > end)
return;
- memset(page_address(start), 0, PAGE_BYTES*(1+end-start));
+ fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
}
/* Zero the pages from START to END (inclusive), except for those
@@ -473,6 +476,7 @@ zero_dirty_pages(page_index_t start, page_index_t end) {
page_table[i].need_to_zero = 1;
}
}
+
/*
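A minimal C sketch of the call contract the gencgc.c hunks above set up, assuming PAGE_BYTES is the usual 4096-byte GENCGC page size; the helper name zero_page_range is hypothetical and only illustrates how a page-aligned range is handed to the assembly routine in place of memset().

#include <stddef.h>

#define PAGE_BYTES 4096                 /* assumed GENCGC page size */

void fast_bzero(void*, size_t);         /* in <arch>-assem.S */

/* Hypothetical caller: pass a page-aligned range to the assembly
 * routine, as zero_pages() now does instead of calling memset(). */
static void zero_page_range(void *start_addr, size_t n_pages)
{
    /* The range starts and ends on a 4096-byte boundary, which is
     * exactly what the MOVNTDQ loops in the assembly require. */
    fast_bzero(start_addr, PAGE_BYTES * n_pages);
}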
diff --git a/src/runtime/x86-64-assem.S b/src/runtime/x86-64-assem.S
index c654861cd..dbabd4d1e 100644
--- a/src/runtime/x86-64-assem.S
+++ b/src/runtime/x86-64-assem.S
@@ -344,4 +344,45 @@ GNAME(post_signal_tramp):
ret
.size GNAME(post_signal_tramp),.-GNAME(post_signal_tramp)
- .end
+ .text
+ .align align_8byte,0x90
+ .global GNAME(fast_bzero)
+ .type GNAME(fast_bzero),@function
+
+GNAME(fast_bzero):
+ /* A fast routine for zero-filling blocks of memory that are
+ * guaranteed to start and end at a 4096-byte aligned address.
+ */
+ shr $6, %rsi /* Amount of 64-byte blocks to copy */
+ jz Lend /* If none, stop */
+ mov %rsi, %rcx /* Save start address */
+ movups %xmm7, -16(%rsp) /* Save XMM register */
+ xorps %xmm7, %xmm7 /* Zero the XMM register */
+ jmp Lloop
+ .align 16
+Lloop:
+
+ /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
+ * non-caching double-quadword moving variant, i.e. the memory areas
+ * we're touching are not fetched into the L1 cache, since we're just
+ * going to overwrite the memory soon anyway.
+ */
+ movntdq %xmm7, 0(%rdi)
+ movntdq %xmm7, 16(%rdi)
+ movntdq %xmm7, 32(%rdi)
+ movntdq %xmm7, 48(%rdi)
+
+ add $64, %rdi /* Advance pointer */
+ dec %rsi /* Decrement 64-byte block count */
+ jnz Lloop
+ mfence /* Ensure that the writes are globally visible, since
+ * MOVNTDQ is weakly ordered */
+ movups -16(%rsp), %xmm7 /* Restore the XMM register */
+ prefetcht0 0(%rcx) /* Prefetch the start of the block into cache,
+ * since it's likely to be used immediately. */
+Lend:
+ ret
+ .size GNAME(fast_bzero), .-GNAME(fast_bzero)
+
+
+ .end
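For readers more comfortable with C than with AT&T assembly, the loop above corresponds roughly to the following SSE2-intrinsics sketch. It is an illustration under the same assumptions (length a multiple of 64 bytes, page-aligned start), not the code the runtime actually builds; the function name fast_bzero_sse2_sketch is made up.

#include <emmintrin.h>   /* SSE2: _mm_setzero_si128, _mm_stream_si128, _mm_mfence */
#include <stddef.h>

static void fast_bzero_sse2_sketch(void *start, size_t length)
{
    __m128i zero = _mm_setzero_si128();
    char *p = (char *) start;
    char *end = p + length;

    for (; p < end; p += 64) {
        /* Non-temporal (MOVNTDQ) stores: write straight to memory
         * without pulling the target lines into the cache. */
        _mm_stream_si128((__m128i *) (p +  0), zero);
        _mm_stream_si128((__m128i *) (p + 16), zero);
        _mm_stream_si128((__m128i *) (p + 32), zero);
        _mm_stream_si128((__m128i *) (p + 48), zero);
    }

    /* Non-temporal stores are weakly ordered; fence before the zeroed
     * memory is used (the assembly above issues MFENCE). */
    _mm_mfence();
}

The assembly additionally prefetches the start of the block back into cache afterwards, on the theory that the allocator will touch it next; the sketch omits that detail.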
diff --git a/src/runtime/x86-assem.S b/src/runtime/x86-assem.S
index 0a65cd552..d1c2f49d9 100644
--- a/src/runtime/x86-assem.S
+++ b/src/runtime/x86-assem.S
@@ -841,7 +841,7 @@ GNAME(sigtrap_trampoline):
int3
.byte trap_ContextRestore
hlt # We should never return here.
-
+
/*
* This is part of the funky magic for exception handling on win32.
* see handle_exception() in win32-os.c for details.
@@ -858,5 +858,140 @@ GNAME(exception_trampoline):
.byte trap_ContextRestore
hlt # We should never return here.
#endif
-
+
+ /* fast_bzero implementations and code to detect which implementation
+ * to use.
+ */
+
+ .global GNAME(fast_bzero_pointer)
+ .data
+ .align 4
+GNAME(fast_bzero_pointer):
+ /* Variable containing a pointer to the bzero function to use.
+ * Initially points to a function that detects which implementation
+ * should be used, and then updates the variable. */
+ .long fast_bzero_detect
+
+ .text
+ .align align_8byte,0x90
+ .global GNAME(fast_bzero)
+ TYPE(GNAME(fast_bzero))
+GNAME(fast_bzero):
+ /* Indirect function call */
+ jmp *fast_bzero_pointer
+ SIZE(GNAME(fast_bzero))
+
+
+ .text
+ .align align_8byte,0x90
+ .global GNAME(fast_bzero_detect)
+ TYPE(GNAME(fast_bzero_detect))
+GNAME(fast_bzero_detect):
+ /* Decide whether to use SSE, MMX or REP version */
+ push %eax /* CPUID uses EAX-EDX */
+ push %ebx
+ push %ecx
+ push %edx
+ mov $1, %eax
+ cpuid
+ test $0x04000000, %edx /* SSE2 needed for MOVNTDQ */
+ jnz Lsse2
+ /* Originally there was another case here for using the
+ * MOVNTQ instruction for processors that supported MMX but
+ * not SSE2. This turned out to be a loss especially on
+ * Athlons (where this instruction is apparently microcoded
+ * somewhat slowly). So for simplicity revert to REP STOSL
+ * for all non-SSE2 processors.
+ */
+Lbase:
+ movl $fast_bzero_base, fast_bzero_pointer
+ jmp Lrestore
+Lsse2:
+ movl $fast_bzero_sse, fast_bzero_pointer
+ jmp Lrestore
+
+Lrestore:
+ pop %edx
+ pop %ecx
+ pop %ebx
+ pop %eax
+ jmp *fast_bzero_pointer
+
+ SIZE(GNAME(fast_bzero_detect))
+
+
+ .text
+ .align align_8byte,0x90
+ .global GNAME(fast_bzero_sse)
+ TYPE(GNAME(fast_bzero_sse))
+
+GNAME(fast_bzero_sse):
+ /* A fast routine for zero-filling blocks of memory that are
+ * guaranteed to start and end at a 4096-byte aligned address.
+ */
+ push %esi /* Save temporary registers */
+ push %edi
+ mov 16(%esp), %esi /* Parameter: amount of bytes to fill */
+ mov 12(%esp), %edi /* Parameter: start address */
+ shr $6, %esi /* Amount of 64-byte blocks to copy */
+ jz Lend_sse /* If none, stop */
+ movups %xmm7, -16(%esp) /* Save XMM register */
+ xorps %xmm7, %xmm7 /* Zero the XMM register */
+ jmp Lloop_sse
+ .align 16
+Lloop_sse:
+
+ /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
+ * non-caching double-quadword moving variant, i.e. the memory areas
+ * we're touching are not fetched into the L1 cache, since we're just
+ * going to overwrite the memory soon anyway.
+ */
+ movntdq %xmm7, 0(%edi)
+ movntdq %xmm7, 16(%edi)
+ movntdq %xmm7, 32(%edi)
+ movntdq %xmm7, 48(%edi)
+
+ add $64, %edi /* Advance pointer */
+ dec %esi /* Decrement 64-byte block count */
+ jnz Lloop_sse
+ movups -16(%esp), %xmm7 /* Restore the XMM register */
+ sfence /* Ensure that weakly ordered writes are flushed. */
+Lend_sse:
+ mov 12(%esp), %esi /* Parameter: start address */
+ prefetcht0 0(%esi) /* Prefetch the start of the block into cache,
+ * since it's likely to be used immediately. */
+ pop %edi /* Restore temp registers */
+ pop %esi
+ ret
+ SIZE(GNAME(fast_bzero_sse))
+
+
+ .text
+ .align align_8byte,0x90
+ .global GNAME(fast_bzero_base)
+ TYPE(GNAME(fast_bzero_base))
+
+GNAME(fast_bzero_base):
+ /* A fast routine for zero-filling blocks of memory that are
+ * guaranteed to start and end at a 4096-byte aligned address.
+ */
+ push %eax /* Save temporary registers */
+ push %ecx
+ push %edi
+ mov 20(%esp), %ecx /* Parameter: amount of bytes to fill */
+ mov 16(%esp), %edi /* Parameter: start address */
+ xor %eax, %eax /* Zero EAX */
+ shr $2, %ecx /* Amount of 4-byte blocks to copy */
+ jz Lend_base
+ cld /* Set direction of STOSL to increment */
+ rep stosl /* Store EAX to *EDI, ECX times, incrementing
+ * EDI by 4 after each store */
+Lend_base:
+ pop %edi /* Restore temp registers */
+ pop %ecx
+ pop %eax
+ ret
+ SIZE(GNAME(fast_bzero_base))
+
+
.end
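The fast_bzero_pointer indirection above is a small self-patching dispatch: the first call lands in the detector, which checks the CPUID feature bits and then rewrites the pointer so later calls go straight to the chosen routine. A rough C equivalent, assuming GCC's <cpuid.h> and using plain memset stubs in place of the real assembly bodies, might look like this (all names besides fast_bzero mirror the diff; the C code itself is only a sketch):

#include <cpuid.h>       /* __get_cpuid(), GCC/clang on x86 */
#include <stddef.h>
#include <string.h>

static void fast_bzero_detect(void *addr, size_t length);
static void fast_bzero_sse(void *addr, size_t length);
static void fast_bzero_base(void *addr, size_t length);

/* Initially points at the detector; the first call patches it. */
static void (*fast_bzero_pointer)(void *, size_t) = fast_bzero_detect;

void fast_bzero(void *addr, size_t length)
{
    fast_bzero_pointer(addr, length);   /* indirect call, as in the .S file */
}

static void fast_bzero_detect(void *addr, size_t length)
{
    unsigned int eax, ebx, ecx, edx;

    /* CPUID leaf 1, EDX bit 26 (0x04000000) advertises SSE2, which
     * MOVNTDQ requires; everything else falls back to the REP STOSL
     * style routine. */
    if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & 0x04000000))
        fast_bzero_pointer = fast_bzero_sse;
    else
        fast_bzero_pointer = fast_bzero_base;

    fast_bzero_pointer(addr, length);
}

/* Placeholders standing in for the assembly implementations. */
static void fast_bzero_sse(void *addr, size_t length)  { memset(addr, 0, length); }
static void fast_bzero_base(void *addr, size_t length) { memset(addr, 0, length); }

Patching the pointer once keeps the per-call overhead to a single indirect jump, which is why the assembly takes the same approach rather than re-running CPUID on every call.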
diff --git a/version.lisp-expr b/version.lisp-expr
index 39130eac2..44462973e 100644
--- a/version.lisp-expr
+++ b/version.lisp-expr
@@ -17,4 +17,4 @@
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.9.8.19"
+"0.9.8.20"